- // SPDX-License-Identifier: GPL-2.0-only
- /*
- * Copyright (C) 2006-2007 Adam Belay <[email protected]>
- * Copyright (C) 2009 Intel Corporation
- * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
- */
- #include <linux/cpu.h>
- #include <linux/cpuidle.h>
- #include <linux/cpu_pm.h>
- #include <linux/ktime.h>
- #include <linux/module.h>
- #include <linux/pm_domain.h>
- #include <linux/pm_runtime.h>
- #include <linux/pm_qos.h>
- #include <linux/sched/idle.h>
- #if IS_ENABLED(CONFIG_SCHED_WALT)
- #include <linux/sched/walt.h>
- #endif
- #include <linux/smp.h>
- #include <linux/spinlock.h>
- #include <linux/string.h>
- #include <linux/suspend.h>
- #include <linux/tick.h>
- #include <linux/time64.h>
- #include <trace/events/ipi.h>
- #include <trace/events/power.h>
- #include <trace/hooks/cpuidle.h>
- #include "qcom-lpm.h"
- #define CREATE_TRACE_POINTS
- #include "trace-qcom-lpm.h"
- #define LPM_PRED_RESET 0
- #define LPM_PRED_RESIDENCY_PATTERN 1
- #define LPM_PRED_PREMATURE_EXITS 2
- #define LPM_PRED_IPI_PATTERN 3
- #define LPM_SELECT_STATE_DISABLED 0
- #define LPM_SELECT_STATE_QOS_UNMET 1
- #define LPM_SELECT_STATE_RESIDENCY_UNMET 2
- #define LPM_SELECT_STATE_PRED 3
- #define LPM_SELECT_STATE_IPI_PENDING 4
- #define LPM_SELECT_STATE_SCHED_BIAS 5
- #define LPM_SELECT_STATE_MAX 7
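- /*
-  * UPDATE_REASON(i, u): record reason bit 'u' (one of the LPM_SELECT_STATE_*
-  * values above) for candidate state index 'i' in the 'reason' word that is
-  * reported through the lpm_gov_select tracepoint.
-  */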
- #define UPDATE_REASON(i, u) (BIT(u) << (MAX_LPM_CPUS * i))
- bool prediction_disabled;
- bool sleep_disabled = true;
- static bool suspend_in_progress;
- static bool traces_registered;
- static struct cluster_governor *cluster_gov_ops;
- DEFINE_PER_CPU(struct lpm_cpu, lpm_cpu_data);
- static inline bool check_cpu_isactive(int cpu)
- {
- return cpu_active(cpu);
- }
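- /**
-  * lpm_disallowed() - Check whether the cpu must stay out of low power modes
-  * @sleep_ns: expected sleep length from the scheduler
-  * @cpu: target cpu
-  *
-  * Returns true while suspend is in progress, when sleep is disabled or the
-  * sleep length is invalid, or (with SCHED_WALT) when the scheduler reports a
-  * bias period, which is then stored in the cpu's lpm data.
-  */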
- static bool lpm_disallowed(s64 sleep_ns, int cpu)
- {
- #if IS_ENABLED(CONFIG_SCHED_WALT)
- struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, cpu);
- uint64_t bias_time = 0;
- #endif
- if (suspend_in_progress)
- return true;
- if (!check_cpu_isactive(cpu))
- return false;
- if (sleep_disabled || sleep_ns < 0)
- return true;
- #if IS_ENABLED(CONFIG_SCHED_WALT)
- if (!sched_lpm_disallowed_time(cpu, &bias_time)) {
- cpu_gov->last_idx = 0;
- cpu_gov->bias = bias_time;
- return true;
- }
- #endif
- return false;
- }
- /**
- * histtimer_fn() - Runs when the per-cpu prediction timer expires; marks the
- * sample history invalid for the next prediction cycle
- * @h: cpu prediction timer
- */
- static enum hrtimer_restart histtimer_fn(struct hrtimer *h)
- {
- struct lpm_cpu *cpu_gov = this_cpu_ptr(&lpm_cpu_data);
- cpu_gov->history_invalid = 1;
- return HRTIMER_NORESTART;
- }
- /**
- * histtimer_start() - Program the per-cpu prediction hrtimer with the given timeout
- * @time_ns: value to be programmed (in microseconds; converted to ns below)
- */
- static void histtimer_start(uint32_t time_ns)
- {
- ktime_t hist_ktime = ns_to_ktime(time_ns * NSEC_PER_USEC);
- struct lpm_cpu *cpu_gov = this_cpu_ptr(&lpm_cpu_data);
- struct hrtimer *cpu_histtimer = &cpu_gov->histtimer;
- cpu_histtimer->function = histtimer_fn;
- hrtimer_start(cpu_histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED);
- }
- /**
- * histtimer_cancel() - Cancel the prediction hrtimer after the cpu wakes up from LPM
- */
- static void histtimer_cancel(void)
- {
- struct lpm_cpu *cpu_gov = this_cpu_ptr(&lpm_cpu_data);
- struct hrtimer *cpu_histtimer = &cpu_gov->histtimer;
- ktime_t time_rem;
- if (!hrtimer_active(cpu_histtimer))
- return;
- time_rem = hrtimer_get_remaining(cpu_histtimer);
- if (ktime_to_us(time_rem) <= 0)
- return;
- hrtimer_try_to_cancel(cpu_histtimer);
- }
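- /**
-  * biastimer_cancel() - Cancel the scheduler-bias hrtimer after the cpu wakes
-  * up and clear the stored bias value.
-  */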
- static void biastimer_cancel(void)
- {
- struct lpm_cpu *cpu_gov = this_cpu_ptr(&lpm_cpu_data);
- struct hrtimer *cpu_biastimer = &cpu_gov->biastimer;
- ktime_t time_rem;
- if (!cpu_gov->bias)
- return;
- cpu_gov->bias = 0;
- time_rem = hrtimer_get_remaining(cpu_biastimer);
- if (ktime_to_us(time_rem) <= 0)
- return;
- hrtimer_try_to_cancel(cpu_biastimer);
- }
- static enum hrtimer_restart biastimer_fn(struct hrtimer *h)
- {
- return HRTIMER_NORESTART;
- }
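- /**
-  * biastimer_start() - Program the scheduler-bias hrtimer
-  * @time_ns: bias period, in nanoseconds
-  */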
- static void biastimer_start(uint32_t time_ns)
- {
- ktime_t bias_ktime = ns_to_ktime(time_ns);
- struct lpm_cpu *cpu_gov = this_cpu_ptr(&lpm_cpu_data);
- struct hrtimer *cpu_biastimer = &cpu_gov->biastimer;
- cpu_biastimer->function = biastimer_fn;
- hrtimer_start(cpu_biastimer, bias_ktime, HRTIMER_MODE_REL_PINNED);
- }
- /**
- * find_deviation() - Try to detect a repeating pattern by tracking past
- * samples and checking whether the standard deviation of that set of
- * samples is below a threshold. If it is, the average of those samples
- * is used as the predicted value.
- * @cpu_gov: targeted cpu's lpm data structure
- * @samples_history: ring buffer of past residency/IPI-interval samples
- * @duration_ns: cpu's scheduler sleep length
- */
- static uint64_t find_deviation(struct lpm_cpu *cpu_gov, int *samples_history,
- u64 duration_ns)
- {
- uint64_t max, avg, stddev;
- uint64_t thresh = LLONG_MAX;
- struct cpuidle_driver *drv = cpu_gov->drv;
- int divisor, i, last_level = drv->state_count - 1;
- struct cpuidle_state *max_state = &drv->states[last_level];
- do {
- max = avg = divisor = stddev = 0;
- for (i = 0; i < MAXSAMPLES; i++) {
- int64_t value = samples_history[i];
- if (value <= thresh) {
- avg += value;
- divisor++;
- if (value > max)
- max = value;
- }
- }
- do_div(avg, divisor);
- for (i = 0; i < MAXSAMPLES; i++) {
- int64_t value = samples_history[i];
- if (value <= thresh) {
- int64_t diff = value - avg;
- stddev += diff * diff;
- }
- }
- do_div(stddev, divisor);
- stddev = int_sqrt(stddev);
- /*
- * If the deviation is low enough, return the average; otherwise
- * drop the largest sample and retry with the remaining ones.
- */
- if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
- || stddev <= PRED_REF_STDDEV) {
- do_div(duration_ns, NSEC_PER_USEC);
- if (avg >= duration_ns ||
- avg > max_state->target_residency)
- return 0;
- cpu_gov->next_pred_time = ktime_to_us(cpu_gov->now) + avg;
- return avg;
- }
- thresh = max - 1;
- } while (divisor > (MAXSAMPLES - 1));
- return 0;
- }
- /**
- * cpu_predict() - Predict the cpu's next wakeup.
- * @cpu_gov: targeted cpu's lpm data structure
- * @duration_ns: cpu's scheduler sleep length
- */
- static void cpu_predict(struct lpm_cpu *cpu_gov, u64 duration_ns)
- {
- int i, j;
- struct cpuidle_driver *drv = cpu_gov->drv;
- struct cpuidle_state *min_state = &drv->states[0];
- struct history_lpm *lpm_history = &cpu_gov->lpm_history;
- struct history_ipi *ipi_history = &cpu_gov->ipi_history;
- if (prediction_disabled)
- return;
- /*
- * The history is marked invalid when the cpu was woken up by the
- * prediction timer itself, so do not predict in that case.
- */
- if (cpu_gov->history_invalid) {
- cpu_gov->history_invalid = false;
- cpu_gov->htmr_wkup = true;
- cpu_gov->next_pred_time = 0;
- return;
- }
- /*
- * If duration_ns itself is too short for any low power mode deeper
- * than clock gating, do not predict.
- */
- if (min_state->target_residency_ns > duration_ns)
- return;
- /* Predict only when all the samples are collected */
- if (lpm_history->nsamp < MAXSAMPLES) {
- cpu_gov->next_pred_time = 0;
- return;
- }
- /*
- * Check whether the samples deviate little from each other; if so,
- * use their average as the predicted sleep time. Otherwise check
- * whether any specific mode has repeated premature exits and predict
- * from the average residency of those exits.
- */
- cpu_gov->predicted = find_deviation(cpu_gov, lpm_history->resi, duration_ns);
- if (cpu_gov->predicted) {
- cpu_gov->pred_type = LPM_PRED_RESIDENCY_PATTERN;
- return;
- }
- /*
- * Count the premature exits for each mode, excluding the clock
- * gating mode; if they are more than fifty percent of the samples,
- * restrict that mode and the deeper ones.
- */
- for (j = 1; j < drv->state_count; j++) {
- struct cpuidle_state *s = &drv->states[j];
- uint32_t min_residency = s->target_residency;
- uint32_t count = 0;
- uint64_t avg_residency = 0;
- for (i = 0; i < MAXSAMPLES; i++) {
- if ((lpm_history->mode[i] == j) &&
- (lpm_history->resi[i] < min_residency)) {
- count++;
- avg_residency += lpm_history->resi[i];
- }
- }
- if (count >= PRED_PREMATURE_CNT) {
- do_div(avg_residency, count);
- cpu_gov->predicted = avg_residency;
- cpu_gov->next_pred_time = ktime_to_us(cpu_gov->now)
- + cpu_gov->predicted;
- cpu_gov->pred_type = LPM_PRED_PREMATURE_EXITS;
- break;
- }
- }
- if (cpu_gov->predicted)
- return;
- cpu_gov->predicted = find_deviation(cpu_gov, ipi_history->interval,
- duration_ns);
- if (cpu_gov->predicted)
- cpu_gov->pred_type = LPM_PRED_IPI_PATTERN;
- }
- /**
- * clear_cpu_predict_history() - Clear the stored sample history on every cpu.
- * Called when the APSS is about to enter deep sleep.
- */
- void clear_cpu_predict_history(void)
- {
- struct lpm_cpu *cpu_gov;
- struct history_lpm *lpm_history;
- int i, cpu;
- if (prediction_disabled)
- return;
- for_each_possible_cpu(cpu) {
- cpu_gov = per_cpu_ptr(&lpm_cpu_data, cpu);
- lpm_history = &cpu_gov->lpm_history;
- for (i = 0; i < MAXSAMPLES; i++) {
- lpm_history->resi[i] = 0;
- lpm_history->mode[i] = -1;
- }
- lpm_history->samples_idx = 0;
- lpm_history->nsamp = 0;
- cpu_gov->next_pred_time = 0;
- cpu_gov->pred_type = LPM_PRED_RESET;
- }
- }
- /**
- * update_cpu_history() - Update the sample history each time the cpu
- * returns from a low power mode.
- * @cpu_gov: targeted cpu's lpm data structure
- */
- static void update_cpu_history(struct lpm_cpu *cpu_gov)
- {
- bool tmr = false;
- int idx = cpu_gov->last_idx;
- struct history_lpm *lpm_history = &cpu_gov->lpm_history;
- u64 measured_us = ktime_to_us(cpu_gov->dev->last_residency_ns);
- struct cpuidle_state *target;
- if (sleep_disabled || prediction_disabled || idx < 0 ||
- idx > cpu_gov->drv->state_count - 1)
- return;
- target = &cpu_gov->drv->states[idx];
- if (measured_us > target->exit_latency)
- measured_us -= target->exit_latency;
- if (cpu_gov->htmr_wkup) {
- if (!lpm_history->samples_idx)
- lpm_history->samples_idx = MAXSAMPLES - 1;
- else
- lpm_history->samples_idx--;
- lpm_history->resi[lpm_history->samples_idx] += measured_us;
- cpu_gov->htmr_wkup = false;
- tmr = true;
- } else
- lpm_history->resi[lpm_history->samples_idx] = measured_us;
- lpm_history->mode[lpm_history->samples_idx] = idx;
- cpu_gov->pred_type = LPM_PRED_RESET;
- trace_gov_pred_hist(idx, lpm_history->resi[lpm_history->samples_idx],
- tmr);
- if (lpm_history->nsamp < MAXSAMPLES)
- lpm_history->nsamp++;
- lpm_history->samples_idx++;
- if (lpm_history->samples_idx >= MAXSAMPLES)
- lpm_history->samples_idx = 0;
- }
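- /**
-  * update_ipi_history() - Record the interval since the previous IPI aimed at
-  * this cpu into the IPI history ring buffer.
-  * @cpu: target cpu
-  */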
- void update_ipi_history(int cpu)
- {
- struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, cpu);
- struct history_ipi *history = &cpu_gov->ipi_history;
- ktime_t now = ktime_get();
- history->interval[history->current_ptr] =
- ktime_to_us(ktime_sub(now,
- history->cpu_idle_resched_ts));
- (history->current_ptr)++;
- if (history->current_ptr >= MAXSAMPLES)
- history->current_ptr = 0;
- history->cpu_idle_resched_ts = now;
- }
- /**
- * lpm_cpu_qos_notify() - Called when a new PM QoS request arrives for the cpu.
- * Wakes the cpu up if it is idle so that the new
- * PM QoS request is honoured.
- * @nfb: notifier block of the CPU
- * @val: notification value
- * @ptr: pointer to private data structure
- */
- static int lpm_cpu_qos_notify(struct notifier_block *nfb,
- unsigned long val, void *ptr)
- {
- struct lpm_cpu *cpu_gov = container_of(nfb, struct lpm_cpu, nb);
- int cpu = cpu_gov->cpu;
- if (!cpu_gov->enable)
- return NOTIFY_OK;
- preempt_disable();
- if (cpu != smp_processor_id() && cpu_online(cpu) &&
- check_cpu_isactive(cpu))
- wake_up_if_idle(cpu);
- preempt_enable();
- return NOTIFY_OK;
- }
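- /**
-  * lpm_offline_cpu() - cpuhp teardown callback: remove the cpu's
-  * resume-latency PM QoS notifier.
-  * @cpu: cpu going offline
-  */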
- static int lpm_offline_cpu(unsigned int cpu)
- {
- struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, cpu);
- struct device *dev = get_cpu_device(cpu);
- if (!dev || !cpu_gov)
- return 0;
- dev_pm_qos_remove_notifier(dev, &cpu_gov->nb,
- DEV_PM_QOS_RESUME_LATENCY);
- return 0;
- }
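- /**
-  * lpm_online_cpu() - cpuhp online callback: register the cpu's
-  * resume-latency PM QoS notifier.
-  * @cpu: cpu coming online
-  */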
- static int lpm_online_cpu(unsigned int cpu)
- {
- struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, cpu);
- struct device *dev = get_cpu_device(cpu);
- if (!dev || !cpu_gov)
- return 0;
- cpu_gov->nb.notifier_call = lpm_cpu_qos_notify;
- dev_pm_qos_add_notifier(dev, &cpu_gov->nb,
- DEV_PM_QOS_RESUME_LATENCY);
- return 0;
- }
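- /**
-  * ipi_raise() - Hook for the ipi_raise tracepoint. Marks an IPI as pending
-  * on every cpu in @mask and updates each cpu's IPI history so that
-  * lpm_idle_enter() can demote those cpus to WFI.
-  */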
- static void ipi_raise(void *ignore, const struct cpumask *mask, const char *unused)
- {
- int cpu;
- struct lpm_cpu *cpu_gov;
- unsigned long flags;
- if (suspend_in_progress)
- return;
- for_each_cpu(cpu, mask) {
- cpu_gov = &(per_cpu(lpm_cpu_data, cpu));
- if (!cpu_gov->enable)
- return;
- spin_lock_irqsave(&cpu_gov->lock, flags);
- cpu_gov->ipi_pending = true;
- spin_unlock_irqrestore(&cpu_gov->lock, flags);
- update_ipi_history(cpu);
- }
- }
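- /**
-  * ipi_entry() - Hook for the ipi_entry tracepoint. Clears the pending-IPI
-  * flag on the cpu that is now handling the IPI.
-  */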
- static void ipi_entry(void *ignore, const char *unused)
- {
- int cpu;
- struct lpm_cpu *cpu_gov;
- unsigned long flags;
- if (suspend_in_progress)
- return;
- cpu = raw_smp_processor_id();
- cpu_gov = &(per_cpu(lpm_cpu_data, cpu));
- if (!cpu_gov->enable)
- return;
- spin_lock_irqsave(&cpu_gov->lock, flags);
- cpu_gov->ipi_pending = false;
- spin_unlock_irqrestore(&cpu_gov->lock, flags);
- }
- /**
- * get_cpus_qos() - Returns the aggregated (minimum) PM QoS latency
- * requirement across the active cpus in @mask.
- * @mask: cpumask of the cpus
- */
- static inline s64 get_cpus_qos(const struct cpumask *mask)
- {
- int cpu;
- s64 n, latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE * NSEC_PER_USEC;
- for_each_cpu(cpu, mask) {
- if (!check_cpu_isactive(cpu))
- continue;
- n = cpuidle_governor_latency_req(cpu);
- if (n < latency)
- latency = n;
- }
- return latency;
- }
- /**
- * start_prediction_timer() - Program and start the prediction hrtimer. In the
- * misprediction case it wakes the cpu out of the
- * shallower state, saving power by not letting the
- * cpu linger there.
- * @cpu_gov: cpu's lpm data structure
- * @duration_us: cpu's scheduled sleep length
- */
- static int start_prediction_timer(struct lpm_cpu *cpu_gov, int duration_us)
- {
- struct cpuidle_state *s;
- uint32_t htime = 0, max_residency;
- uint32_t last_level = cpu_gov->drv->state_count - 1;
- if (!cpu_gov->predicted || cpu_gov->last_idx >= last_level)
- return 0;
- if (cpu_gov->next_wakeup > cpu_gov->next_pred_time)
- cpu_gov->next_wakeup = cpu_gov->next_pred_time;
- s = &cpu_gov->drv->states[0];
- max_residency = s[cpu_gov->last_idx + 1].target_residency - 1;
- htime = cpu_gov->predicted + PRED_TIMER_ADD;
- if (htime > max_residency)
- htime = max_residency;
- if ((duration_us > htime) && ((duration_us - htime) > max_residency))
- histtimer_start(htime);
- return htime;
- }
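- /**
-  * register_cluster_governor_ops() - Register the cluster governor callbacks
-  * used to propagate per-cpu idle decisions to the cluster level.
-  * @ops: cluster governor callbacks
-  */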
- void register_cluster_governor_ops(struct cluster_governor *ops)
- {
- if (!ops)
- return;
- cluster_gov_ops = ops;
- }
- /**
- * lpm_select() - Find the best idle state for the cpu device
- * @drv: cpuidle driver for this cpu
- * @dev: Target cpu
- * @stop_tick: tells the caller whether the tick should be stopped
- *
- * Return: Best cpu LPM mode to enter
- */
- static int lpm_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
- bool *stop_tick)
- {
- struct lpm_cpu *cpu_gov = this_cpu_ptr(&lpm_cpu_data);
- s64 latency_req = get_cpus_qos(cpumask_of(dev->cpu));
- ktime_t delta_tick;
- u64 reason = 0;
- uint64_t duration_ns, htime = 0;
- int i = 0;
- if (!cpu_gov)
- return 0;
- do_div(latency_req, NSEC_PER_USEC);
- cpu_gov->predicted = 0;
- cpu_gov->predict_started = false;
- cpu_gov->now = ktime_get();
- duration_ns = tick_nohz_get_sleep_length(&delta_tick);
- update_cpu_history(cpu_gov);
- if (lpm_disallowed(duration_ns, dev->cpu))
- goto done;
- for (i = drv->state_count - 1; i > 0; i--) {
- struct cpuidle_state *s = &drv->states[i];
- if (dev->states_usage[i].disable) {
- reason |= UPDATE_REASON(i, LPM_SELECT_STATE_DISABLED);
- continue;
- }
- if (latency_req < s->exit_latency) {
- reason |= UPDATE_REASON(i, LPM_SELECT_STATE_QOS_UNMET);
- continue;
- }
- if (s->target_residency_ns > duration_ns) {
- reason |= UPDATE_REASON(i,
- LPM_SELECT_STATE_RESIDENCY_UNMET);
- continue;
- }
- if (check_cpu_isactive(dev->cpu) && !cpu_gov->predict_started) {
- cpu_predict(cpu_gov, duration_ns);
- cpu_gov->predict_started = true;
- }
- if (cpu_gov->predicted &&
- s->target_residency > cpu_gov->predicted) {
- reason |= UPDATE_REASON(i,
- LPM_SELECT_STATE_PRED);
- continue;
- }
- break;
- }
- do_div(duration_ns, NSEC_PER_USEC);
- cpu_gov->last_idx = i;
- cpu_gov->next_wakeup = ktime_add_us(cpu_gov->now, duration_ns);
- htime = start_prediction_timer(cpu_gov, duration_ns);
- /* update this cpu's next_wakeup in its parent power domain device */
- if (cpu_gov->last_idx == drv->state_count - 1) {
- if (cluster_gov_ops && cluster_gov_ops->select)
- cluster_gov_ops->select(cpu_gov);
- }
- done:
- if ((!cpu_gov->last_idx) && cpu_gov->bias) {
- biastimer_start(cpu_gov->bias);
- reason |= UPDATE_REASON(i, LPM_SELECT_STATE_SCHED_BIAS);
- }
- trace_lpm_gov_select(i, latency_req, duration_ns, reason);
- trace_gov_pred_select(cpu_gov->pred_type, cpu_gov->predicted, htime);
- return i;
- }
- /**
- * lpm_reflect() - Update the state entered by the cpu device
- * @dev: Target CPU
- * @state: Entered state
- */
- static void lpm_reflect(struct cpuidle_device *dev, int state)
- {
- }
- /**
- * lpm_idle_enter() - Notification with cpuidle state during idle entry
- * @unused: unused
- * @state: state selected by the governor's ->select() callback
- * @dev: cpuidle_device
- */
- static void lpm_idle_enter(void *unused, int *state, struct cpuidle_device *dev)
- {
- struct lpm_cpu *cpu_gov = this_cpu_ptr(&lpm_cpu_data);
- u64 reason = 0;
- unsigned long flags;
- if (*state == 0)
- return;
- if (!cpu_gov->enable)
- return;
- /* Restrict to WFI state if there is an IPI pending on current CPU */
- spin_lock_irqsave(&cpu_gov->lock, flags);
- if (cpu_gov->ipi_pending) {
- reason = UPDATE_REASON(*state, LPM_SELECT_STATE_IPI_PENDING);
- *state = 0;
- trace_lpm_gov_select(*state, 0xdeaffeed, 0xdeaffeed, reason);
- }
- spin_unlock_irqrestore(&cpu_gov->lock, flags);
- }
- /**
- * lpm_idle_exit() - Notification with cpuidle state during idle exit
- * @unused: unused
- * @state: state actually entered by cpuidle
- * @dev: cpuidle_device
- */
- static void lpm_idle_exit(void *unused, int state, struct cpuidle_device *dev)
- {
- struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, dev->cpu);
- if (cpu_gov->enable) {
- histtimer_cancel();
- biastimer_cancel();
- }
- }
- /**
- * lpm_enable_device() - Initialize the governor's data for the CPU
- * @drv: cpuidle driver
- * @dev: Target CPU
- */
- static int lpm_enable_device(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
- {
- struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, dev->cpu);
- struct hrtimer *cpu_histtimer = &cpu_gov->histtimer;
- struct hrtimer *cpu_biastimer = &cpu_gov->biastimer;
- int ret;
- spin_lock_init(&cpu_gov->lock);
- hrtimer_init(cpu_histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_init(cpu_biastimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- if (!traces_registered) {
- ret = register_trace_ipi_raise(ipi_raise, NULL);
- if (ret)
- return ret;
- ret = register_trace_ipi_entry(ipi_entry, NULL);
- if (ret) {
- unregister_trace_ipi_raise(ipi_raise, NULL);
- return ret;
- }
- ret = register_trace_prio_android_vh_cpu_idle_enter(
- lpm_idle_enter, NULL, INT_MIN);
- if (ret) {
- unregister_trace_ipi_raise(ipi_raise, NULL);
- unregister_trace_ipi_entry(ipi_entry, NULL);
- return ret;
- }
- ret = register_trace_prio_android_vh_cpu_idle_exit(
- lpm_idle_exit, NULL, INT_MIN);
- if (ret) {
- unregister_trace_ipi_raise(ipi_raise, NULL);
- unregister_trace_ipi_entry(ipi_entry, NULL);
- unregister_trace_android_vh_cpu_idle_enter(
- lpm_idle_enter, NULL);
- return ret;
- }
- if (cluster_gov_ops && cluster_gov_ops->enable)
- cluster_gov_ops->enable();
- traces_registered = true;
- }
- cpu_gov->cpu = dev->cpu;
- cpu_gov->enable = true;
- cpu_gov->drv = drv;
- cpu_gov->dev = dev;
- cpu_gov->last_idx = -1;
- return 0;
- }
- /**
- * lpm_disable_device() - Clean up the governor's data for the CPU
- * @drv: cpuidle driver
- * @dev: Target CPU
- */
- static void lpm_disable_device(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
- {
- struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, dev->cpu);
- int cpu;
- cpu_gov->enable = false;
- cpu_gov->last_idx = -1;
- for_each_possible_cpu(cpu) {
- struct lpm_cpu *cpu_gov = per_cpu_ptr(&lpm_cpu_data, cpu);
- if (cpu_gov->enable)
- return;
- }
- if (traces_registered) {
- unregister_trace_ipi_raise(ipi_raise, NULL);
- unregister_trace_ipi_entry(ipi_entry, NULL);
- unregister_trace_android_vh_cpu_idle_enter(
- lpm_idle_enter, NULL);
- unregister_trace_android_vh_cpu_idle_exit(
- lpm_idle_exit, NULL);
- if (cluster_gov_ops && cluster_gov_ops->disable)
- cluster_gov_ops->disable();
- traces_registered = false;
- }
- }
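- /**
-  * qcom_lpm_suspend_trace() - suspend_resume tracepoint hook. Blocks LPM
-  * selection between dpm_suspend_late and dpm_resume_early and wakes idle
-  * cpus so they re-evaluate their idle state.
-  */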
- static void qcom_lpm_suspend_trace(void *unused, const char *action,
- int event, bool start)
- {
- int cpu;
- if (start && !strcmp("dpm_suspend_late", action)) {
- suspend_in_progress = true;
- for_each_online_cpu(cpu)
- wake_up_if_idle(cpu);
- return;
- }
- if (!start && !strcmp("dpm_resume_early", action)) {
- suspend_in_progress = false;
- for_each_online_cpu(cpu)
- wake_up_if_idle(cpu);
- }
- }
- static struct cpuidle_governor lpm_governor = {
- .name = "qcom-cpu-lpm",
- .rating = 50,
- .enable = lpm_enable_device,
- .disable = lpm_disable_device,
- .select = lpm_select,
- .reflect = lpm_reflect,
- };
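- /**
-  * qcom_lpm_governor_init() - Module init: create the global sysfs nodes,
-  * initialize the cluster governor, register the cpuidle governor, the
-  * suspend_resume trace hook and the cpu hotplug callbacks.
-  */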
- static int __init qcom_lpm_governor_init(void)
- {
- int ret;
- ret = create_global_sysfs_nodes();
- if (ret)
- goto sysfs_fail;
- ret = qcom_cluster_lpm_governor_init();
- if (ret)
- goto cluster_init_fail;
- ret = cpuidle_register_governor(&lpm_governor);
- if (ret)
- goto cpuidle_reg_fail;
- ret = register_trace_suspend_resume(qcom_lpm_suspend_trace, NULL);
- if (ret)
- goto cpuidle_reg_fail;
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "qcom-cpu-lpm",
- lpm_online_cpu, lpm_offline_cpu);
- if (ret < 0)
- goto cpuhp_setup_fail;
- return 0;
- cpuhp_setup_fail:
- unregister_trace_suspend_resume(qcom_lpm_suspend_trace, NULL);
- cpuidle_reg_fail:
- qcom_cluster_lpm_governor_deinit();
- cluster_init_fail:
- remove_global_sysfs_nodes();
- sysfs_fail:
- return ret;
- }
- module_init(qcom_lpm_governor_init);
- MODULE_DESCRIPTION("Qualcomm Technologies, Inc. cpuidle LPM governor");
- MODULE_LICENSE("GPL v2");