Merge branch 'pm-cpuidle'
* pm-cpuidle:
  cpuidle: Pass exit latency limit to cpuidle_use_deepest_state()
  cpuidle: Allow idle injection to apply exit latency limit
  cpuidle: Introduce cpuidle_driver_state_disabled() for driver quirks
  cpuidle: teo: Avoid code duplication in conditionals
  cpuidle: teo: Avoid using "early hits" incorrectly
  cpuidle: teo: Exclude cpuidle overhead from computations
  cpuidle: Use nanoseconds as the unit of time
  cpuidle: Consolidate disabled state checks
  ACPI: processor_idle: Skip dummy wait if kernel is in guest
  cpuidle: Do not unset the driver if it is there already
  cpuidle: teo: Fix "early hits" handling for disabled idle states
  cpuidle: teo: Consider hits and misses metrics of disabled states
  cpuidle: teo: Rename local variable in teo_select()
  cpuidle: teo: Ignore disabled idle states that are too deep
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -104,7 +104,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	 * update no idle residency and return.
 	 */
 	if (current_clr_polling_and_test()) {
-		dev->last_residency = 0;
+		dev->last_residency_ns = 0;
 		local_irq_enable();
 		return -EBUSY;
 	}
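The last_residency rename above comes from the "cpuidle: Use nanoseconds as the unit of time" commit in the list: the microsecond fields supplied by drivers stay in place, and nanosecond twins are derived once at registration so the idle path never converts on the fly. A minimal userspace sketch of that one-time conversion, with a simplified stand-in for struct cpuidle_state (not the kernel definition):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

/* Simplified stand-in for struct cpuidle_state. */
struct state {
	unsigned int exit_latency;	/* legacy driver field, microseconds */
	uint64_t exit_latency_ns;	/* derived field, nanoseconds */
};

int main(void)
{
	struct state s = { .exit_latency = 150 };

	/* Convert once at registration so hot paths never multiply again. */
	s.exit_latency_ns = (uint64_t)s.exit_latency * NSEC_PER_USEC;

	printf("%u us -> %llu ns\n", s.exit_latency,
	       (unsigned long long)s.exit_latency_ns);
	return 0;
}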
@@ -165,7 +165,9 @@ static void cpuidle_idle_call(void)
 	 * until a proper wakeup interrupt happens.
 	 */
 
-	if (idle_should_enter_s2idle() || dev->use_deepest_state) {
+	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
+		u64 max_latency_ns;
+
 		if (idle_should_enter_s2idle()) {
 			rcu_idle_enter();
 
@@ -176,12 +178,16 @@ static void cpuidle_idle_call(void)
 			}
 
 			rcu_idle_exit();
+
+			max_latency_ns = U64_MAX;
+		} else {
+			max_latency_ns = dev->forced_idle_latency_limit_ns;
 		}
 
 		tick_nohz_idle_stop_tick();
 		rcu_idle_enter();
 
-		next_state = cpuidle_find_deepest_state(drv, dev);
+		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
 		call_cpuidle(drv, dev, next_state);
 	} else {
 		bool stop_tick = true;
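With max_latency_ns plumbed into cpuidle_find_deepest_state(), the lookup can skip any state whose exit latency exceeds the limit and still return the deepest state that remains. A self-contained sketch of such a selection loop, using stand-in types rather than the real drivers/cpuidle/cpuidle.c code:

#include <stdint.h>

/* Simplified stand-in for struct cpuidle_state. */
struct state {
	uint64_t exit_latency_ns;
	int disabled;
};

/*
 * Pick the deepest (highest exit latency) enabled state whose exit
 * latency does not exceed max_latency_ns.  State 0 is skipped, like
 * the polling state in the kernel.  Returns -1 if nothing fits.
 */
static int find_deepest_state(const struct state *states, int count,
			      uint64_t max_latency_ns)
{
	uint64_t latency_req = 0;
	int i, ret = -1;

	for (i = 1; i < count; i++) {
		const struct state *s = &states[i];

		if (s->disabled ||
		    s->exit_latency_ns <= latency_req ||
		    s->exit_latency_ns > max_latency_ns)
			continue;

		latency_req = s->exit_latency_ns;
		ret = i;
	}
	return ret;
}

Passing U64_MAX reproduces the old "deepest state, no limit" behavior, which is exactly what the s2idle branch above does.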
@@ -311,7 +317,7 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-void play_idle(unsigned long duration_us)
+void play_idle_precise(u64 duration_ns, u64 latency_ns)
 {
 	struct idle_timer it;
 
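The rename need not break existing play_idle() callers: the series keeps the old microsecond interface as a thin inline wrapper that requests the deepest state with no latency bound. Roughly (the exact header placement is not shown in this diff):

/*
 * Compatibility wrapper; upstream keeps an equivalent static inline
 * in a header rather than in kernel/sched/idle.c.
 */
static inline void play_idle(unsigned long duration_us)
{
	play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
}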
@@ -323,29 +329,29 @@ void play_idle(unsigned long duration_us)
 	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
 	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
 	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
-	WARN_ON_ONCE(!duration_us);
+	WARN_ON_ONCE(!duration_ns);
 
 	rcu_sleep_check();
 	preempt_disable();
 	current->flags |= PF_IDLE;
-	cpuidle_use_deepest_state(true);
+	cpuidle_use_deepest_state(latency_ns);
 
 	it.done = 0;
 	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	it.timer.function = idle_inject_timer_fn;
-	hrtimer_start(&it.timer, ns_to_ktime(duration_us * NSEC_PER_USEC),
+	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
 		      HRTIMER_MODE_REL_PINNED);
 
 	while (!READ_ONCE(it.done))
 		do_idle();
 
-	cpuidle_use_deepest_state(false);
+	cpuidle_use_deepest_state(0);
 	current->flags &= ~PF_IDLE;
 
 	preempt_fold_need_resched();
 	preempt_enable();
 }
-EXPORT_SYMBOL_GPL(play_idle);
+EXPORT_SYMBOL_GPL(play_idle_precise);
 
 void cpu_startup_entry(enum cpuhp_state state)
 {
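Note how play_idle_precise() brackets the injection loop: cpuidle_use_deepest_state() now takes a latency limit in nanoseconds instead of a bool, so 0 restores normal governing while any non-zero value forces the deepest state whose exit latency fits the limit. A small userspace sketch of that pairing, with a simplified stand-in for the per-CPU cpuidle device:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the per-CPU struct cpuidle_device. */
struct cpu_dev {
	uint64_t forced_idle_latency_limit_ns;
};

static struct cpu_dev this_cpu_dev;

/* 0 ends forced idle; UINT64_MAX means "deepest state, no limit". */
static void use_deepest_state(uint64_t latency_limit_ns)
{
	this_cpu_dev.forced_idle_latency_limit_ns = latency_limit_ns;
}

int main(void)
{
	/* Mirror of the play_idle_precise() pairing in the diff above. */
	use_deepest_state(500 * 1000ULL);	/* cap exit latency at 500 us */
	/* ... idle injection loop would run here ... */
	use_deepest_state(0);			/* back to normal governing */

	printf("limit now %llu ns\n",
	       (unsigned long long)this_cpu_dev.forced_idle_latency_limit_ns);
	return 0;
}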