cpuidle: Consolidate disabled state checks

There are two reasons why CPU idle states may be disabled: either
because the driver has disabled them or because they have been
disabled by user space via sysfs.

In the former case, the state's "disabled" flag is set once during
the initialization of the driver and never cleared afterwards (it is
effectively read-only).  In the latter case, the "disable" field of
the given state's cpuidle_state_usage struct is set, and it may be
changed via sysfs.  Thus checking whether or not an idle state has
been disabled involves reading both of these flags every time.
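
For illustration, here is a rough sketch of the two places involved, with
unrelated fields omitted and the exact types assumed rather than quoted from
the headers; the combined check at the end is the pattern that the hunks
below remove:

	/* Driver-owned flag, set at init and effectively read-only afterwards: */
	struct cpuidle_state {
		/* ... */
		bool disabled;	/* assumed type; disables the state on all CPUs */
	};

	/* Per-CPU flag, writable from user space through sysfs: */
	struct cpuidle_state_usage {
		unsigned long long disable;	/* assumed type */
		/* ... */
	};

	/* Before this change, every consumer had to test both flags: */
	if (drv->states[i].disabled || dev->states_usage[i].disable)
		continue;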

To avoid the additional check of the state's "disabled" flag (which
is effectively read-only anyway), use its value at init time to set
a (new) flag in the "disable" field of that state's
cpuidle_state_usage structure, and let the sysfs interface manipulate
another (new) flag in the same field.  This way the state is disabled
whenever the "disable" field of its cpuidle_state_usage structure is
nonzero, whatever the reason, and that field is the only place to
look to check whether or not the state has been disabled.
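
A minimal sketch of the resulting scheme follows.  Only
CPUIDLE_STATE_DISABLED_BY_DRIVER appears in the hunks below; the name of the
user-space bit and the exact BIT() assignments are assumptions taken on
faith from the rest of the patch (the user-space bit is flipped by each
state's sysfs "disable" attribute, whose code is not shown here):

	#define CPUIDLE_STATE_DISABLED_BY_USER		BIT(0)
	#define CPUIDLE_STATE_DISABLED_BY_DRIVER	BIT(1)

	/* At device registration, fold the driver's read-only flag in once: */
	for (i = 0; i < drv->state_count; i++)
		if (drv->states[i].disabled)
			dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;

	/* The sysfs store callback only toggles the user bit: */
	if (disable_requested)	/* hypothetical local variable */
		dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
	else
		dev->states_usage[i].disable &= ~CPUIDLE_STATE_DISABLED_BY_USER;

	/* Every consumer is then left with a single check: */
	if (dev->states_usage[i].disable)
		continue;

Because both bits live in the same per-CPU field, a nonzero value means
"disabled" regardless of who requested it, which is exactly what the
simplified checks in the hunks below rely on.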

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Rafael J. Wysocki
2019-11-04 12:16:17 +01:00
parent fa583f71a9
commit 99e98d3fb1
7 files changed, 54 additions and 48 deletions

@@ -84,12 +84,12 @@ static int find_deepest_state(struct cpuidle_driver *drv,
 	for (i = 1; i < drv->state_count; i++) {
 		struct cpuidle_state *s = &drv->states[i];
-		struct cpuidle_state_usage *su = &dev->states_usage[i];
 
-		if (s->disabled || su->disable || s->exit_latency <= latency_req
-		    || s->exit_latency > max_latency
-		    || (s->flags & forbidden_flags)
-		    || (s2idle && !s->enter_s2idle))
+		if (dev->states_usage[i].disable ||
+		    s->exit_latency <= latency_req ||
+		    s->exit_latency > max_latency ||
+		    (s->flags & forbidden_flags) ||
+		    (s2idle && !s->enter_s2idle))
 			continue;
 
 		latency_req = s->exit_latency;
@@ -265,8 +265,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 		if (diff < drv->states[entered_state].target_residency) {
 			for (i = entered_state - 1; i >= 0; i--) {
-				if (drv->states[i].disabled ||
-				    dev->states_usage[i].disable)
+				if (dev->states_usage[i].disable)
 					continue;
 
 				/* Shallower states are enabled, so update. */
@@ -275,8 +274,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 			}
 		} else if (diff > delay) {
 			for (i = entered_state + 1; i < drv->state_count; i++) {
-				if (drv->states[i].disabled ||
-				    dev->states_usage[i].disable)
+				if (dev->states_usage[i].disable)
 					continue;
 
 				/*
@@ -380,7 +378,7 @@ u64 cpuidle_poll_time(struct cpuidle_driver *drv,
 	limit_ns = TICK_NSEC;
 	for (i = 1; i < drv->state_count; i++) {
-		if (drv->states[i].disabled || dev->states_usage[i].disable)
+		if (dev->states_usage[i].disable)
 			continue;
 
 		limit_ns = (u64)drv->states[i].target_residency * NSEC_PER_USEC;
@@ -567,12 +565,16 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
  */
 static int __cpuidle_register_device(struct cpuidle_device *dev)
 {
-	int ret;
 	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+	int i, ret;
 
 	if (!try_module_get(drv->owner))
 		return -EINVAL;
 
+	for (i = 0; i < drv->state_count; i++)
+		if (drv->states[i].disabled)
+			dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
+
 	per_cpu(cpuidle_devices, dev->cpu) = dev;
 	list_add(&dev->device_list, &cpuidle_detected_devices);