Merge tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael J Wysocki:

 - Improved system suspend/resume and runtime PM handling for the SH
   TMU, CMT and MTU2 clock event devices (also used by ARM/shmobile).

 - Generic PM domains framework extensions related to cpuidle support
   and domain objects lookup using names.

 - ARM/shmobile power management updates including improved support for
   the SH7372's A4S power domain containing the CPU core.

 - cpufreq changes related to AMD CPUs support from Matthew Garrett,
   Andre Przywara and Borislav Petkov.

 - cpu0 cpufreq driver from Shawn Guo.

 - cpufreq governor fixes related to the relaxing of limits from Michal
   Pecio.

 - OMAP cpufreq updates from Axel Lin and Richard Zhao.

 - cpuidle ladder governor fixes related to the disabling of states from
   Carsten Emde and me.

 - Runtime PM core updates related to the interactions with the system
   suspend core from Alan Stern and Kevin Hilman.

 - Wakeup sources modification allowing more helper functions to be
   called from interrupt context from John Stultz and additional
   diagnostic code from Todd Poynor.

 - System suspend error code path fix from Feng Hong.

Fixed up conflicts in cpufreq/powernow-k8 that stemmed from the
workqueue fixes conflicting fairly badly with the removal of support
for hardware P-state chips. The changes were independent but somewhat
intertwined.

* tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (76 commits)
  Revert "PM QoS: Use spinlock in the per-device PM QoS constraints code"
  PM / Runtime: let rpm_resume() succeed if RPM_ACTIVE, even when disabled, v2
  cpuidle: rename function name "__cpuidle_register_driver", v2
  cpufreq: OMAP: Check IS_ERR() instead of NULL for omap_device_get_by_hwmod_name
  cpuidle: remove some empty lines
  PM: Prevent runtime suspend during system resume
  PM QoS: Use spinlock in the per-device PM QoS constraints code
  PM / Sleep: use resume event when call dpm_resume_early
  cpuidle / ACPI : move cpuidle_device field out of the acpi_processor_power structure
  ACPI / processor: remove pointless variable initialization
  ACPI / processor: remove unused function parameter
  cpufreq: OMAP: remove loops_per_jiffy recalculate for smp
  sections: fix section conflicts in drivers/cpufreq
  cpufreq: conservative: update frequency when limits are relaxed
  cpufreq / ondemand: update frequency when limits are relaxed
  properly __init-annotate pm_sysrq_init()
  cpufreq: Add a generic cpufreq-cpu0 driver
  PM / OPP: Initialize OPP table from device tree
  ARM: add cpufreq transiton notifier to adjust loops_per_jiffy for smp
  cpufreq: Remove support for hardware P-state chips from powernow-k8
  ...
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -53,6 +53,24 @@
 static LIST_HEAD(gpd_list);
 static DEFINE_MUTEX(gpd_list_lock);
 
+static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
+{
+        struct generic_pm_domain *genpd = NULL, *gpd;
+
+        if (IS_ERR_OR_NULL(domain_name))
+                return NULL;
+
+        mutex_lock(&gpd_list_lock);
+        list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+                if (!strcmp(gpd->name, domain_name)) {
+                        genpd = gpd;
+                        break;
+                }
+        }
+        mutex_unlock(&gpd_list_lock);
+        return genpd;
+}
+
 #ifdef CONFIG_PM
 
 struct generic_pm_domain *dev_to_genpd(struct device *dev)
@@ -256,10 +274,28 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
         return ret;
 }
 
+/**
+ * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
+ * @domain_name: Name of the PM domain to power up.
+ */
+int pm_genpd_name_poweron(const char *domain_name)
+{
+        struct generic_pm_domain *genpd;
+
+        genpd = pm_genpd_lookup_name(domain_name);
+        return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
+}
+
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_RUNTIME
 
+static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
+                                     struct device *dev)
+{
+        return GENPD_DEV_CALLBACK(genpd, int, start, dev);
+}
+
 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
 {
         return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
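The lookup helper and pm_genpd_name_poweron() added above let platform code power up a domain it only knows by name. A minimal sketch of a caller, assuming a domain has been registered elsewhere under the made-up name "A4S":

/* Illustrative sketch only; the domain name and init hook are assumptions. */
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/pm_domain.h>

static int __init my_platform_pd_init(void)
{
        int ret;

        /* Power up the named domain and all of its masters. */
        ret = pm_genpd_name_poweron("A4S");
        if (ret)
                pr_err("my_platform: powering on A4S failed: %d\n", ret);

        return ret;
}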
@@ -436,7 +472,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
         not_suspended = 0;
         list_for_each_entry(pdd, &genpd->dev_list, list_node)
                 if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
-                    || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
+                    || pdd->dev->power.irq_safe))
                         not_suspended++;
 
         if (not_suspended > genpd->in_progress)
@@ -578,9 +614,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 
         might_sleep_if(!genpd->dev_irq_safe);
 
-        if (dev_gpd_data(dev)->always_on)
-                return -EBUSY;
-
         stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
         if (stop_ok && !stop_ok(dev))
                 return -EBUSY;
@@ -629,7 +662,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
 
         /* If power.irq_safe, the PM domain is never powered off. */
         if (dev->power.irq_safe)
-                return genpd_start_dev(genpd, dev);
+                return genpd_start_dev_no_timing(genpd, dev);
 
         mutex_lock(&genpd->lock);
         ret = __pm_genpd_poweron(genpd);
@@ -697,6 +730,24 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
 
 #ifdef CONFIG_PM_SLEEP
 
+/**
+ * pm_genpd_present - Check if the given PM domain has been initialized.
+ * @genpd: PM domain to check.
+ */
+static bool pm_genpd_present(struct generic_pm_domain *genpd)
+{
+        struct generic_pm_domain *gpd;
+
+        if (IS_ERR_OR_NULL(genpd))
+                return false;
+
+        list_for_each_entry(gpd, &gpd_list, gpd_list_node)
+                if (gpd == genpd)
+                        return true;
+
+        return false;
+}
+
 static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
                                     struct device *dev)
 {
@@ -750,9 +801,10 @@ static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
  * Check if the given PM domain can be powered off (during system suspend or
  * hibernation) and do that if so. Also, in that case propagate to its masters.
  *
- * This function is only called in "noirq" stages of system power transitions,
- * so it need not acquire locks (all of the "noirq" callbacks are executed
- * sequentially, so it is guaranteed that it will never run twice in parallel).
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions, so it need not acquire locks (all of the "noirq" callbacks are
+ * executed sequentially, so it is guaranteed that it will never run twice in
+ * parallel).
  */
 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 {
@@ -776,6 +828,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
                 }
 }
 
+/**
+ * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
+ * @genpd: PM domain to power on.
+ *
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions, so it need not acquire locks (all of the "noirq" callbacks are
+ * executed sequentially, so it is guaranteed that it will never run twice in
+ * parallel).
+ */
+static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
+{
+        struct gpd_link *link;
+
+        if (genpd->status != GPD_STATE_POWER_OFF)
+                return;
+
+        list_for_each_entry(link, &genpd->slave_links, slave_node) {
+                pm_genpd_sync_poweron(link->master);
+                genpd_sd_counter_inc(link->master);
+        }
+
+        if (genpd->power_on)
+                genpd->power_on(genpd);
+
+        genpd->status = GPD_STATE_ACTIVE;
+}
+
 /**
  * resume_needed - Check whether to resume a device before system suspend.
  * @dev: Device to check.
@@ -937,7 +1016,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
         if (IS_ERR(genpd))
                 return -EINVAL;
 
-        if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+        if (genpd->suspend_power_off
             || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                 return 0;
 
@@ -970,7 +1049,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
         if (IS_ERR(genpd))
                 return -EINVAL;
 
-        if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+        if (genpd->suspend_power_off
             || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                 return 0;
 
@@ -979,7 +1058,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
          * guaranteed that this function will never run twice in parallel for
          * the same PM domain, so it is not necessary to use locking here.
          */
-        pm_genpd_poweron(genpd);
+        pm_genpd_sync_poweron(genpd);
         genpd->suspended_count--;
 
         return genpd_start_dev(genpd, dev);
@@ -1090,8 +1169,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
         if (IS_ERR(genpd))
                 return -EINVAL;
 
-        return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
-                0 : genpd_stop_dev(genpd, dev);
+        return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
 }
 
 /**
@@ -1111,8 +1189,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
         if (IS_ERR(genpd))
                 return -EINVAL;
 
-        return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
-                0 : genpd_start_dev(genpd, dev);
+        return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
 }
 
 /**
@@ -1186,8 +1263,8 @@ static int pm_genpd_restore_noirq(struct device *dev)
         if (genpd->suspended_count++ == 0) {
                 /*
                  * The boot kernel might put the domain into arbitrary state,
-                 * so make it appear as powered off to pm_genpd_poweron(), so
-                 * that it tries to power it on in case it was really off.
+                 * so make it appear as powered off to pm_genpd_sync_poweron(),
+                 * so that it tries to power it on in case it was really off.
                  */
                 genpd->status = GPD_STATE_POWER_OFF;
                 if (genpd->suspend_power_off) {
@@ -1205,9 +1282,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
         if (genpd->suspend_power_off)
                 return 0;
 
-        pm_genpd_poweron(genpd);
+        pm_genpd_sync_poweron(genpd);
 
-        return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
+        return genpd_start_dev(genpd, dev);
 }
 
 /**
@@ -1246,6 +1323,31 @@ static void pm_genpd_complete(struct device *dev)
         }
 }
 
+/**
+ * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
+ * @dev: Device that normally is marked as "always on" to switch power for.
+ *
+ * This routine may only be called during the system core (syscore) suspend or
+ * resume phase for devices whose "always on" flags are set.
+ */
+void pm_genpd_syscore_switch(struct device *dev, bool suspend)
+{
+        struct generic_pm_domain *genpd;
+
+        genpd = dev_to_genpd(dev);
+        if (!pm_genpd_present(genpd))
+                return;
+
+        if (suspend) {
+                genpd->suspended_count++;
+                pm_genpd_sync_poweroff(genpd);
+        } else {
+                pm_genpd_sync_poweron(genpd);
+                genpd->suspended_count--;
+        }
+}
+EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
+
 #else
 
 #define pm_genpd_prepare NULL
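pm_genpd_syscore_switch() gives "always on" devices such as the SH timer channels a way to drop their domain during the syscore phase, after the regular "noirq" callbacks have run. A sketch of the intended call pattern, assuming a timer driver that saves its struct device pointer at probe time (all names here are placeholders, not part of this diff):

/* Sketch: a timer driver's syscore ops switching its PM domain. */
#include <linux/pm_domain.h>
#include <linux/syscore_ops.h>

static struct device *my_timer_dev;     /* assumed to be saved at probe time */

static int my_timer_syscore_suspend(void)
{
        /* Domain goes down only after the "noirq" suspend phase completes. */
        pm_genpd_syscore_switch(my_timer_dev, true);
        return 0;
}

static void my_timer_syscore_resume(void)
{
        /* Bring the domain back before devices run their "noirq" resume. */
        pm_genpd_syscore_switch(my_timer_dev, false);
}

static struct syscore_ops my_timer_syscore_ops = {
        .suspend = my_timer_syscore_suspend,
        .resume  = my_timer_syscore_resume,
};

A real driver would register these with register_syscore_ops() from its init code.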
@@ -1393,6 +1495,19 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
         return __pm_genpd_add_device(genpd, dev, td);
 }
 
+
+/**
+ * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
+ * @domain_name: Name of the PM domain to add the device to.
+ * @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
+ */
+int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
+                               struct gpd_timing_data *td)
+{
+        return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
+}
+
 /**
  * pm_genpd_remove_device - Remove a device from an I/O PM domain.
  * @genpd: PM domain to remove the device from.
@@ -1454,26 +1569,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
         return ret;
 }
 
-/**
- * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
- * @dev: Device to set/unset the flag for.
- * @val: The new value of the device's "always on" flag.
- */
-void pm_genpd_dev_always_on(struct device *dev, bool val)
-{
-        struct pm_subsys_data *psd;
-        unsigned long flags;
-
-        spin_lock_irqsave(&dev->power.lock, flags);
-
-        psd = dev_to_psd(dev);
-        if (psd && psd->domain_data)
-                to_gpd_data(psd->domain_data)->always_on = val;
-
-        spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
-
 /**
  * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
  * @dev: Device to set/unset the flag for.
@@ -1505,7 +1600,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
         struct gpd_link *link;
         int ret = 0;
 
-        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
+        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
+            || genpd == subdomain)
                 return -EINVAL;
 
  start:
@@ -1551,6 +1647,35 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
         return ret;
 }
 
+/**
+ * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
+ * @master_name: Name of the master PM domain to add the subdomain to.
+ * @subdomain_name: Name of the subdomain to be added.
+ */
+int pm_genpd_add_subdomain_names(const char *master_name,
+                                 const char *subdomain_name)
+{
+        struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
+
+        if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
+                return -EINVAL;
+
+        mutex_lock(&gpd_list_lock);
+        list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+                if (!master && !strcmp(gpd->name, master_name))
+                        master = gpd;
+
+                if (!subdomain && !strcmp(gpd->name, subdomain_name))
+                        subdomain = gpd;
+
+                if (master && subdomain)
+                        break;
+        }
+        mutex_unlock(&gpd_list_lock);
+
+        return pm_genpd_add_subdomain(master, subdomain);
+}
+
 /**
  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
  * @genpd: Master PM domain to remove the subdomain from.
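pm_genpd_add_subdomain_names() removes the need to pass generic_pm_domain pointers around when describing a domain hierarchy. A short sketch, with made-up domain names:

/* Sketch: link two domains by name at platform init; names are illustrative. */
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/pm_domain.h>

static int __init my_domain_topology_init(void)
{
        /* Make "A3SP" a subdomain of the "A4S" master domain. */
        int ret = pm_genpd_add_subdomain_names("A4S", "A3SP");

        if (ret)
                pr_warn("my_platform: linking A3SP under A4S failed: %d\n", ret);

        return ret;
}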
@@ -1704,7 +1829,16 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 }
 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
 
-int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+/**
+ * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
+ * @genpd: PM domain to be connected with cpuidle.
+ * @state: cpuidle state this domain can disable/enable.
+ *
+ * Make a PM domain behave as though it contained a CPU core, that is, instead
+ * of calling its power down routine it will enable the given cpuidle state so
+ * that the cpuidle subsystem can power it down (if possible and desirable).
+ */
+int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
 {
         struct cpuidle_driver *cpuidle_drv;
         struct gpd_cpu_data *cpu_data;
@@ -1753,7 +1887,24 @@ int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
         goto out;
 }
 
-int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+/**
+ * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
+ * @name: Name of the domain to connect to cpuidle.
+ * @state: cpuidle state this domain can manipulate.
+ */
+int pm_genpd_name_attach_cpuidle(const char *name, int state)
+{
+        return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
+}
+
+/**
+ * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
+ * @genpd: PM domain to remove the cpuidle connection from.
+ *
+ * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
+ * given PM domain.
+ */
+int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
 {
         struct gpd_cpu_data *cpu_data;
         struct cpuidle_state *idle_state;
@@ -1784,6 +1935,15 @@ int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
         return ret;
 }
 
+/**
+ * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
+ * @name: Name of the domain to disconnect cpuidle from.
+ */
+int pm_genpd_name_detach_cpuidle(const char *name)
+{
+        return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
+}
+
 /* Default device callbacks for generic PM domains. */
 
 /**
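Together with the lookup helper, the cpuidle hooks can now be wired up by name as well. A sketch, assuming a domain named "A4S" exists and that cpuidle state index 1 is the state the domain is allowed to gate (both are assumptions):

/* Sketch: connect a CPU domain to cpuidle by name; name and state assumed. */
#include <linux/pm_domain.h>

static int my_cpu_domain_setup(void)
{
        int ret;

        ret = pm_genpd_name_attach_cpuidle("A4S", 1);
        if (ret)
                return ret;

        /* The connection can later be undone with:
         *      pm_genpd_name_detach_cpuidle("A4S");
         */
        return 0;
}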
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -57,20 +57,17 @@ static pm_message_t pm_transition;
 static int async_error;
 
 /**
- * device_pm_init - Initialize the PM-related part of a device object.
+ * device_pm_sleep_init - Initialize system suspend-related device fields.
  * @dev: Device object being initialized.
  */
-void device_pm_init(struct device *dev)
+void device_pm_sleep_init(struct device *dev)
 {
         dev->power.is_prepared = false;
         dev->power.is_suspended = false;
         init_completion(&dev->power.completion);
         complete_all(&dev->power.completion);
         dev->power.wakeup = NULL;
-        spin_lock_init(&dev->power.lock);
-        pm_runtime_init(dev);
         INIT_LIST_HEAD(&dev->power.entry);
-        dev->power.power_state = PMSG_INVALID;
 }
 
 /**
@@ -408,6 +405,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
         TRACE_DEVICE(dev);
         TRACE_RESUME(0);
 
+        if (dev->power.syscore)
+                goto Out;
+
         if (dev->pm_domain) {
                 info = "noirq power domain ";
                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -429,6 +429,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 
         error = dpm_run_callback(callback, dev, state, info);
 
+ Out:
         TRACE_RESUME(error);
         return error;
 }
@@ -486,6 +487,9 @@ static int device_resume_early(struct device *dev, pm_message_t state)
         TRACE_DEVICE(dev);
         TRACE_RESUME(0);
 
+        if (dev->power.syscore)
+                goto Out;
+
         if (dev->pm_domain) {
                 info = "early power domain ";
                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -507,6 +511,7 @@ static int device_resume_early(struct device *dev, pm_message_t state)
 
         error = dpm_run_callback(callback, dev, state, info);
 
+ Out:
         TRACE_RESUME(error);
         return error;
 }
@@ -565,11 +570,13 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
         pm_callback_t callback = NULL;
         char *info = NULL;
         int error = 0;
-        bool put = false;
 
         TRACE_DEVICE(dev);
         TRACE_RESUME(0);
 
+        if (dev->power.syscore)
+                goto Complete;
+
         dpm_wait(dev->parent, async);
         device_lock(dev);
 
@@ -583,7 +590,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
                 goto Unlock;
 
         pm_runtime_enable(dev);
-        put = true;
 
         if (dev->pm_domain) {
                 info = "power domain ";
@@ -632,13 +638,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
  Unlock:
         device_unlock(dev);
+
+ Complete:
         complete_all(&dev->power.completion);
 
         TRACE_RESUME(error);
 
-        if (put)
-                pm_runtime_put_sync(dev);
-
         return error;
 }
 
@@ -722,6 +727,9 @@ static void device_complete(struct device *dev, pm_message_t state)
         void (*callback)(struct device *) = NULL;
         char *info = NULL;
 
+        if (dev->power.syscore)
+                return;
+
         device_lock(dev);
 
         if (dev->pm_domain) {
@@ -749,6 +757,8 @@ static void device_complete(struct device *dev, pm_message_t state)
         }
 
         device_unlock(dev);
+
+        pm_runtime_put_sync(dev);
 }
 
 /**
@@ -834,6 +844,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
         pm_callback_t callback = NULL;
         char *info = NULL;
 
+        if (dev->power.syscore)
+                return 0;
+
         if (dev->pm_domain) {
                 info = "noirq power domain ";
                 callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -917,6 +930,9 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
         pm_callback_t callback = NULL;
         char *info = NULL;
 
+        if (dev->power.syscore)
+                return 0;
+
         if (dev->pm_domain) {
                 info = "late power domain ";
                 callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -996,7 +1012,7 @@ int dpm_suspend_end(pm_message_t state)
 
         error = dpm_suspend_noirq(state);
         if (error) {
-                dpm_resume_early(state);
+                dpm_resume_early(resume_event(state));
                 return error;
         }
 
@@ -1043,16 +1059,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
         if (async_error)
                 goto Complete;
 
-        pm_runtime_get_noresume(dev);
+        /*
+         * If a device configured to wake up the system from sleep states
+         * has been suspended at run time and there's a resume request pending
+         * for it, this is equivalent to the device signaling wakeup, so the
+         * system suspend operation should be aborted.
+         */
         if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                 pm_wakeup_event(dev, 0);
 
         if (pm_wakeup_pending()) {
-                pm_runtime_put_sync(dev);
                 async_error = -EBUSY;
                 goto Complete;
         }
 
+        if (dev->power.syscore)
+                goto Complete;
+
         device_lock(dev);
 
         if (dev->pm_domain) {
@@ -1111,12 +1134,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
  Complete:
         complete_all(&dev->power.completion);
 
-        if (error) {
-                pm_runtime_put_sync(dev);
+        if (error)
                 async_error = error;
-        } else if (dev->power.is_suspended) {
+        else if (dev->power.is_suspended)
                 __pm_runtime_disable(dev, false);
-        }
 
         return error;
 }
@@ -1209,6 +1230,17 @@ static int device_prepare(struct device *dev, pm_message_t state)
         char *info = NULL;
         int error = 0;
 
+        if (dev->power.syscore)
+                return 0;
+
+        /*
+         * If a device's parent goes into runtime suspend at the wrong time,
+         * it won't be possible to resume the device. To prevent this we
+         * block runtime suspend here, during the prepare phase, and allow
+         * it again during the complete phase.
+         */
+        pm_runtime_get_noresume(dev);
+
         device_lock(dev);
 
         dev->power.wakeup_path = device_may_wakeup(dev);
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -22,6 +22,7 @@
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 #include <linux/opp.h>
+#include <linux/of.h>
 
 /*
  * Internal data structure organization with the OPP layer library is as
@@ -674,3 +675,49 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
 
         return &dev_opp->head;
 }
+
+#ifdef CONFIG_OF
+/**
+ * of_init_opp_table() - Initialize opp table from device tree
+ * @dev: device pointer used to lookup device OPPs.
+ *
+ * Register the initial OPP table with the OPP library for given device.
+ */
+int of_init_opp_table(struct device *dev)
+{
+        const struct property *prop;
+        const __be32 *val;
+        int nr;
+
+        prop = of_find_property(dev->of_node, "operating-points", NULL);
+        if (!prop)
+                return -ENODEV;
+        if (!prop->value)
+                return -ENODATA;
+
+        /*
+         * Each OPP is a set of tuples consisting of frequency and
+         * voltage like <freq-kHz vol-uV>.
+         */
+        nr = prop->length / sizeof(u32);
+        if (nr % 2) {
+                dev_err(dev, "%s: Invalid OPP list\n", __func__);
+                return -EINVAL;
+        }
+
+        val = prop->value;
+        while (nr) {
+                unsigned long freq = be32_to_cpup(val++) * 1000;
+                unsigned long volt = be32_to_cpup(val++);
+
+                if (opp_add(dev, freq, volt)) {
+                        dev_warn(dev, "%s: Failed to add OPP %ld\n",
+                                 __func__, freq);
+                        continue;
+                }
+                nr -= 2;
+        }
+
+        return 0;
+}
+#endif
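of_init_opp_table() parses an "operating-points" property of <frequency-kHz voltage-uV> pairs from the device's DT node and feeds it to opp_add(). A hedged sketch of a consumer combining it with the existing OPP lookup API (error handling trimmed, names and target frequency illustrative):

/* Sketch: populate the OPP table from DT, then look up an OPP. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

static int my_driver_setup_opps(struct device *dev)
{
        struct opp *opp;
        unsigned long freq = 800000000;         /* 800 MHz target, illustrative */
        int ret;

        ret = of_init_opp_table(dev);           /* parses "operating-points" */
        if (ret)
                return ret;

        rcu_read_lock();
        opp = opp_find_freq_ceil(dev, &freq);   /* existing OPP library call */
        rcu_read_unlock();

        return IS_ERR(opp) ? PTR_ERR(opp) : 0;
}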
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,12 +1,32 @@
 #include <linux/pm_qos.h>
 
+static inline void device_pm_init_common(struct device *dev)
+{
+        if (!dev->power.early_init) {
+                spin_lock_init(&dev->power.lock);
+                dev->power.power_state = PMSG_INVALID;
+                dev->power.early_init = true;
+        }
+}
+
 #ifdef CONFIG_PM_RUNTIME
 
+static inline void pm_runtime_early_init(struct device *dev)
+{
+        dev->power.disable_depth = 1;
+        device_pm_init_common(dev);
+}
+
 extern void pm_runtime_init(struct device *dev);
 extern void pm_runtime_remove(struct device *dev);
 
 #else /* !CONFIG_PM_RUNTIME */
 
+static inline void pm_runtime_early_init(struct device *dev)
+{
+        device_pm_init_common(dev);
+}
+
 static inline void pm_runtime_init(struct device *dev) {}
 static inline void pm_runtime_remove(struct device *dev) {}
 
@@ -25,7 +45,7 @@ static inline struct device *to_device(struct list_head *entry)
         return container_of(entry, struct device, power.entry);
 }
 
-extern void device_pm_init(struct device *dev);
+extern void device_pm_sleep_init(struct device *dev);
 extern void device_pm_add(struct device *);
 extern void device_pm_remove(struct device *);
 extern void device_pm_move_before(struct device *, struct device *);
@@ -34,12 +54,7 @@ extern void device_pm_move_last(struct device *);
 
 #else /* !CONFIG_PM_SLEEP */
 
-static inline void device_pm_init(struct device *dev)
-{
-        spin_lock_init(&dev->power.lock);
-        dev->power.power_state = PMSG_INVALID;
-        pm_runtime_init(dev);
-}
+static inline void device_pm_sleep_init(struct device *dev) {}
 
 static inline void device_pm_add(struct device *dev)
 {
@@ -60,6 +75,13 @@ static inline void device_pm_move_last(struct device *dev) {}
 
 #endif /* !CONFIG_PM_SLEEP */
 
+static inline void device_pm_init(struct device *dev)
+{
+        device_pm_init_common(dev);
+        device_pm_sleep_init(dev);
+        pm_runtime_init(dev);
+}
+
 #ifdef CONFIG_PM
 
 /*
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -509,6 +509,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
  repeat:
         if (dev->power.runtime_error)
                 retval = -EINVAL;
+        else if (dev->power.disable_depth == 1 && dev->power.is_suspended
+            && dev->power.runtime_status == RPM_ACTIVE)
+                retval = 1;
         else if (dev->power.disable_depth > 0)
                 retval = -EACCES;
         if (retval)
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -127,6 +127,8 @@ EXPORT_SYMBOL_GPL(wakeup_source_destroy);
  */
 void wakeup_source_add(struct wakeup_source *ws)
 {
+        unsigned long flags;
+
         if (WARN_ON(!ws))
                 return;
 
@@ -135,9 +137,9 @@ void wakeup_source_add(struct wakeup_source *ws)
         ws->active = false;
         ws->last_time = ktime_get();
 
-        spin_lock_irq(&events_lock);
+        spin_lock_irqsave(&events_lock, flags);
         list_add_rcu(&ws->entry, &wakeup_sources);
-        spin_unlock_irq(&events_lock);
+        spin_unlock_irqrestore(&events_lock, flags);
 }
 EXPORT_SYMBOL_GPL(wakeup_source_add);
 
@@ -147,12 +149,14 @@ EXPORT_SYMBOL_GPL(wakeup_source_add);
  */
 void wakeup_source_remove(struct wakeup_source *ws)
 {
+        unsigned long flags;
+
         if (WARN_ON(!ws))
                 return;
 
-        spin_lock_irq(&events_lock);
+        spin_lock_irqsave(&events_lock, flags);
         list_del_rcu(&ws->entry);
-        spin_unlock_irq(&events_lock);
+        spin_unlock_irqrestore(&events_lock, flags);
         synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(wakeup_source_remove);
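Because events_lock is now taken with irqsave/irqrestore, wakeup_source_add() and wakeup_source_remove() may be called with interrupts disabled. A cautious sketch of the kind of caller this enables; the static wakeup source, its name, and the handler are illustrative, not taken from this merge:

/* Sketch: a wakeup source that can now be set up from atomic context. */
#include <linux/pm_wakeup.h>

static struct wakeup_source my_ws = {
        .name = "my_event",             /* illustrative name */
};

static void my_setup(void)              /* may now run with IRQs disabled */
{
        wakeup_source_add(&my_ws);
}

static void my_event_handler(void)
{
        /* Report wakeup activity; keeps the system awake for ~50 ms. */
        __pm_wakeup_event(&my_ws, 50);
}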
@@ -649,6 +653,31 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
 }
 EXPORT_SYMBOL_GPL(pm_wakeup_event);
 
+static void print_active_wakeup_sources(void)
+{
+        struct wakeup_source *ws;
+        int active = 0;
+        struct wakeup_source *last_activity_ws = NULL;
+
+        rcu_read_lock();
+        list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+                if (ws->active) {
+                        pr_info("active wakeup source: %s\n", ws->name);
+                        active = 1;
+                } else if (!active &&
+                           (!last_activity_ws ||
+                            ktime_to_ns(ws->last_time) >
+                            ktime_to_ns(last_activity_ws->last_time))) {
+                        last_activity_ws = ws;
+                }
+        }
+
+        if (!active && last_activity_ws)
+                pr_info("last active wakeup source: %s\n",
+                        last_activity_ws->name);
+        rcu_read_unlock();
+}
+
 /**
  * pm_wakeup_pending - Check if power transition in progress should be aborted.
  *
@@ -671,6 +700,10 @@ bool pm_wakeup_pending(void)
                 events_check_enabled = !ret;
         }
         spin_unlock_irqrestore(&events_lock, flags);
+
+        if (ret)
+                print_active_wakeup_sources();
+
         return ret;
 }
 
@@ -723,15 +756,16 @@ bool pm_get_wakeup_count(unsigned int *count, bool block)
 bool pm_save_wakeup_count(unsigned int count)
 {
         unsigned int cnt, inpr;
+        unsigned long flags;
 
         events_check_enabled = false;
-        spin_lock_irq(&events_lock);
+        spin_lock_irqsave(&events_lock, flags);
         split_counters(&cnt, &inpr);
         if (cnt == count && inpr == 0) {
                 saved_count = count;
                 events_check_enabled = true;
         }
-        spin_unlock_irq(&events_lock);
+        spin_unlock_irqrestore(&events_lock, flags);
         return events_check_enabled;
 }
 