Merge tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael J. Wysocki:

 - Improved system suspend/resume and runtime PM handling for the SH
   TMU, CMT and MTU2 clock event devices (also used by ARM/shmobile).

 - Generic PM domains framework extensions related to cpuidle support
   and domain objects lookup using names.

 - ARM/shmobile power management updates including improved support for
   the SH7372's A4S power domain containing the CPU core.

 - cpufreq changes related to AMD CPUs support from Matthew Garrett,
   Andre Przywara and Borislav Petkov.

 - cpu0 cpufreq driver from Shawn Guo.

 - cpufreq governor fixes related to the relaxing of limits from Michal
   Pecio.

 - OMAP cpufreq updates from Axel Lin and Richard Zhao.

 - cpuidle ladder governor fixes related to the disabling of states
   from Carsten Emde and me.

 - Runtime PM core updates related to the interactions with the system
   suspend core from Alan Stern and Kevin Hilman.

 - Wakeup sources modification allowing more helper functions to be
   called from interrupt context from John Stultz and additional
   diagnostic code from Todd Poynor.

 - System suspend error code path fix from Feng Hong.

Fixed up conflicts in cpufreq/powernow-k8 that stemmed from the
workqueue fixes conflicting fairly badly with the removal of support
for hardware P-state chips.  The changes were independent but somewhat
intertwined.

* tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (76 commits)
  Revert "PM QoS: Use spinlock in the per-device PM QoS constraints code"
  PM / Runtime: let rpm_resume() succeed if RPM_ACTIVE, even when disabled, v2
  cpuidle: rename function name "__cpuidle_register_driver", v2
  cpufreq: OMAP: Check IS_ERR() instead of NULL for omap_device_get_by_hwmod_name
  cpuidle: remove some empty lines
  PM: Prevent runtime suspend during system resume
  PM QoS: Use spinlock in the per-device PM QoS constraints code
  PM / Sleep: use resume event when call dpm_resume_early
  cpuidle / ACPI : move cpuidle_device field out of the acpi_processor_power structure
  ACPI / processor: remove pointless variable initialization
  ACPI / processor: remove unused function parameter
  cpufreq: OMAP: remove loops_per_jiffy recalculate for smp
  sections: fix section conflicts in drivers/cpufreq
  cpufreq: conservative: update frequency when limits are relaxed
  cpufreq / ondemand: update frequency when limits are relaxed
  properly __init-annotate pm_sysrq_init()
  cpufreq: Add a generic cpufreq-cpu0 driver
  PM / OPP: Initialize OPP table from device tree
  ARM: add cpufreq transiton notifier to adjust loops_per_jiffy for smp
  cpufreq: Remove support for hardware P-state chips from powernow-k8
  ...
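The wakeup-sources item above (and the pm_wakeup_event(dev, 0) call visible in the __device_suspend() hunk further down) relies on a driver reporting wakeup activity from its interrupt handler. A minimal hedged sketch of that pattern follows, using a hypothetical foo_chip driver that is not part of this series:

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

/* Hypothetical driver data; only the field used here is shown. */
struct foo_chip {
	struct device *dev;
};

/*
 * IRQ handler for a wakeup-capable device.  pm_wakeup_event() may be
 * called from interrupt context; a timeout of 0 reports an
 * instantaneous wakeup event.
 */
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct foo_chip *chip = data;

	if (device_may_wakeup(chip->dev))
		pm_wakeup_event(chip->dev, 0);

	/* ... normal interrupt servicing would go here ... */
	return IRQ_HANDLED;
}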
@@ -57,20 +57,17 @@ static pm_message_t pm_transition;
static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}

/**
@@ -408,6 +405,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
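The dev->power.syscore checks added here (and in the other suspend/resume phases below) make the PM core skip such devices during the ordinary device suspend/resume phases; they are intended to be handled alongside syscore operations instead. A hedged sketch of how a driver might mark its device, assuming it sets the flag directly (the field only exists when CONFIG_PM_SLEEP is enabled):

#include <linux/device.h>
#include <linux/pm.h>

/*
 * Hedged sketch: mark a device as "syscore" so the PM core's device
 * suspend/resume phases leave it alone.  The field is only present
 * with CONFIG_PM_SLEEP, hence the guard; a kernel may also provide a
 * helper for this instead of a direct assignment.
 */
static void foo_mark_as_syscore(struct device *dev)
{
#ifdef CONFIG_PM_SLEEP
	dev->power.syscore = true;
#endif
}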
@@ -429,6 +429,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);
	return error;
}

@@ -486,6 +487,9 @@ static int device_resume_early(struct device *dev, pm_message_t state)
	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Out;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);

@@ -507,6 +511,7 @@ static int device_resume_early(struct device *dev, pm_message_t state)

	error = dpm_run_callback(callback, dev, state, info);

 Out:
	TRACE_RESUME(error);
	return error;
}

@@ -565,11 +570,13 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	bool put = false;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	dpm_wait(dev->parent, async);
	device_lock(dev);

@@ -583,7 +590,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		info = "power domain ";

@@ -632,13 +638,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)

 Unlock:
	device_unlock(dev);

 Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

@@ -722,6 +727,9 @@ static void device_complete(struct device *dev, pm_message_t state)
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {

@@ -749,6 +757,8 @@ static void device_complete(struct device *dev, pm_message_t state)
	}

	device_unlock(dev);

	pm_runtime_put_sync(dev);
}

/**

@@ -834,6 +844,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);

@@ -917,6 +930,9 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->power.syscore)
		return 0;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);

@@ -996,7 +1012,7 @@ int dpm_suspend_end(pm_message_t state)

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(state);
		dpm_resume_early(resume_event(state));
		return error;
	}

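In the dpm_suspend_end() hunk above, the error path switches from dpm_resume_early(state) to dpm_resume_early(resume_event(state)), so devices that were already suspended late are unwound with the matching resume message rather than the suspend one (the "use resume event when call dpm_resume_early" fix in the shortlog). A hedged sketch of the mapping resume_event() is understood to perform, reconstructed from memory rather than copied from the tree:

#include <linux/pm.h>

/*
 * Hedged sketch of the suspend-message -> resume-message mapping:
 * a failed PM_EVENT_SUSPEND is unwound with PMSG_RESUME, a failed
 * freeze/quiesce with PMSG_RECOVER, and a failed hibernation
 * power-down with PMSG_RESTORE.
 */
static pm_message_t example_resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}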
@@ -1043,16 +1059,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
	if (async_error)
		goto Complete;

	pm_runtime_get_noresume(dev);
	/*
	 * If a device configured to wake up the system from sleep states
	 * has been suspended at run time and there's a resume request pending
	 * for it, this is equivalent to the device signaling wakeup, so the
	 * system suspend operation should be aborted.
	 */
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	device_lock(dev);

	if (dev->pm_domain) {
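The comment block added in the hunk above explains why a pending runtime-resume request on a wakeup-enabled device is treated as a wakeup signal and aborts system suspend. That path only applies when device_may_wakeup() is true for the device; a hedged sketch of how a driver typically declares and enables the capability at probe time (hypothetical function name):

#include <linux/device.h>
#include <linux/pm_wakeup.h>

/*
 * Hedged sketch: declare at probe time that the device can wake the
 * system and enable that capability by default.  With this in place,
 * device_may_wakeup(dev) returns true and the abort-on-pending-wakeup
 * logic in __device_suspend() applies to the device.
 */
static int foo_probe_wakeup_setup(struct device *dev)
{
	/* Marks the device wakeup-capable and enables wakeup in one call. */
	device_init_wakeup(dev, true);
	return 0;
}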
@@ -1111,12 +1134,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 Complete:
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
	if (error)
		async_error = error;
	} else if (dev->power.is_suspended) {
	else if (dev->power.is_suspended)
		__pm_runtime_disable(dev, false);
	}

	return error;
}
@@ -1209,6 +1230,17 @@ static int device_prepare(struct device *dev, pm_message_t state)
	char *info = NULL;
	int error = 0;

	if (dev->power.syscore)
		return 0;

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device. To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);
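The device_prepare() hunk above takes a pm_runtime_get_noresume() reference during the prepare phase, which (since prepare runs for every device, parents included) keeps runtime suspend from happening anywhere in the tree until the complete phase releases it. The same usage-count pattern is available to drivers; a hedged sketch with hypothetical helper names:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/*
 * Hedged sketch of the usage-count pattern used by the prepare/complete
 * phases above: pm_runtime_get_noresume() only increments the usage
 * count (it does not wake the device), which is enough to keep runtime
 * suspend from happening until the matching put.
 */
static void foo_block_runtime_suspend(struct device *dev)
{
	pm_runtime_get_noresume(dev);
}

static void foo_allow_runtime_suspend(struct device *dev)
{
	/* Drops the reference taken above; may trigger a runtime suspend. */
	pm_runtime_put_sync(dev);
}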