Merge branch 'pm-sleep' into pm-for-linus

* pm-sleep: (51 commits)
  PM: Drop generic_subsys_pm_ops
  PM / Sleep: Remove forward-only callbacks from AMBA bus type
  PM / Sleep: Remove forward-only callbacks from platform bus type
  PM: Run the driver callback directly if the subsystem one is not there
  PM / Sleep: Make pm_op() and pm_noirq_op() return callback pointers
  PM / Sleep: Merge internal functions in generic_ops.c
  PM / Sleep: Simplify generic system suspend callbacks
  PM / Hibernate: Remove deprecated hibernation snapshot ioctls
  PM / Sleep: Fix freezer failures due to racy usermodehelper_is_disabled()
  PM / Sleep: Recommend [un]lock_system_sleep() over using pm_mutex directly
  PM / Sleep: Replace mutex_[un]lock(&pm_mutex) with [un]lock_system_sleep()
  PM / Sleep: Make [un]lock_system_sleep() generic
  PM / Sleep: Use the freezer_count() functions in [un]lock_system_sleep() APIs
  PM / Freezer: Remove the "userspace only" constraint from freezer[_do_not]_count()
  PM / Hibernate: Replace unintuitive 'if' condition in kernel/power/user.c with 'else'
  Freezer / sunrpc / NFS: don't allow TASK_KILLABLE sleeps to block the freezer
  PM / Sleep: Unify diagnostic messages from device suspend/resume
  ACPI / PM: Do not save/restore NVS on Asus K54C/K54HR
  PM / Hibernate: Remove deprecated hibernation test modes
  PM / Hibernate: Thaw processes in SNAPSHOT_CREATE_IMAGE ioctl test path
  ...

Conflicts:
	kernel/kmod.c
Author: Rafael J. Wysocki
Date:   2011-12-25 23:42:20 +01:00

81 changed files with 769 additions and 1267 deletions

diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c

@@ -48,19 +48,17 @@ static inline struct freezer *task_freezer(struct task_struct *task)
 			    struct freezer, css);
 }
 
-static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
+bool cgroup_freezing(struct task_struct *task)
 {
-	enum freezer_state state = task_freezer(task)->state;
-	return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
-}
+	enum freezer_state state;
+	bool ret;
 
-int cgroup_freezing_or_frozen(struct task_struct *task)
-{
-	int result;
-	task_lock(task);
-	result = __cgroup_freezing_or_frozen(task);
-	task_unlock(task);
-	return result;
+	rcu_read_lock();
+	state = task_freezer(task)->state;
+	ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
+	rcu_read_unlock();
+
+	return ret;
 }
 
@@ -102,9 +100,6 @@ struct cgroup_subsys freezer_subsys;
  * freezer_can_attach():
  * cgroup_mutex (held by caller of can_attach)
  *
- * cgroup_freezing_or_frozen():
- * task->alloc_lock (to get task's cgroup)
- *
  * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
  * freezer->lock
  * sighand->siglock (if the cgroup is freezing)
@@ -130,7 +125,7 @@ struct cgroup_subsys freezer_subsys;
  * write_lock css_set_lock (cgroup iterator start)
  * task->alloc_lock
  * read_lock css_set_lock (cgroup iterator start)
- * task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
+ * task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
  * sighand->siglock
  */
 static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
@@ -150,7 +145,11 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
 static void freezer_destroy(struct cgroup_subsys *ss,
 			    struct cgroup *cgroup)
 {
-	kfree(cgroup_freezer(cgroup));
+	struct freezer *freezer = cgroup_freezer(cgroup);
+
+	if (freezer->state != CGROUP_THAWED)
+		atomic_dec(&system_freezing_cnt);
+	kfree(freezer);
 }
 
 /* task is frozen or will freeze immediately when next it gets woken */
@@ -184,13 +183,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
 static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
-	rcu_read_lock();
-	if (__cgroup_freezing_or_frozen(tsk)) {
-		rcu_read_unlock();
-		return -EBUSY;
-	}
-	rcu_read_unlock();
-	return 0;
+	return cgroup_freezing(tsk) ? -EBUSY : 0;
 }
 
 static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
@@ -220,7 +213,7 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 	/* Locking avoids race with FREEZING -> THAWED transitions. */
 	if (freezer->state == CGROUP_FREEZING)
-		freeze_task(task, true);
+		freeze_task(task);
 	spin_unlock_irq(&freezer->lock);
 }
@@ -238,7 +231,7 @@ static void update_if_frozen(struct cgroup *cgroup,
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
 		ntotal++;
-		if (is_task_frozen_enough(task))
+		if (freezing(task) && is_task_frozen_enough(task))
 			nfrozen++;
 	}
@@ -286,10 +279,9 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 	struct task_struct *task;
 	unsigned int num_cant_freeze_now = 0;
 
-	freezer->state = CGROUP_FREEZING;
 	cgroup_iter_start(cgroup, &it);
 	while ((task = cgroup_iter_next(cgroup, &it))) {
-		if (!freeze_task(task, true))
+		if (!freeze_task(task))
 			continue;
 		if (is_task_frozen_enough(task))
 			continue;
@@ -307,12 +299,9 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
 	struct task_struct *task;
 
 	cgroup_iter_start(cgroup, &it);
-	while ((task = cgroup_iter_next(cgroup, &it))) {
-		thaw_process(task);
-	}
+	while ((task = cgroup_iter_next(cgroup, &it)))
+		__thaw_task(task);
 	cgroup_iter_end(cgroup, &it);
-
-	freezer->state = CGROUP_THAWED;
 }
 
 static int freezer_change_state(struct cgroup *cgroup,
@@ -326,20 +315,24 @@ static int freezer_change_state(struct cgroup *cgroup,
 	spin_lock_irq(&freezer->lock);
 
 	update_if_frozen(cgroup, freezer);
 	if (goal_state == freezer->state)
 		goto out;
 
 	switch (goal_state) {
 	case CGROUP_THAWED:
+		if (freezer->state != CGROUP_THAWED)
+			atomic_dec(&system_freezing_cnt);
+		freezer->state = CGROUP_THAWED;
 		unfreeze_cgroup(cgroup, freezer);
 		break;
 	case CGROUP_FROZEN:
+		if (freezer->state == CGROUP_THAWED)
+			atomic_inc(&system_freezing_cnt);
+		freezer->state = CGROUP_FREEZING;
 		retval = try_to_freeze_cgroup(cgroup, freezer);
 		break;
 	default:
 		BUG();
 	}
 out:
 	spin_unlock_irq(&freezer->lock);
 	return retval;
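
The atomic_inc()/atomic_dec() calls added above keep system_freezing_cnt in step with the number of freezing or frozen cgroups, which is what lets the common-case freezing() test stay a single atomic read. For reference, the fast path this counter short-circuits looks roughly like this (a sketch of the freezing() helper from include/linux/freezer.h as reworked by this series, not quoted verbatim):

	static inline bool freezing(struct task_struct *p)
	{
		/* no freezing condition anywhere: skip the slow path entirely */
		if (likely(!atomic_read(&system_freezing_cnt)))
			return false;
		return freezing_slow_path(p);
	}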

diff --git a/kernel/exit.c b/kernel/exit.c

@@ -679,8 +679,6 @@ static void exit_mm(struct task_struct * tsk)
 	tsk->mm = NULL;
 	up_read(&mm->mmap_sem);
 	enter_lazy_tlb(mm, current);
-	/* We don't want this task to be frozen prematurely */
-	clear_freeze_flag(tsk);
 	task_unlock(tsk);
 	mm_update_next_owner(mm);
 	mmput(mm);
@@ -1040,6 +1038,7 @@ NORET_TYPE void do_exit(long code)
 	exit_rcu();
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
+	tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
 	schedule();
 	BUG();
 	/* Avoid "noreturn function does return". */

diff --git a/kernel/fork.c b/kernel/fork.c

@@ -992,7 +992,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 	new_flags |= PF_FORKNOEXEC;
 	new_flags |= PF_STARTING;
 	p->flags = new_flags;
-	clear_freeze_flag(p);
 }
 
 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)

diff --git a/kernel/freezer.c b/kernel/freezer.c

@@ -9,101 +9,114 @@
 #include <linux/export.h>
 #include <linux/syscalls.h>
 #include <linux/freezer.h>
+#include <linux/kthread.h>
 
-/*
- * freezing is complete, mark current process as frozen
+/* total number of freezing conditions in effect */
+atomic_t system_freezing_cnt = ATOMIC_INIT(0);
+EXPORT_SYMBOL(system_freezing_cnt);
+
+/* indicate whether PM freezing is in effect, protected by pm_mutex */
+bool pm_freezing;
+bool pm_nosig_freezing;
+
+/* protects freezing and frozen transitions */
+static DEFINE_SPINLOCK(freezer_lock);
+
+/**
+ * freezing_slow_path - slow path for testing whether a task needs to be frozen
+ * @p: task to be tested
+ *
+ * This function is called by freezing() if system_freezing_cnt isn't zero
+ * and tests whether @p needs to enter and stay in frozen state.  Can be
+ * called under any context.  The freezers are responsible for ensuring the
+ * target tasks see the updated state.
 */
-static inline void frozen_process(void)
+bool freezing_slow_path(struct task_struct *p)
 {
-	if (!unlikely(current->flags & PF_NOFREEZE)) {
-		current->flags |= PF_FROZEN;
-		smp_wmb();
-	}
-	clear_freeze_flag(current);
+	if (p->flags & PF_NOFREEZE)
+		return false;
+
+	if (pm_nosig_freezing || cgroup_freezing(p))
+		return true;
+
+	if (pm_freezing && !(p->flags & PF_KTHREAD))
+		return true;
+
+	return false;
 }
+EXPORT_SYMBOL(freezing_slow_path);
 
 /* Refrigerator is place where frozen processes are stored :-). */
-void refrigerator(void)
+bool __refrigerator(bool check_kthr_stop)
 {
 	/* Hmm, should we be allowed to suspend when there are realtime
 	   processes around? */
-	long save;
+	bool was_frozen = false;
+	long save = current->state;
 
-	task_lock(current);
-	if (freezing(current)) {
-		frozen_process();
-		task_unlock(current);
-	} else {
-		task_unlock(current);
-		return;
-	}
-	save = current->state;
 	pr_debug("%s entered refrigerator\n", current->comm);
 
-	spin_lock_irq(&current->sighand->siglock);
-	recalc_sigpending(); /* We sent fake signal, clean it up */
-	spin_unlock_irq(&current->sighand->siglock);
-
-	/* prevent accounting of that task to load */
-	current->flags |= PF_FREEZING;
-
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
-		if (!frozen(current))
+
+		spin_lock_irq(&freezer_lock);
+		current->flags |= PF_FROZEN;
+		if (!freezing(current) ||
+		    (check_kthr_stop && kthread_should_stop()))
+			current->flags &= ~PF_FROZEN;
+		spin_unlock_irq(&freezer_lock);
+
+		if (!(current->flags & PF_FROZEN))
 			break;
+		was_frozen = true;
 		schedule();
 	}
 
-	/* Remove the accounting blocker */
-	current->flags &= ~PF_FREEZING;
-
 	pr_debug("%s left refrigerator\n", current->comm);
-	__set_current_state(save);
+
+	/*
+	 * Restore saved task state before returning.  The mb'd version
+	 * needs to be used; otherwise, it might silently break
+	 * synchronization which depends on ordered task state change.
+	 */
+	set_current_state(save);
+
+	return was_frozen;
 }
-EXPORT_SYMBOL(refrigerator);
+EXPORT_SYMBOL(__refrigerator);
 
 static void fake_signal_wake_up(struct task_struct *p)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&p->sighand->siglock, flags);
-	signal_wake_up(p, 0);
-	spin_unlock_irqrestore(&p->sighand->siglock, flags);
+	if (lock_task_sighand(p, &flags)) {
+		signal_wake_up(p, 0);
+		unlock_task_sighand(p, &flags);
+	}
 }
 
 /**
- * freeze_task - send a freeze request to given task
- * @p: task to send the request to
- * @sig_only: if set, the request will only be sent if the task has the
- *	PF_FREEZER_NOSIG flag unset
- * Return value: 'false', if @sig_only is set and the task has
- *	PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
+ * freeze_task - send a freeze request to given task
+ * @p: task to send the request to
 *
- * The freeze request is sent by setting the tasks's TIF_FREEZE flag and
- * either sending a fake signal to it or waking it up, depending on whether
- * or not it has PF_FREEZER_NOSIG set.  If @sig_only is set and the task
- * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
- * TIF_FREEZE flag will not be set.
+ * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
+ * flag and either sending a fake signal to it or waking it up, depending
+ * on whether it has %PF_FREEZER_NOSIG set.
+ *
+ * RETURNS:
+ * %false, if @p is not freezing or already frozen; %true, otherwise
 */
-bool freeze_task(struct task_struct *p, bool sig_only)
+bool freeze_task(struct task_struct *p)
 {
-	/*
-	 * We first check if the task is freezing and next if it has already
-	 * been frozen to avoid the race with frozen_process() which first marks
-	 * the task as frozen and next clears its TIF_FREEZE.
-	 */
-	if (!freezing(p)) {
-		smp_rmb();
-		if (frozen(p))
-			return false;
+	unsigned long flags;
 
-		if (!sig_only || should_send_signal(p))
-			set_freeze_flag(p);
-		else
-			return false;
+	spin_lock_irqsave(&freezer_lock, flags);
+	if (!freezing(p) || frozen(p)) {
+		spin_unlock_irqrestore(&freezer_lock, flags);
+		return false;
 	}
 
-	if (should_send_signal(p)) {
+	if (!(p->flags & PF_KTHREAD)) {
 		fake_signal_wake_up(p);
 		/*
 		 * fake_signal_wake_up() goes through p's scheduler
@@ -111,56 +124,48 @@ bool freeze_task(struct task_struct *p, bool sig_only)
 		 * TASK_RUNNING transition can't race with task state
 		 * testing in try_to_freeze_tasks().
 		 */
-	} else if (sig_only) {
-		return false;
 	} else {
 		wake_up_state(p, TASK_INTERRUPTIBLE);
 	}
 
+	spin_unlock_irqrestore(&freezer_lock, flags);
 	return true;
 }
 
-void cancel_freezing(struct task_struct *p)
+void __thaw_task(struct task_struct *p)
 {
 	unsigned long flags;
 
-	if (freezing(p)) {
-		pr_debug(" clean up: %s\n", p->comm);
-		clear_freeze_flag(p);
-		spin_lock_irqsave(&p->sighand->siglock, flags);
-		recalc_sigpending_and_wake(p);
-		spin_unlock_irqrestore(&p->sighand->siglock, flags);
-	}
-}
-
-static int __thaw_process(struct task_struct *p)
-{
-	if (frozen(p)) {
-		p->flags &= ~PF_FROZEN;
-		return 1;
-	}
-	clear_freeze_flag(p);
-	return 0;
-}
-
-/*
- * Wake up a frozen process
- *
- * task_lock() is needed to prevent the race with refrigerator() which may
- * occur if the freezing of tasks fails.  Namely, without the lock, if the
- * freezing of tasks failed, thaw_tasks() might have run before a task in
- * refrigerator() could call frozen_process(), in which case the task would be
- * frozen and no one would thaw it.
- */
-int thaw_process(struct task_struct *p)
-{
-	task_lock(p);
-	if (__thaw_process(p) == 1) {
-		task_unlock(p);
+	/*
+	 * Clear freezing and kick @p if FROZEN.  Clearing is guaranteed to
+	 * be visible to @p as waking up implies wmb.  Waking up inside
+	 * freezer_lock also prevents wakeups from leaking outside
+	 * refrigerator.
+	 */
+	spin_lock_irqsave(&freezer_lock, flags);
+	if (frozen(p))
 		wake_up_process(p);
-		return 1;
-	}
-	task_unlock(p);
-	return 0;
+	spin_unlock_irqrestore(&freezer_lock, flags);
 }
-EXPORT_SYMBOL(thaw_process);
+
+/**
+ * set_freezable - make %current freezable
+ *
+ * Mark %current freezable and enter refrigerator if necessary.
+ */
+bool set_freezable(void)
+{
+	might_sleep();
+
+	/*
+	 * Modify flags while holding freezer_lock.  This ensures the
+	 * freezer notices that we aren't frozen yet or the freezing
+	 * condition is visible to try_to_freeze() below.
+	 */
+	spin_lock_irq(&freezer_lock);
+	current->flags &= ~PF_NOFREEZE;
+	spin_unlock_irq(&freezer_lock);
+
+	return try_to_freeze();
+}
+EXPORT_SYMBOL(set_freezable);
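
Ordinary tasks reach __refrigerator() through try_to_freeze(); its header-side counterpart looks roughly like this (a sketch of the include/linux/freezer.h helper as of this series):

	static inline bool try_to_freeze(void)
	{
		might_sleep();
		if (likely(!freezing(current)))
			return false;
		return __refrigerator(false);	/* don't check kthread_should_stop() */
	}

Freezable kthreads are expected to go through kthread_freezable_should_stop() instead, which passes check_kthr_stop=true (see the kernel/kthread.c hunk below).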

diff --git a/kernel/kexec.c b/kernel/kexec.c

@@ -1523,7 +1523,7 @@ int kernel_kexec(void)
 
 #ifdef CONFIG_KEXEC_JUMP
 	if (kexec_image->preserve_context) {
-		mutex_lock(&pm_mutex);
+		lock_system_sleep();
 		pm_prepare_console();
 		error = freeze_processes();
 		if (error) {
@@ -1576,7 +1576,7 @@ int kernel_kexec(void)
 	thaw_processes();
  Restore_console:
 	pm_restore_console();
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 	}
 
 #endif
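
Note that lock_system_sleep() is not just a rename of mutex_lock(&pm_mutex): as reworked by the [un]lock_system_sleep() patches in this series, it also marks the caller with freezer_do_not_count() so a task blocked on pm_mutex cannot cause a freeze failure. A sketch of the include/linux/suspend.h helpers as of this series (not quoted verbatim):

	static inline void lock_system_sleep(void)
	{
		freezer_do_not_count();	/* freezer may treat us as frozen while we block */
		mutex_lock(&pm_mutex);
	}

	static inline void unlock_system_sleep(void)
	{
		mutex_unlock(&pm_mutex);
		freezer_count();	/* rejoin the freezer; freeze here if required */
	}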

diff --git a/kernel/kmod.c b/kernel/kmod.c

@@ -34,6 +34,9 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/resource.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+#include <linux/rwsem.h>
 #include <asm/uaccess.h>
 
 #include <trace/events/module.h>
@@ -48,6 +51,7 @@ static struct workqueue_struct *khelper_wq;
 static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
 static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
 static DEFINE_SPINLOCK(umh_sysctl_lock);
+static DECLARE_RWSEM(umhelper_sem);
 
 #ifdef CONFIG_MODULES
@@ -273,6 +277,7 @@ static void __call_usermodehelper(struct work_struct *work)
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
+ * Should always be manipulated under umhelper_sem acquired for write.
 */
 static int usermodehelper_disabled = 1;
@@ -291,6 +296,18 @@ static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
 */
 #define RUNNING_HELPERS_TIMEOUT	(5 * HZ)
 
+void read_lock_usermodehelper(void)
+{
+	down_read(&umhelper_sem);
+}
+EXPORT_SYMBOL_GPL(read_lock_usermodehelper);
+
+void read_unlock_usermodehelper(void)
+{
+	up_read(&umhelper_sem);
+}
+EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);
+
 /**
 * usermodehelper_disable - prevent new helpers from being started
 */
@@ -298,8 +315,10 @@ int usermodehelper_disable(void)
 {
 	long retval;
 
+	down_write(&umhelper_sem);
 	usermodehelper_disabled = 1;
-	smp_mb();
+	up_write(&umhelper_sem);
+
 	/*
 	 * From now on call_usermodehelper_exec() won't start any new
 	 * helpers, so it is sufficient if running_helpers turns out to
@@ -312,7 +331,9 @@ int usermodehelper_disable(void)
 	if (retval)
 		return 0;
 
+	down_write(&umhelper_sem);
 	usermodehelper_disabled = 0;
+	up_write(&umhelper_sem);
 	return -EAGAIN;
 }
 
@@ -321,7 +342,9 @@ int usermodehelper_disable(void)
 */
 void usermodehelper_enable(void)
 {
+	down_write(&umhelper_sem);
 	usermodehelper_disabled = 0;
+	up_write(&umhelper_sem);
 }
 
 /**
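
The point of the new rwsem is that a reader can now sample usermodehelper_disabled and invoke a helper without racing against the freezer toggling the flag underneath it. A hypothetical caller (names and helper path are illustrative, not from this commit) would look like:

	static char *helper_argv[] = { "/sbin/helper", NULL };	/* hypothetical path */
	static char *helper_envp[] = { "HOME=/", NULL };

	int request_helper(void)	/* hypothetical caller */
	{
		int ret = -EBUSY;

		read_lock_usermodehelper();	/* blocks usermodehelper_disable() */
		if (!usermodehelper_is_disabled())
			ret = call_usermodehelper(helper_argv[0], helper_argv,
						  helper_envp, UMH_WAIT_EXEC);
		read_unlock_usermodehelper();
		return ret;
	}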

diff --git a/kernel/kthread.c b/kernel/kthread.c

@@ -58,6 +58,31 @@ int kthread_should_stop(void)
 }
 EXPORT_SYMBOL(kthread_should_stop);
 
+/**
+ * kthread_freezable_should_stop - should this freezable kthread return now?
+ * @was_frozen: optional out parameter, indicates whether %current was frozen
+ *
+ * kthread_should_stop() for freezable kthreads, which will enter
+ * refrigerator if necessary.  This function is safe from kthread_stop() /
+ * freezer deadlock and freezable kthreads should use this function instead
+ * of calling try_to_freeze() directly.
+ */
+bool kthread_freezable_should_stop(bool *was_frozen)
+{
+	bool frozen = false;
+
+	might_sleep();
+
+	if (unlikely(freezing(current)))
+		frozen = __refrigerator(true);
+
+	if (was_frozen)
+		*was_frozen = frozen;
+
+	return kthread_should_stop();
+}
+EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
+
 /**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
@@ -257,7 +282,7 @@ int kthreadd(void *unused)
 	set_cpus_allowed_ptr(tsk, cpu_all_mask);
 	set_mems_allowed(node_states[N_HIGH_MEMORY]);
 
-	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
+	current->flags |= PF_NOFREEZE;
 
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
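
Per the kerneldoc above, a freezable kthread now polls kthread_freezable_should_stop() in its main loop instead of pairing kthread_should_stop() with try_to_freeze(), which closes the kthread_stop() vs. freezer deadlock. A minimal sketch (the thread function and its work body are placeholders, not from this commit):

	static int my_freezable_thread(void *unused)	/* hypothetical thread fn */
	{
		bool was_frozen;

		set_freezable();
		while (!kthread_freezable_should_stop(&was_frozen)) {
			/* ... one unit of work; was_frozen says if we just thawed ... */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}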

diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c

@@ -43,8 +43,6 @@ int in_suspend __nosavedata;
 enum {
 	HIBERNATION_INVALID,
 	HIBERNATION_PLATFORM,
-	HIBERNATION_TEST,
-	HIBERNATION_TESTPROC,
 	HIBERNATION_SHUTDOWN,
 	HIBERNATION_REBOOT,
 	/* keep last */
@@ -55,7 +53,7 @@ enum {
 static int hibernation_mode = HIBERNATION_SHUTDOWN;
 
-static bool freezer_test_done;
+bool freezer_test_done;
 
 static const struct platform_hibernation_ops *hibernation_ops;
@@ -71,14 +69,14 @@ void hibernation_set_ops(const struct platform_hibernation_ops *ops)
 		WARN_ON(1);
 		return;
 	}
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	hibernation_ops = ops;
 	if (ops)
 		hibernation_mode = HIBERNATION_PLATFORM;
 	else if (hibernation_mode == HIBERNATION_PLATFORM)
 		hibernation_mode = HIBERNATION_SHUTDOWN;
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 }
 
 static bool entering_platform_hibernation;
@@ -96,15 +94,6 @@ static void hibernation_debug_sleep(void)
 	mdelay(5000);
 }
 
-static int hibernation_testmode(int mode)
-{
-	if (hibernation_mode == mode) {
-		hibernation_debug_sleep();
-		return 1;
-	}
-	return 0;
-}
-
 static int hibernation_test(int level)
 {
 	if (pm_test_level == level) {
@@ -114,7 +103,6 @@ static int hibernation_test(int level)
 	return 0;
 }
 #else /* !CONFIG_PM_DEBUG */
-static int hibernation_testmode(int mode) { return 0; }
 static int hibernation_test(int level) { return 0; }
 #endif /* !CONFIG_PM_DEBUG */
@@ -278,8 +266,7 @@ static int create_image(int platform_mode)
 		goto Platform_finish;
 
 	error = disable_nonboot_cpus();
-	if (error || hibernation_test(TEST_CPUS)
-	    || hibernation_testmode(HIBERNATION_TEST))
+	if (error || hibernation_test(TEST_CPUS))
 		goto Enable_cpus;
 
 	local_irq_disable();
@@ -333,7 +320,7 @@ static int create_image(int platform_mode)
 */
 int hibernation_snapshot(int platform_mode)
 {
-	pm_message_t msg = PMSG_RECOVER;
+	pm_message_t msg;
 	int error;
 
 	error = platform_begin(platform_mode);
@@ -349,8 +336,7 @@ int hibernation_snapshot(int platform_mode)
 	if (error)
 		goto Cleanup;
 
-	if (hibernation_test(TEST_FREEZER) ||
-		hibernation_testmode(HIBERNATION_TESTPROC)) {
+	if (hibernation_test(TEST_FREEZER)) {
 		/*
 		 * Indicate to the caller that we are returning due to a
@@ -362,26 +348,26 @@ int hibernation_snapshot(int platform_mode)
 	error = dpm_prepare(PMSG_FREEZE);
 	if (error) {
-		dpm_complete(msg);
+		dpm_complete(PMSG_RECOVER);
 		goto Cleanup;
 	}
 
 	suspend_console();
 	pm_restrict_gfp_mask();
+
 	error = dpm_suspend(PMSG_FREEZE);
-	if (error)
-		goto Recover_platform;
 
-	if (hibernation_test(TEST_DEVICES))
-		goto Recover_platform;
+	if (error || hibernation_test(TEST_DEVICES))
+		platform_recover(platform_mode);
+	else
+		error = create_image(platform_mode);
 
-	error = create_image(platform_mode);
 	/*
-	 * Control returns here (1) after the image has been created or the
+	 * In the case that we call create_image() above, the control
+	 * returns here (1) after the image has been created or the
 	 * image creation has failed and (2) after a successful restore.
 	 */
 
- Resume_devices:
 	/* We may need to release the preallocated image pages here. */
 	if (error || !in_suspend)
 		swsusp_free();
@@ -399,10 +385,6 @@ int hibernation_snapshot(int platform_mode)
 	platform_end(platform_mode);
 	return error;
 
- Recover_platform:
-	platform_recover(platform_mode);
-	goto Resume_devices;
-
 Cleanup:
 	swsusp_free();
 	goto Close;
@@ -590,9 +572,6 @@ int hibernation_platform_enter(void)
 static void power_down(void)
 {
 	switch (hibernation_mode) {
-	case HIBERNATION_TEST:
-	case HIBERNATION_TESTPROC:
-		break;
 	case HIBERNATION_REBOOT:
 		kernel_restart(NULL);
 		break;
@@ -611,17 +590,6 @@ static void power_down(void)
 	while(1);
 }
 
-static int prepare_processes(void)
-{
-	int error = 0;
-
-	if (freeze_processes()) {
-		error = -EBUSY;
-		thaw_processes();
-	}
-	return error;
-}
-
 /**
 * hibernate - Carry out system hibernation, including saving the image.
 */
@@ -629,7 +597,7 @@ int hibernate(void)
 {
 	int error;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	/* The snapshot device should not be opened while we're running */
 	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
 		error = -EBUSY;
@@ -654,7 +622,7 @@ int hibernate(void)
 	sys_sync();
 	printk("done.\n");
 
-	error = prepare_processes();
+	error = freeze_processes();
 	if (error)
 		goto Finish;
@@ -697,7 +665,7 @@ int hibernate(void)
 	pm_restore_console();
 	atomic_inc(&snapshot_device_available);
  Unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 	return error;
 }
@@ -811,11 +779,13 @@ static int software_resume(void)
 		goto close_finish;
 
 	error = create_basic_memory_bitmaps();
-	if (error)
+	if (error) {
+		usermodehelper_enable();
 		goto close_finish;
+	}
 
 	pr_debug("PM: Preparing processes for restore.\n");
-	error = prepare_processes();
+	error = freeze_processes();
 	if (error) {
 		swsusp_close(FMODE_READ);
 		goto Done;
@@ -855,8 +825,6 @@ static const char * const hibernation_modes[] = {
 	[HIBERNATION_PLATFORM]	= "platform",
 	[HIBERNATION_SHUTDOWN]	= "shutdown",
 	[HIBERNATION_REBOOT]	= "reboot",
-	[HIBERNATION_TEST]	= "test",
-	[HIBERNATION_TESTPROC]	= "testproc",
 };
 
 /*
@@ -865,17 +833,15 @@ static const char * const hibernation_modes[] = {
 * Hibernation can be handled in several ways.  There are a few different ways
 * to put the system into the sleep state: using the platform driver (e.g. ACPI
 * or other hibernation_ops), powering it off or rebooting it (for testing
- * mostly), or using one of the two available test modes.
+ * mostly).
 *
 * The sysfs file /sys/power/disk provides an interface for selecting the
 * hibernation mode to use.  Reading from this file causes the available modes
- * to be printed.  There are 5 modes that can be supported:
+ * to be printed.  There are 3 modes that can be supported:
 *
 *	'platform'
 *	'shutdown'
 *	'reboot'
-*	'test'
-*	'testproc'
 *
 * If a platform hibernation driver is in use, 'platform' will be supported
 * and will be used by default.  Otherwise, 'shutdown' will be used by default.
@@ -899,8 +865,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
 		switch (i) {
 		case HIBERNATION_SHUTDOWN:
 		case HIBERNATION_REBOOT:
-		case HIBERNATION_TEST:
-		case HIBERNATION_TESTPROC:
 			break;
 		case HIBERNATION_PLATFORM:
 			if (hibernation_ops)
@@ -929,7 +893,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 	p = memchr(buf, '\n', n);
 	len = p ? p - buf : n;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
 		if (len == strlen(hibernation_modes[i])
 		    && !strncmp(buf, hibernation_modes[i], len)) {
@@ -941,8 +905,6 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 		switch (mode) {
 		case HIBERNATION_SHUTDOWN:
 		case HIBERNATION_REBOOT:
-		case HIBERNATION_TEST:
-		case HIBERNATION_TESTPROC:
 			hibernation_mode = mode;
 			break;
 		case HIBERNATION_PLATFORM:
@@ -957,7 +919,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
 	if (!error)
 		pr_debug("PM: Hibernation mode set to '%s'\n",
 			 hibernation_modes[mode]);
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 	return error ? error : n;
 }
@@ -984,9 +946,9 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
 	if (maj != MAJOR(res) || min != MINOR(res))
 		goto out;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	swsusp_resume_device = res;
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 	printk(KERN_INFO "PM: Starting manual resume from disk\n");
 	noresume = 0;
 	software_resume();
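
With the test-mode plumbing gone from power_down(), the only test hook left in the snapshot path is freezer_test_done, now shared with the rest of kernel/power (see the power.h hunk below) so that the SNAPSHOT_CREATE_IMAGE ioctl can thaw after a freezer-only test run. Schematically, the caller-side pattern (the actual kernel/power/user.c hunk later in this merge also handles the put_user() of in_suspend):

	error = hibernation_snapshot(data->platform_support);
	if (!error && freezer_test_done) {
		/* freezer-only test run: no image was created, just thaw */
		freezer_test_done = false;
		thaw_processes();
	}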

diff --git a/kernel/power/main.c b/kernel/power/main.c

@@ -3,7 +3,7 @@
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
- *
+ *
 * This file is released under the GPLv2
 *
 */
@@ -116,7 +116,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
 	p = memchr(buf, '\n', n);
 	len = p ? p - buf : n;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	level = TEST_FIRST;
 	for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
@@ -126,7 +126,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
 			break;
 		}
 
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return error ? error : n;
 }
@@ -240,7 +240,7 @@ struct kobject *power_kobj;
 *	'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
 *	'disk' (Suspend-to-Disk).
 *
- *	store() accepts one of those strings, translates it into the
+ *	store() accepts one of those strings, translates it into the
 *	proper enumerated value, and initiates a suspend transition.
 */
 static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -282,7 +282,7 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
 	/* First, check if we are requested to hibernate */
 	if (len == 4 && !strncmp(buf, "disk", len)) {
 		error = hibernate();
-		goto Exit;
+		goto Exit;
 	}
 
 #ifdef CONFIG_SUSPEND

diff --git a/kernel/power/power.h b/kernel/power/power.h

@@ -50,6 +50,8 @@ static inline char *check_image_kernel(struct swsusp_info *info)
 #define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)
 
 /* kernel/power/hibernate.c */
+extern bool freezer_test_done;
+
 extern int hibernation_snapshot(int platform_mode);
 extern int hibernation_restore(int platform_mode);
 extern int hibernation_platform_enter(void);

diff --git a/kernel/power/process.c b/kernel/power/process.c

@@ -22,16 +22,7 @@
 */
 #define TIMEOUT	(20 * HZ)
 
-static inline int freezable(struct task_struct * p)
-{
-	if ((p == current) ||
-	    (p->flags & PF_NOFREEZE) ||
-	    (p->exit_state != 0))
-		return 0;
-	return 1;
-}
-
-static int try_to_freeze_tasks(bool sig_only)
+static int try_to_freeze_tasks(bool user_only)
 {
 	struct task_struct *g, *p;
 	unsigned long end_time;
@@ -46,17 +37,14 @@ static int try_to_freeze_tasks(bool sig_only)
 
 	end_time = jiffies + TIMEOUT;
 
-	if (!sig_only)
+	if (!user_only)
 		freeze_workqueues_begin();
 
 	while (true) {
 		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (frozen(p) || !freezable(p))
-				continue;
-
-			if (!freeze_task(p, sig_only))
+			if (p == current || !freeze_task(p))
 				continue;
 
 			/*
@@ -77,7 +65,7 @@ static int try_to_freeze_tasks(bool sig_only)
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 
-		if (!sig_only) {
+		if (!user_only) {
 			wq_busy = freeze_workqueues_busy();
 			todo += wq_busy;
 		}
@@ -103,11 +91,6 @@ static int try_to_freeze_tasks(bool sig_only)
 	elapsed_csecs = elapsed_csecs64;
 
 	if (todo) {
-		/* This does not unfreeze processes that are already frozen
-		 * (we have slightly ugly calling convention in that respect,
-		 * and caller must call thaw_processes() if something fails),
-		 * but it cleans up leftover PF_FREEZE requests.
-		 */
 		printk("\n");
 		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
 		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
@@ -115,15 +98,11 @@ static int try_to_freeze_tasks(bool sig_only)
 		       elapsed_csecs / 100, elapsed_csecs % 100,
 		       todo - wq_busy, wq_busy);
 
-		thaw_workqueues();
-
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			task_lock(p);
-			if (!wakeup && freezing(p) && !freezer_should_skip(p))
+			if (!wakeup && !freezer_should_skip(p) &&
+			    p != current && freezing(p) && !frozen(p))
 				sched_show_task(p);
-			cancel_freezing(p);
-			task_unlock(p);
 		} while_each_thread(g, p);
 		read_unlock(&tasklist_lock);
 	} else {
@@ -136,12 +115,18 @@ static int try_to_freeze_tasks(bool sig_only)
 
 /**
 * freeze_processes - Signal user space processes to enter the refrigerator.
+ *
+ * On success, returns 0.  On failure, -errno and system is fully thawed.
 */
 int freeze_processes(void)
 {
 	int error;
 
+	if (!pm_freezing)
+		atomic_inc(&system_freezing_cnt);
+
 	printk("Freezing user space processes ... ");
+	pm_freezing = true;
 	error = try_to_freeze_tasks(true);
 	if (!error) {
 		printk("done.");
@@ -150,17 +135,22 @@ int freeze_processes(void)
 	printk("\n");
 	BUG_ON(in_atomic());
 
+	if (error)
+		thaw_processes();
 	return error;
 }
 
 /**
 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
+ *
+ * On success, returns 0.  On failure, -errno and system is fully thawed.
 */
 int freeze_kernel_threads(void)
 {
 	int error;
 
 	printk("Freezing remaining freezable tasks ... ");
+	pm_nosig_freezing = true;
 	error = try_to_freeze_tasks(false);
 	if (!error)
 		printk("done.");
@@ -168,37 +158,32 @@ int freeze_kernel_threads(void)
 	printk("\n");
 	BUG_ON(in_atomic());
 
+	if (error)
+		thaw_processes();
 	return error;
 }
 
-static void thaw_tasks(bool nosig_only)
-{
-	struct task_struct *g, *p;
-
-	read_lock(&tasklist_lock);
-	do_each_thread(g, p) {
-		if (!freezable(p))
-			continue;
-
-		if (nosig_only && should_send_signal(p))
-			continue;
-
-		if (cgroup_freezing_or_frozen(p))
-			continue;
-
-		thaw_process(p);
-	} while_each_thread(g, p);
-	read_unlock(&tasklist_lock);
-}
-
 void thaw_processes(void)
 {
+	struct task_struct *g, *p;
+
+	if (pm_freezing)
+		atomic_dec(&system_freezing_cnt);
+	pm_freezing = false;
+	pm_nosig_freezing = false;
+
 	oom_killer_enable();
 
 	printk("Restarting tasks ... ");
 
 	thaw_workqueues();
-	thaw_tasks(true);
-	thaw_tasks(false);
+
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		__thaw_task(p);
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+
 	schedule();
 	printk("done.\n");
 }
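
The kerneldoc added above spells out the new calling convention: freeze_processes() and freeze_kernel_threads() now thaw everything themselves on failure, so callers no longer pair them with thaw_processes() in their error paths:

	error = freeze_processes();
	if (error)
		return error;	/* already fully thawed; no cleanup needed here */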

diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c

@@ -42,9 +42,9 @@ static const struct platform_suspend_ops *suspend_ops;
 */
 void suspend_set_ops(const struct platform_suspend_ops *ops)
 {
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 	suspend_ops = ops;
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 }
 EXPORT_SYMBOL_GPL(suspend_set_ops);
@@ -106,13 +106,11 @@ static int suspend_prepare(void)
 		goto Finish;
 
 	error = suspend_freeze_processes();
-	if (error) {
-		suspend_stats.failed_freeze++;
-		dpm_save_failed_step(SUSPEND_FREEZE);
-	} else
+	if (!error)
 		return 0;
 
-	suspend_thaw_processes();
+	suspend_stats.failed_freeze++;
+	dpm_save_failed_step(SUSPEND_FREEZE);
 	usermodehelper_enable();
 Finish:
 	pm_notifier_call_chain(PM_POST_SUSPEND);

diff --git a/kernel/power/user.c b/kernel/power/user.c

@@ -30,28 +30,6 @@
 
 #include "power.h"
 
-/*
- * NOTE: The SNAPSHOT_SET_SWAP_FILE and SNAPSHOT_PMOPS ioctls are obsolete and
- * will be removed in the future.  They are only preserved here for
- * compatibility with existing userland utilities.
- */
-#define SNAPSHOT_SET_SWAP_FILE	_IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
-#define SNAPSHOT_PMOPS		_IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)
-
-#define PMOPS_PREPARE	1
-#define PMOPS_ENTER	2
-#define PMOPS_FINISH	3
-
-/*
- * NOTE: The following ioctl definitions are wrong and have been replaced with
- * correct ones.  They are only preserved here for compatibility with existing
- * userland utilities and will be removed in the future.
- */
-#define SNAPSHOT_ATOMIC_SNAPSHOT	_IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
-#define SNAPSHOT_SET_IMAGE_SIZE	_IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
-#define SNAPSHOT_AVAIL_SWAP		_IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
-#define SNAPSHOT_GET_SWAP_PAGE	_IOR(SNAPSHOT_IOC_MAGIC, 8, void *)
-
 #define SNAPSHOT_MINOR	231
@@ -71,7 +49,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 	struct snapshot_data *data;
 	int error;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
 		error = -EBUSY;
@@ -123,7 +101,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 	data->platform_support = 0;
 
  Unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return error;
 }
@@ -132,7 +110,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
 {
 	struct snapshot_data *data;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	swsusp_free();
 	free_basic_memory_bitmaps();
@@ -146,7 +124,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
 			PM_POST_HIBERNATION : PM_POST_RESTORE);
 	atomic_inc(&snapshot_device_available);
 
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return 0;
 }
@@ -158,7 +136,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
 	ssize_t res;
 	loff_t pg_offp = *offp & ~PAGE_MASK;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	data = filp->private_data;
 	if (!data->ready) {
@@ -179,7 +157,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
 		*offp += res;
 
  Unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return res;
 }
@@ -191,7 +169,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
 	ssize_t res;
 	loff_t pg_offp = *offp & ~PAGE_MASK;
 
-	mutex_lock(&pm_mutex);
+	lock_system_sleep();
 
 	data = filp->private_data;
@@ -208,20 +186,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
 	if (res > 0)
 		*offp += res;
 unlock:
-	mutex_unlock(&pm_mutex);
+	unlock_system_sleep();
 
 	return res;
 }
 
-static void snapshot_deprecated_ioctl(unsigned int cmd)
-{
-	if (printk_ratelimit())
-		printk(KERN_NOTICE "%pf: ioctl '%.8x' is deprecated and will "
-				"be removed soon, update your suspend-to-disk "
-				"utilities\n",
-				__builtin_return_address(0), cmd);
-}
-
 static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 							unsigned long arg)
 {
@@ -257,11 +226,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 			break;
 
 		error = freeze_processes();
-		if (error) {
-			thaw_processes();
+		if (error)
 			usermodehelper_enable();
-		}
-		if (!error)
+		else
 			data->frozen = 1;
 		break;
@@ -274,8 +241,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		data->frozen = 0;
 		break;
 
-	case SNAPSHOT_ATOMIC_SNAPSHOT:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_CREATE_IMAGE:
 		if (data->mode != O_RDONLY || !data->frozen || data->ready) {
 			error = -EPERM;
@@ -283,10 +248,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		}
 		pm_restore_gfp_mask();
 		error = hibernation_snapshot(data->platform_support);
-		if (!error)
+		if (!error) {
 			error = put_user(in_suspend, (int __user *)arg);
-		if (!error)
-			data->ready = 1;
+			if (!error && !freezer_test_done)
+				data->ready = 1;
+			if (freezer_test_done) {
+				freezer_test_done = false;
+				thaw_processes();
+			}
+		}
 		break;
 
 	case SNAPSHOT_ATOMIC_RESTORE:
@@ -305,8 +275,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		data->ready = 0;
 		break;
 
-	case SNAPSHOT_SET_IMAGE_SIZE:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_PREF_IMAGE_SIZE:
 		image_size = arg;
 		break;
@@ -321,16 +289,12 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		error = put_user(size, (loff_t __user *)arg);
 		break;
 
-	case SNAPSHOT_AVAIL_SWAP:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_AVAIL_SWAP_SIZE:
 		size = count_swap_pages(data->swap, 1);
 		size <<= PAGE_SHIFT;
 		error = put_user(size, (loff_t __user *)arg);
 		break;
 
-	case SNAPSHOT_GET_SWAP_PAGE:
-		snapshot_deprecated_ioctl(cmd);
 	case SNAPSHOT_ALLOC_SWAP_PAGE:
 		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
 			error = -ENODEV;
@@ -353,27 +317,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		free_all_swap_pages(data->swap);
 		break;
 
-	case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
-		snapshot_deprecated_ioctl(cmd);
-		if (!swsusp_swap_in_use()) {
-			/*
-			 * User space encodes device types as two-byte values,
-			 * so we need to recode them
-			 */
-			if (old_decode_dev(arg)) {
-				data->swap = swap_type_of(old_decode_dev(arg),
-							  0, NULL);
-				if (data->swap < 0)
-					error = -ENODEV;
-			} else {
-				data->swap = -1;
-				error = -EINVAL;
-			}
-		} else {
-			error = -EPERM;
-		}
-		break;
-
 	case SNAPSHOT_S2RAM:
 		if (!data->frozen) {
 			error = -EPERM;
@@ -396,33 +339,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		error = hibernation_platform_enter();
 		break;
 
-	case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
-		snapshot_deprecated_ioctl(cmd);
-		error = -EINVAL;
-
-		switch (arg) {
-
-		case PMOPS_PREPARE:
-			data->platform_support = 1;
-			error = 0;
-			break;
-
-		case PMOPS_ENTER:
-			if (data->platform_support)
-				error = hibernation_platform_enter();
-			break;
-
-		case PMOPS_FINISH:
-			if (data->platform_support)
-				error = 0;
-			break;
-
-		default:
-			printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);
-
-		}
-		break;
-
 	case SNAPSHOT_SET_SWAP_AREA:
 		if (swsusp_swap_in_use()) {
 			error = -EPERM;