Merge branch 'pm-genirq' into acpi-pm
@@ -342,6 +342,31 @@ static bool irq_check_poll(struct irq_desc *desc)
 	return irq_wait_for_poll(desc);
 }
 
+static bool irq_may_run(struct irq_desc *desc)
+{
+	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
+
+	/*
+	 * If the interrupt is not in progress and is not an armed
+	 * wakeup interrupt, proceed.
+	 */
+	if (!irqd_has_set(&desc->irq_data, mask))
+		return true;
+
+	/*
+	 * If the interrupt is an armed wakeup source, mark it pending
+	 * and suspended, disable it and notify the pm core about the
+	 * event.
+	 */
+	if (irq_pm_check_wakeup(desc))
+		return false;
+
+	/*
+	 * Handle a potential concurrent poll on a different core.
+	 */
+	return irq_check_poll(desc);
+}
+
 /**
  * handle_simple_irq - Simple and software-decoded IRQs.
  * @irq:	the interrupt number
@@ -359,9 +384,8 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
 	raw_spin_lock(&desc->lock);
 
-	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
-		if (!irq_check_poll(desc))
-			goto out_unlock;
+	if (!irq_may_run(desc))
+		goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
@@ -412,9 +436,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	raw_spin_lock(&desc->lock);
 	mask_ack_irq(desc);
 
-	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
-		if (!irq_check_poll(desc))
-			goto out_unlock;
+	if (!irq_may_run(desc))
+		goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
@@ -485,9 +508,8 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 
 	raw_spin_lock(&desc->lock);
 
-	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
-		if (!irq_check_poll(desc))
-			goto out;
+	if (!irq_may_run(desc))
+		goto out;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
@@ -541,19 +563,23 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 	raw_spin_lock(&desc->lock);
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
-	/*
-	 * If we're currently running this IRQ, or its disabled,
-	 * we shouldn't process the IRQ. Mark it pending, handle
-	 * the necessary masking and go out
-	 */
-	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
-		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
-		if (!irq_check_poll(desc)) {
-			desc->istate |= IRQS_PENDING;
-			mask_ack_irq(desc);
-			goto out_unlock;
-		}
+
+	if (!irq_may_run(desc)) {
+		desc->istate |= IRQS_PENDING;
+		mask_ack_irq(desc);
+		goto out_unlock;
+	}
+
+	/*
+	 * If its disabled or no action available then mask it and get
+	 * out of here.
+	 */
+	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
+		desc->istate |= IRQS_PENDING;
+		mask_ack_irq(desc);
+		goto out_unlock;
 	}
+
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/* Start handling the irq */
@@ -602,18 +628,21 @@ void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
 	raw_spin_lock(&desc->lock);
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
-	/*
-	 * If we're currently running this IRQ, or its disabled,
-	 * we shouldn't process the IRQ. Mark it pending, handle
-	 * the necessary masking and go out
-	 */
-	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
-		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
-		if (!irq_check_poll(desc)) {
-			desc->istate |= IRQS_PENDING;
-			goto out_eoi;
-		}
+
+	if (!irq_may_run(desc)) {
+		desc->istate |= IRQS_PENDING;
+		goto out_eoi;
+	}
+
+	/*
+	 * If its disabled or no action available then mask it and get
+	 * out of here.
+	 */
+	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
+		desc->istate |= IRQS_PENDING;
+		goto out_eoi;
 	}
+
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	do {
@@ -63,8 +63,8 @@ enum {
 
 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		unsigned long flags);
-extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
-extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
+extern void __disable_irq(struct irq_desc *desc, unsigned int irq);
+extern void __enable_irq(struct irq_desc *desc, unsigned int irq);
 
 extern int irq_startup(struct irq_desc *desc, bool resend);
 extern void irq_shutdown(struct irq_desc *desc);
@@ -194,3 +194,15 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *d
 	__this_cpu_inc(*desc->kstat_irqs);
 	__this_cpu_inc(kstat.irqs_sum);
 }
+
+#ifdef CONFIG_PM_SLEEP
+bool irq_pm_check_wakeup(struct irq_desc *desc);
+void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
+void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
+#else
+static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
+static inline void
+irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
+static inline void
+irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
+#endif
@@ -382,14 +382,8 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 }
 #endif
 
-void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+void __disable_irq(struct irq_desc *desc, unsigned int irq)
 {
-	if (suspend) {
-		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
-			return;
-		desc->istate |= IRQS_SUSPENDED;
-	}
-
 	if (!desc->depth++)
 		irq_disable(desc);
 }
@@ -401,7 +395,7 @@ static int __disable_irq_nosync(unsigned int irq)
 
 	if (!desc)
 		return -EINVAL;
-	__disable_irq(desc, irq, false);
+	__disable_irq(desc, irq);
 	irq_put_desc_busunlock(desc, flags);
 	return 0;
 }
@@ -442,20 +436,8 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(disable_irq);
 
-void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
+void __enable_irq(struct irq_desc *desc, unsigned int irq)
 {
-	if (resume) {
-		if (!(desc->istate & IRQS_SUSPENDED)) {
-			if (!desc->action)
-				return;
-			if (!(desc->action->flags & IRQF_FORCE_RESUME))
-				return;
-			/* Pretend that it got disabled ! */
-			desc->depth++;
-		}
-		desc->istate &= ~IRQS_SUSPENDED;
-	}
-
 	switch (desc->depth) {
 	case 0:
  err_out:
@@ -497,7 +479,7 @@ void enable_irq(unsigned int irq)
 		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
 		goto out;
 
-	__enable_irq(desc, irq, false);
+	__enable_irq(desc, irq);
 out:
 	irq_put_desc_busunlock(desc, flags);
 }
@@ -1218,6 +1200,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	new->irq = irq;
 	*old_ptr = new;
 
+	irq_pm_install_action(desc, new);
+
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
@@ -1228,7 +1212,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	 */
 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
-		__enable_irq(desc, irq, false);
+		__enable_irq(desc, irq);
 	}
 
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -1336,6 +1320,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 	/* Found it - now remove it from the list of entries: */
 	*action_ptr = action->next;
 
+	irq_pm_remove_action(desc, action);
+
 	/* If this was the last handler, shut down the IRQ line: */
 	if (!desc->action) {
 		irq_shutdown(desc);
kernel/irq/pm.c | 159
@@ -9,17 +9,105 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 
 #include "internals.h"
 
+bool irq_pm_check_wakeup(struct irq_desc *desc)
+{
+	if (irqd_is_wakeup_armed(&desc->irq_data)) {
+		irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
+		desc->istate |= IRQS_SUSPENDED | IRQS_PENDING;
+		desc->depth++;
+		irq_disable(desc);
+		pm_system_wakeup();
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Called from __setup_irq() with desc->lock held after @action has
+ * been installed in the action chain.
+ */
+void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action)
+{
+	desc->nr_actions++;
+
+	if (action->flags & IRQF_FORCE_RESUME)
+		desc->force_resume_depth++;
+
+	WARN_ON_ONCE(desc->force_resume_depth &&
+		     desc->force_resume_depth != desc->nr_actions);
+
+	if (action->flags & IRQF_NO_SUSPEND)
+		desc->no_suspend_depth++;
+
+	WARN_ON_ONCE(desc->no_suspend_depth &&
+		     desc->no_suspend_depth != desc->nr_actions);
+}
+
+/*
+ * Called from __free_irq() with desc->lock held after @action has
+ * been removed from the action chain.
+ */
+void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action)
+{
+	desc->nr_actions--;
+
+	if (action->flags & IRQF_FORCE_RESUME)
+		desc->force_resume_depth--;
+
+	if (action->flags & IRQF_NO_SUSPEND)
+		desc->no_suspend_depth--;
+}
+
+static bool suspend_device_irq(struct irq_desc *desc, int irq)
+{
+	if (!desc->action || desc->no_suspend_depth)
+		return false;
+
+	if (irqd_is_wakeup_set(&desc->irq_data)) {
+		irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
+		/*
+		 * We return true here to force the caller to issue
+		 * synchronize_irq(). We need to make sure that the
+		 * IRQD_WAKEUP_ARMED is visible before we return from
+		 * suspend_device_irqs().
+		 */
+		return true;
+	}
+
+	desc->istate |= IRQS_SUSPENDED;
+	__disable_irq(desc, irq);
+
+	/*
+	 * Hardware which has no wakeup source configuration facility
+	 * requires that the non wakeup interrupts are masked at the
+	 * chip level. The chip implementation indicates that with
+	 * IRQCHIP_MASK_ON_SUSPEND.
+	 */
+	if (irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
+		mask_irq(desc);
+	return true;
+}
+
 /**
  * suspend_device_irqs - disable all currently enabled interrupt lines
  *
- * During system-wide suspend or hibernation device drivers need to be prevented
- * from receiving interrupts and this function is provided for this purpose.
- * It marks all interrupt lines in use, except for the timer ones, as disabled
- * and sets the IRQS_SUSPENDED flag for each of them.
+ * During system-wide suspend or hibernation device drivers need to be
+ * prevented from receiving interrupts and this function is provided
+ * for this purpose.
+ *
+ * So we disable all interrupts and mark them IRQS_SUSPENDED except
+ * for those which are unused, those which are marked as not
+ * suspendable via an interrupt request with the flag IRQF_NO_SUSPEND
+ * set and those which are marked as active wakeup sources.
+ *
+ * The active wakeup sources are handled by the flow handler entry
+ * code which checks for the IRQD_WAKEUP_ARMED flag, suspends the
+ * interrupt and notifies the pm core about the wakeup.
 */
 void suspend_device_irqs(void)
 {
@@ -28,18 +116,36 @@ void suspend_device_irqs(void)
 
 	for_each_irq_desc(irq, desc) {
 		unsigned long flags;
+		bool sync;
 
 		raw_spin_lock_irqsave(&desc->lock, flags);
-		__disable_irq(desc, irq, true);
+		sync = suspend_device_irq(desc, irq);
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
-	}
 
-	for_each_irq_desc(irq, desc)
-		if (desc->istate & IRQS_SUSPENDED)
+		if (sync)
 			synchronize_irq(irq);
+	}
 }
 EXPORT_SYMBOL_GPL(suspend_device_irqs);
 
+static void resume_irq(struct irq_desc *desc, int irq)
+{
+	irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
+
+	if (desc->istate & IRQS_SUSPENDED)
+		goto resume;
+
+	/* Force resume the interrupt? */
+	if (!desc->force_resume_depth)
+		return;
+
+	/* Pretend that it got disabled ! */
+	desc->depth++;
+resume:
+	desc->istate &= ~IRQS_SUSPENDED;
+	__enable_irq(desc, irq);
+}
+
 static void resume_irqs(bool want_early)
 {
 	struct irq_desc *desc;
@@ -54,7 +160,7 @@ static void resume_irqs(bool want_early)
 			continue;
 
 		raw_spin_lock_irqsave(&desc->lock, flags);
-		__enable_irq(desc, irq, true);
+		resume_irq(desc, irq);
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
@@ -93,38 +199,3 @@ void resume_device_irqs(void)
 	resume_irqs(false);
 }
 EXPORT_SYMBOL_GPL(resume_device_irqs);
-
-/**
- * check_wakeup_irqs - check if any wake-up interrupts are pending
- */
-int check_wakeup_irqs(void)
-{
-	struct irq_desc *desc;
-	int irq;
-
-	for_each_irq_desc(irq, desc) {
-		/*
-		 * Only interrupts which are marked as wakeup source
-		 * and have not been disabled before the suspend check
-		 * can abort suspend.
-		 */
-		if (irqd_is_wakeup_set(&desc->irq_data)) {
-			if (desc->depth == 1 && desc->istate & IRQS_PENDING)
-				return -EBUSY;
-			continue;
-		}
-		/*
-		 * Check the non wakeup interrupts whether they need
-		 * to be masked before finally going into suspend
-		 * state. That's for hardware which has no wakeup
-		 * source configuration facility. The chip
-		 * implementation indicates that with
-		 * IRQCHIP_MASK_ON_SUSPEND.
-		 */
-		if (desc->istate & IRQS_SUSPENDED &&
-		    irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
-			mask_irq(desc);
-	}
-
-	return 0;
-}
@@ -129,6 +129,7 @@ int freeze_processes(void)
 	if (!pm_freezing)
 		atomic_inc(&system_freezing_cnt);
 
+	pm_wakeup_clear();
 	printk("Freezing user space processes ... ");
 	pm_freezing = true;
 	error = try_to_freeze_tasks(true);