Merge branch 'dt/gic' into next/dt
Conflicts:
	arch/arm/include/asm/localtimer.h
	arch/arm/mach-msm/board-msm8x60.c
	arch/arm/mach-omap2/board-generic.c
include/linux/cpu_pm.h (new file, 109 lines)
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ *	Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_CPU_PM_H
+#define _LINUX_CPU_PM_H
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+/*
+ * When a CPU goes to a low power state that turns off power to the CPU's
+ * power domain, the contents of some blocks (floating point coprocessors,
+ * interrupt controllers, caches, timers) in the same power domain can
+ * be lost. The cpm_pm notifiers provide a method for platform idle, suspend,
+ * and hotplug implementations to notify the drivers for these blocks that
+ * they may be reset.
+ *
+ * All cpu_pm notifications must be called with interrupts disabled.
+ *
+ * The notifications are split into two classes: CPU notifications and CPU
+ * cluster notifications.
+ *
+ * CPU notifications apply to a single CPU and must be called on the affected
+ * CPU. They are used to save per-cpu context for affected blocks.
+ *
+ * CPU cluster notifications apply to all CPUs in a single power domain. They
+ * are used to save any global context for affected blocks, and must be called
+ * after all the CPUs in the power domain have been notified of the low power
+ * state.
+ */
+
+/*
+ * Event codes passed as unsigned long val to notifier calls
+ */
+enum cpu_pm_event {
+	/* A single cpu is entering a low power state */
+	CPU_PM_ENTER,
+
+	/* A single cpu failed to enter a low power state */
+	CPU_PM_ENTER_FAILED,
+
+	/* A single cpu is exiting a low power state */
+	CPU_PM_EXIT,
+
+	/* A cpu power domain is entering a low power state */
+	CPU_CLUSTER_PM_ENTER,
+
+	/* A cpu power domain failed to enter a low power state */
+	CPU_CLUSTER_PM_ENTER_FAILED,
+
+	/* A cpu power domain is exiting a low power state */
+	CPU_CLUSTER_PM_EXIT,
+};
+
+#ifdef CONFIG_CPU_PM
+int cpu_pm_register_notifier(struct notifier_block *nb);
+int cpu_pm_unregister_notifier(struct notifier_block *nb);
+int cpu_pm_enter(void);
+int cpu_pm_exit(void);
+int cpu_cluster_pm_enter(void);
+int cpu_cluster_pm_exit(void);
+
+#else
+
+static inline int cpu_pm_register_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int cpu_pm_unregister_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int cpu_pm_enter(void)
+{
+	return 0;
+}
+
+static inline int cpu_pm_exit(void)
+{
+	return 0;
+}
+
+static inline int cpu_cluster_pm_enter(void)
+{
+	return 0;
+}
+
+static inline int cpu_cluster_pm_exit(void)
+{
+	return 0;
+}
+#endif
+#endif
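The new header above defines the cpu_pm notifier API: drivers for blocks that share the CPU's power domain register a notifier, and platform idle, suspend and hotplug code calls cpu_pm_enter()/cpu_pm_exit() on each CPU, plus cpu_cluster_pm_enter()/cpu_cluster_pm_exit() around the last CPU in the domain, always with interrupts disabled. A minimal sketch of the driver side follows; it is illustrative only, is not part of this commit, and the foo_* names are invented.

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int foo_cpu_pm_notify(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* Runs on the CPU entering the low power state, IRQs off:
		 * save this CPU's context for the block. */
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* Entry aborted or finished: restore the per-CPU context. */
		break;
	case CPU_CLUSTER_PM_ENTER:
		/* Last CPU in the power domain going down: save any context
		 * shared by the whole domain. */
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		/* Power domain is back: restore the shared context. */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block foo_cpu_pm_nb = {
	.notifier_call = foo_cpu_pm_notify,
};

static int __init foo_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&foo_cpu_pm_nb);
}
core_initcall(foo_cpu_pm_init);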
@@ -95,6 +95,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @flags: flags (see IRQF_* above)
  * @name: name of the device
  * @dev_id: cookie to identify the device
+ * @percpu_dev_id: cookie to identify the device
  * @next: pointer to the next irqaction for shared interrupts
  * @irq: interrupt number
  * @dir: pointer to the proc/irq/NN/name entry
@@ -104,17 +105,18 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @thread_mask: bitmask for keeping track of @thread activity
  */
 struct irqaction {
-	irq_handler_t handler;
-	unsigned long flags;
-	void *dev_id;
-	struct irqaction *next;
-	int irq;
-	irq_handler_t thread_fn;
-	struct task_struct *thread;
-	unsigned long thread_flags;
-	unsigned long thread_mask;
-	const char *name;
-	struct proc_dir_entry *dir;
+	irq_handler_t		handler;
+	unsigned long		flags;
+	void			*dev_id;
+	void __percpu		*percpu_dev_id;
+	struct irqaction	*next;
+	int			irq;
+	irq_handler_t		thread_fn;
+	struct task_struct	*thread;
+	unsigned long		thread_flags;
+	unsigned long		thread_mask;
+	const char		*name;
+	struct proc_dir_entry	*dir;
 } ____cacheline_internodealigned_in_smp;
 
 extern irqreturn_t no_action(int cpl, void *dev_id);
@@ -136,6 +138,10 @@ extern int __must_check
 request_any_context_irq(unsigned int irq, irq_handler_t handler,
 		unsigned long flags, const char *name, void *dev_id);
 
+extern int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+		   const char *devname, void __percpu *percpu_dev_id);
+
 extern void exit_irq_thread(void);
 #else
 
@@ -164,10 +170,18 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
 	return request_irq(irq, handler, flags, name, dev_id);
 }
 
+static inline int __must_check
+request_percpu_irq(unsigned int irq, irq_handler_t handler,
+		   const char *devname, void __percpu *percpu_dev_id)
+{
+	return request_irq(irq, handler, 0, devname, percpu_dev_id);
+}
+
 static inline void exit_irq_thread(void) { }
 #endif
 
 extern void free_irq(unsigned int, void *);
+extern void free_percpu_irq(unsigned int, void __percpu *);
 
 struct device;
 
@@ -207,7 +221,9 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
 
 extern void disable_irq_nosync(unsigned int irq);
 extern void disable_irq(unsigned int irq);
+extern void disable_percpu_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
+extern void enable_percpu_irq(unsigned int irq, unsigned int type);
 
 /* The following three functions are for the core kernel use only. */
 #ifdef CONFIG_GENERIC_HARDIRQS
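The hunks above, apparently from include/linux/interrupt.h, add struct irqaction::percpu_dev_id and the request_percpu_irq(), free_percpu_irq(), enable_percpu_irq() and disable_percpu_irq() calls: one registration covers an interrupt line that exists once per CPU, the __percpu dev_id gives each handler invocation its own CPU's data, and each CPU enables only its own copy of the line. A hedged sketch of a caller follows; the foo_* names are invented and this is not code from the commit.

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>

struct foo_timer {
	unsigned long ticks;		/* private to one CPU */
};

static struct foo_timer __percpu *foo_timers;

static irqreturn_t foo_timer_handler(int irq, void *dev_id)
{
	/* dev_id is this CPU's slot of the __percpu pointer */
	struct foo_timer *t = dev_id;

	t->ticks++;
	return IRQ_HANDLED;
}

static int foo_timer_setup(unsigned int irq)
{
	int err;

	foo_timers = alloc_percpu(struct foo_timer);
	if (!foo_timers)
		return -ENOMEM;

	/* One call registers the handler for every CPU's copy of the line. */
	err = request_percpu_irq(irq, foo_timer_handler, "foo_timer",
				 foo_timers);
	if (err) {
		free_percpu(foo_timers);
		return err;
	}

	/* The line is not auto-enabled: each CPU enables only its own copy,
	 * typically from a CPU-local setup path. */
	enable_percpu_irq(irq, 0);
	return 0;
}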
@@ -66,6 +66,7 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
  * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set)
  * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
  * IRQ_NESTED_TRHEAD - Interrupt nests into another thread
+ * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
  */
 enum {
 	IRQ_TYPE_NONE = 0x00000000,
@@ -88,12 +89,13 @@ enum {
 	IRQ_MOVE_PCNTXT = (1 << 14),
 	IRQ_NESTED_THREAD = (1 << 15),
 	IRQ_NOTHREAD = (1 << 16),
+	IRQ_PER_CPU_DEVID = (1 << 17),
 };
 
 #define IRQF_MODIFY_MASK \
 	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
 	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
-	 IRQ_PER_CPU | IRQ_NESTED_THREAD)
+	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
 
 #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
 
@@ -336,12 +338,14 @@ struct irq_chip {
  * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path
  * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks
  *                            when irq enabled
+ * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
  */
 enum {
 	IRQCHIP_SET_TYPE_MASKED = (1 << 0),
 	IRQCHIP_EOI_IF_HANDLED = (1 << 1),
 	IRQCHIP_MASK_ON_SUSPEND = (1 << 2),
 	IRQCHIP_ONOFFLINE_ENABLED = (1 << 3),
+	IRQCHIP_SKIP_SET_WAKE = (1 << 4),
 };
 
 /* This include will go away once we isolated irq_desc usage to core code */
@@ -365,6 +369,8 @@ enum {
 struct irqaction;
 extern int setup_irq(unsigned int irq, struct irqaction *new);
 extern void remove_irq(unsigned int irq, struct irqaction *act);
+extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
+extern void remove_percpu_irq(unsigned int irq, struct irqaction *act);
 
 extern void irq_cpu_online(void);
 extern void irq_cpu_offline(void);
@@ -392,6 +398,7 @@ extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 
@@ -420,6 +427,8 @@ static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *c
 	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
 }
 
+extern int irq_set_percpu_devid(unsigned int irq);
+
 extern void
 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name);
@@ -481,6 +490,13 @@ static inline void irq_set_nested_thread(unsigned int irq, bool nest)
 		irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
 }
 
+static inline void irq_set_percpu_devid_flags(unsigned int irq)
+{
+	irq_set_status_flags(irq,
+			     IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
+			     IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
+}
+
 /* Handle dynamic irq creation and destruction */
 extern unsigned int create_irq_nr(unsigned int irq_want, int node);
 extern int create_irq(void);
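The hunks above, apparently from include/linux/irq.h, add the IRQ_PER_CPU_DEVID status flag, irq_set_percpu_devid()/irq_set_percpu_devid_flags(), and the handle_percpu_devid_irq() flow handler that hands each CPU its own dev_id. Below is a sketch of how an interrupt controller driver might mark one of its per-CPU lines; it is illustrative only, the foo_* names are invented, and it assumes irq_set_percpu_devid() allocates the per-cpu enable tracking and applies the flags from irq_set_percpu_devid_flags().

#include <linux/init.h>
#include <linux/irq.h>

static struct irq_chip foo_chip = {
	.name = "FOO",
	/* .irq_mask/.irq_unmask/.irq_eoi callbacks would go here */
};

static void __init foo_setup_percpu_line(unsigned int irq)
{
	/* Assumed to allocate the per-cpu enable mask and apply
	 * IRQ_PER_CPU_DEVID, IRQ_NOAUTOEN, etc. to the descriptor. */
	irq_set_percpu_devid(irq);

	/* Flow handler that passes each CPU its own dev_id slot. */
	irq_set_chip_and_handler(irq, &foo_chip, handle_percpu_devid_irq);
}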
@@ -53,6 +53,7 @@ struct irq_desc {
 	unsigned long last_unhandled; /* Aging timer for unhandled count */
 	unsigned int irqs_unhandled;
 	raw_spinlock_t lock;
+	struct cpumask *percpu_enabled;
 #ifdef CONFIG_SMP
 	const struct cpumask *affinity_hint;
 	struct irq_affinity_notify *affinity_notify;
@@ -47,6 +47,7 @@ struct irq_domain_ops {
  * of the irq_domain is responsible for allocating the array of
  * irq_desc structures.
  * @nr_irq: Number of irqs managed by the irq domain
+ * @hwirq_base: Starting number for hwirqs managed by the irq domain
  * @ops: pointer to irq_domain methods
  * @priv: private data pointer for use by owner. Not touched by irq_domain
  * core code.
@@ -57,6 +58,7 @@ struct irq_domain {
 	struct list_head list;
 	unsigned int irq_base;
 	unsigned int nr_irq;
+	unsigned int hwirq_base;
 	const struct irq_domain_ops *ops;
 	void *priv;
 	struct device_node *of_node;
@@ -72,9 +74,21 @@ struct irq_domain {
 static inline unsigned int irq_domain_to_irq(struct irq_domain *d,
 					     unsigned long hwirq)
 {
-	return d->ops->to_irq ? d->ops->to_irq(d, hwirq) : d->irq_base + hwirq;
+	if (d->ops->to_irq)
+		return d->ops->to_irq(d, hwirq);
+	if (WARN_ON(hwirq < d->hwirq_base))
+		return 0;
+	return d->irq_base + hwirq - d->hwirq_base;
 }
 
+#define irq_domain_for_each_hwirq(d, hw) \
+	for (hw = d->hwirq_base; hw < d->hwirq_base + d->nr_irq; hw++)
+
+#define irq_domain_for_each_irq(d, hw, irq) \
+	for (hw = d->hwirq_base, irq = irq_domain_to_irq(d, hw); \
+	     hw < d->hwirq_base + d->nr_irq; \
+	     hw++, irq = irq_domain_to_irq(d, hw))
+
 extern void irq_domain_add(struct irq_domain *domain);
 extern void irq_domain_del(struct irq_domain *domain);
 #endif /* CONFIG_IRQ_DOMAIN */
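The hunks above, apparently from include/linux/irqdomain.h, add hwirq_base to struct irq_domain, make irq_domain_to_irq() fall back to irq_base + (hwirq - hwirq_base) when no ->to_irq() callback is supplied, and add the irq_domain_for_each_hwirq()/irq_domain_for_each_irq() iterators. A small illustrative sketch follows; the foo_* names are invented and the empty ops table simply forces the fallback path.

#include <linux/irqdomain.h>
#include <linux/kernel.h>

/* No .to_irq callback, so irq_domain_to_irq() takes the linear fallback. */
static const struct irq_domain_ops foo_domain_ops;

/* Hypothetical domain: hwirqs 16..31 mapped onto Linux irqs 64..79. */
static struct irq_domain foo_domain = {
	.irq_base	= 64,
	.hwirq_base	= 16,
	.nr_irq		= 16,
	.ops		= &foo_domain_ops,
};

static void foo_domain_example(void)
{
	unsigned int hwirq, irq;

	/* 64 + (18 - 16) = 66 */
	irq = irq_domain_to_irq(&foo_domain, 18);
	pr_info("hwirq 18 -> irq %u\n", irq);

	/* Walk every hwirq/irq pair the domain manages. */
	irq_domain_for_each_irq(&foo_domain, hwirq, irq)
		pr_info("hwirq %u -> irq %u\n", hwirq, irq);
}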
@@ -33,6 +33,8 @@ struct of_irq {
 	u32 specifier[OF_MAX_IRQ_SPEC]; /* Specifier copy */
 };
 
+typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
+
 /*
  * Workarounds only applied to 32bit powermac machines
  */
@@ -73,6 +75,7 @@ extern int of_irq_to_resource_table(struct device_node *dev,
 		struct resource *res, int nr_irqs);
 extern struct device_node *of_irq_find_parent(struct device_node *child);
 
+extern void of_irq_init(const struct of_device_id *matches);
 
 #endif /* CONFIG_OF_IRQ */
 #endif /* CONFIG_OF */
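The hunks above, apparently from include/linux/of_irq.h, add of_irq_init_cb_t and of_irq_init(), which scans the device tree for matching interrupt controllers and calls their init callbacks parents before children, so cascaded controllers come up in order. A hedged sketch of a board file using it follows; the "acme,foo-intc" compatible string and the foo_* names are invented.

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/* Init callback matching of_irq_init_cb_t: "parent" is the node of the
 * already-initialised parent controller, or NULL for the root. */
static int __init foo_intc_of_init(struct device_node *node,
				   struct device_node *parent)
{
	/* map registers from "node", register the irq_chip/domain here */
	return 0;
}

static const struct of_device_id foo_irq_match[] __initconst = {
	{ .compatible = "acme,foo-intc", .data = foo_intc_of_init },
	{ /* sentinel */ }
};

static void __init foo_machine_init_irq(void)
{
	/* Walks the DT and calls each matching init, parents first. */
	of_irq_init(foo_irq_match);
}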