irq_work: Add generic hardirq context callbacks

Provide a mechanism that allows running code in IRQ context. It is
most useful for NMI code that needs to interact with the rest of the
system -- for example, waking up a task to drain buffers.
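
As a hypothetical illustration of the intended use, assuming the
interface this patch introduces -- struct irq_work, init_irq_work()
and irq_work_queue() -- an NMI path queues a pre-initialised entry and
its callback later runs in hard IRQ context, where waking a task is
allowed. All names below are made up for the example:

#include <linux/init.h>
#include <linux/irq_work.h>
#include <linux/sched.h>

static struct task_struct *drain_task;	/* consumer of the NMI buffer */
static struct irq_work drain_work;

/* Runs later in hard IRQ context, where taking scheduler locks is fine. */
static void drain_buffers(struct irq_work *work)
{
	if (drain_task)
		wake_up_process(drain_task);
}

static void example_nmi_handler(void)
{
	/* ... record data into a buffer ... */

	/*
	 * Waking the task directly from NMI context is not safe, so
	 * defer it; the callback runs from the self-IPI or timer tick.
	 */
	irq_work_queue(&drain_work);
}

static int __init drain_init(void)
{
	init_irq_work(&drain_work, drain_buffers);
	return 0;
}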

Perf currently has such a mechanism, so extract that and provide it as
a generic feature, independent of perf so that others may also
benefit.

The IRQ context callback is raised through a self-IPI where possible;
on architectures like powerpc, the decrementer (the built-in timer
facility) is instead programmed to generate an interrupt immediately.
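
Conceptually this is a per-architecture "raise" hook with a safe
default; the sketch below is illustrative only (the hook name and
wiring are assumptions, not lifted verbatim from this patch).
Architectures that can self-interrupt override it, powerpc does so by
poking the decrementer (see the time.c hunk below), and everything
else leaves it empty and relies on the fallback described next.

/* Illustrative only: the name arch_irq_work_raise() is an assumption. */
void __weak arch_irq_work_raise(void)
{
	/*
	 * No cheap way to interrupt ourselves: pending work is picked
	 * up by the timer tick, or by an explicit irq_work_run() call
	 * at the end of an interrupt handler.
	 */
}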

Architectures that don't have anything like this have to make do with
a callback from the timer tick. These architectures can call
irq_work_run() at the tail of any IRQ handlers that might enqueue such
work (such as the perf IRQ handler) to avoid undue latencies in
processing the work.
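
For such an architecture that boils down to something like the sketch
below; the handler name is made up, the relevant part is only the
irq_work_run() call at the tail:

#include <linux/interrupt.h>
#include <linux/irq_work.h>

/* Hypothetical PMU overflow handler on an arch without self-IPIs. */
static irqreturn_t pmu_overflow_handler(int irq, void *dev_id)
{
	/* ... handle the overflow; this path may call irq_work_queue() ... */

	/* Drain anything queued above rather than waiting for the tick. */
	irq_work_run();

	return IRQ_HANDLED;
}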

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
[ various fixes ]
Signed-off-by: Huang Ying <ying.huang@intel.com>
LKML-Reference: <1287036094.7768.291.camel@yhuang-dev>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

@@ -53,7 +53,7 @@
 #include <linux/posix-timers.h>
 #include <linux/irq.h>
 #include <linux/delay.h>
-#include <linux/perf_event.h>
+#include <linux/irq_work.h>
 #include <asm/trace.h>
 #include <asm/io.h>
@@ -493,60 +493,60 @@ void __init iSeries_time_init_early(void)
 }
 #endif /* CONFIG_PPC_ISERIES */
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_IRQ_WORK
 /*
  * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
  */
 #ifdef CONFIG_PPC64
-static inline unsigned long test_perf_event_pending(void)
+static inline unsigned long test_irq_work_pending(void)
 {
 	unsigned long x;
 	asm volatile("lbz %0,%1(13)"
		: "=r" (x)
-		: "i" (offsetof(struct paca_struct, perf_event_pending)));
+		: "i" (offsetof(struct paca_struct, irq_work_pending)));
 	return x;
 }
-static inline void set_perf_event_pending_flag(void)
+static inline void set_irq_work_pending_flag(void)
 {
 	asm volatile("stb %0,%1(13)" : :
		"r" (1),
-		"i" (offsetof(struct paca_struct, perf_event_pending)));
+		"i" (offsetof(struct paca_struct, irq_work_pending)));
 }
-static inline void clear_perf_event_pending(void)
+static inline void clear_irq_work_pending(void)
 {
 	asm volatile("stb %0,%1(13)" : :
		"r" (0),
-		"i" (offsetof(struct paca_struct, perf_event_pending)));
+		"i" (offsetof(struct paca_struct, irq_work_pending)));
 }
 #else /* 32-bit */
-DEFINE_PER_CPU(u8, perf_event_pending);
+DEFINE_PER_CPU(u8, irq_work_pending);
-#define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1
-#define test_perf_event_pending() __get_cpu_var(perf_event_pending)
-#define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0
+#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1
+#define test_irq_work_pending() __get_cpu_var(irq_work_pending)
+#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0
 #endif /* 32 vs 64 bit */
-void set_perf_event_pending(void)
+void set_irq_work_pending(void)
 {
 	preempt_disable();
-	set_perf_event_pending_flag();
+	set_irq_work_pending_flag();
 	set_dec(1);
 	preempt_enable();
 }
-#else /* CONFIG_PERF_EVENTS */
+#else /* CONFIG_IRQ_WORK */
-#define test_perf_event_pending() 0
-#define clear_perf_event_pending()
+#define test_irq_work_pending() 0
+#define clear_irq_work_pending()
-#endif /* CONFIG_PERF_EVENTS */
+#endif /* CONFIG_IRQ_WORK */
 /*
  * For iSeries shared processors, we have to let the hypervisor
@@ -587,9 +587,9 @@ void timer_interrupt(struct pt_regs * regs)
 	calculate_steal_time();
-	if (test_perf_event_pending()) {
-		clear_perf_event_pending();
-		perf_event_do_pending();
+	if (test_irq_work_pending()) {
+		clear_irq_work_pending();
+		irq_work_run();
 	}
 #ifdef CONFIG_PPC_ISERIES