Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	drivers/net/bnx2x/bnx2x.h
@@ -3,6 +3,12 @@
  */
 #include <linux/irqdesc.h>
 
+#ifdef CONFIG_SPARSE_IRQ
+# define IRQ_BITMAP_BITS	(NR_IRQS + 8196)
+#else
+# define IRQ_BITMAP_BITS	NR_IRQS
+#endif
+
 extern int noirqdebug;
 
 #define irq_data_to_desc(data)	container_of(data, struct irq_desc, irq_data)
@@ -94,7 +94,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 static DEFINE_MUTEX(sparse_irq_lock);
-static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
+static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
 
 #ifdef CONFIG_SPARSE_IRQ
@@ -217,6 +217,15 @@ int __init early_irq_init(void)
 	initcnt = arch_probe_nr_irqs();
 	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
 
+	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
+		nr_irqs = IRQ_BITMAP_BITS;
+
+	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
+		initcnt = IRQ_BITMAP_BITS;
+
+	if (initcnt > nr_irqs)
+		nr_irqs = initcnt;
+
 	for (i = 0; i < initcnt; i++) {
 		desc = alloc_desc(i, node);
 		set_bit(i, allocated_irqs);
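Note: the three hunks above (apparently kernel/irq/internals.h and kernel/irq/irqdesc.c) give the IRQ allocation bitmaps headroom beyond NR_IRQS. With CONFIG_SPARSE_IRQ, nr_irqs can be raised at runtime past the compile-time constant, so the statically sized bitmaps must be larger, and early_irq_init() clamps both nr_irqs and the architecture's probed count so nothing ever indexes past the bitmap. A minimal, self-contained sketch of the same pattern (demo names and values are invented; this is not kernel code):

#include <stdio.h>
#include <limits.h>

#define NR_IRQS_DEMO	64			/* compile-time maximum (invented) */
#define IRQ_BITMAP_BITS	(NR_IRQS_DEMO + 8196)	/* headroom, as in the hunk above */
#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long allocated_irqs[BITS_TO_LONGS(IRQ_BITMAP_BITS)];

static void set_bit_demo(unsigned int bit, unsigned long *map)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

int main(void)
{
	unsigned int nr_irqs = NR_IRQS_DEMO;
	unsigned int initcnt = 9000;	/* stand-in for arch_probe_nr_irqs() */

	/* the clamps from early_irq_init(): never index past the bitmap */
	if (nr_irqs > IRQ_BITMAP_BITS)
		nr_irqs = IRQ_BITMAP_BITS;
	if (initcnt > IRQ_BITMAP_BITS)
		initcnt = IRQ_BITMAP_BITS;
	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (unsigned int i = 0; i < initcnt; i++)
		set_bit_demo(i, allocated_irqs);

	printf("bitmap holds %d bits; nr_irqs ended up %u\n",
	       IRQ_BITMAP_BITS, nr_irqs);
	return 0;
}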
@@ -1182,7 +1182,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	if (retval)
 		kfree(action);
 
-#ifdef CONFIG_DEBUG_SHIRQ
+#ifdef CONFIG_DEBUG_SHIRQ_FIXME
 	if (!retval && (irqflags & IRQF_SHARED)) {
 		/*
 		 * It's a shared IRQ -- the driver ought to be prepared for it
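Note: the hunk above (apparently kernel/irq/manage.c) disables the shared-IRQ debug block by renaming its guard to CONFIG_DEBUG_SHIRQ_FIXME, a symbol presumably defined by no Kconfig, so the block compiles out. The check it disables fires one spurious call into a freshly registered shared handler, because a driver on a shared line must tolerate its handler running for interrupts its device did not raise. A toy illustration of that contract (all names invented; not kernel code):

#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

struct demo_dev { int irq_pending; };

static enum irqreturn demo_handler(int irq, void *dev_id)
{
	struct demo_dev *dev = dev_id;

	(void)irq;
	if (!dev->irq_pending)	/* not ours: shared line, say so and return */
		return IRQ_NONE;
	dev->irq_pending = 0;
	return IRQ_HANDLED;
}

int main(void)
{
	struct demo_dev dev = { .irq_pending = 0 };

	/* the DEBUG_SHIRQ-style spurious call: device raised nothing yet */
	if (demo_handler(10, &dev) == IRQ_NONE)
		printf("handler survived a spurious shared-IRQ call\n");
	return 0;
}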
@@ -23,7 +23,7 @@
 #ifdef CONFIG_HARDIRQS_SW_RESEND
 
 /* Bitmap to handle software resend of interrupts: */
-static DECLARE_BITMAP(irqs_resend, NR_IRQS);
+static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
 
 /*
  * Run software resends of IRQ's
@@ -782,6 +782,10 @@ retry:
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
 	       struct perf_cpu_context *cpuctx,
@@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event,
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
 	event->oncpu = smp_processor_id();
+
+	/*
+	 * Unthrottle events, since we scheduled we might have missed several
+	 * ticks already, also for a heavily scheduling task there is little
+	 * guarantee it'll get a tick in a timely manner.
+	 */
+	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+		perf_log_throttle(event, 1);
+		event->hw.interrupts = 0;
+	}
+
 	/*
 	 * The new state must be visible before we turn it on in the hardware:
 	 */
@@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	}
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;
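Note: the two perf hunks above (apparently kernel/perf_event.c) move the MAX_INTERRUPTS sentinel and the perf_log_throttle() declaration up above event_sched_in(), so the schedule-in path can unthrottle an event whose hw.interrupts counter saturated while throttled, instead of waiting for a tick it may never get. A minimal sketch of that sentinel pattern (demo types and names invented; not the real perf code):

#include <stdio.h>

#define MAX_INTERRUPTS (~0ULL)

struct demo_event { unsigned long long interrupts; };

static void demo_log_throttle(struct demo_event *e, int enable)
{
	(void)e;
	printf("%s\n", enable ? "unthrottle" : "throttle");
}

static void demo_throttle(struct demo_event *e)
{
	demo_log_throttle(e, 0);
	e->interrupts = MAX_INTERRUPTS;	/* saturate: marks "throttled" */
}

static void demo_sched_in(struct demo_event *e)
{
	/* the check added to event_sched_in() above */
	if (e->interrupts == MAX_INTERRUPTS) {
		demo_log_throttle(e, 1);
		e->interrupts = 0;
	}
}

int main(void)
{
	struct demo_event ev = { 0 };

	demo_throttle(&ev);	/* e.g. too many PMU interrupts in one tick */
	demo_sched_in(&ev);	/* scheduling back in clears the throttle */
	return 0;
}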
@@ -600,4 +600,14 @@ int tick_broadcast_oneshot_active(void)
 	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
 }
 
+/*
+ * Check whether the broadcast device supports oneshot.
+ */
+bool tick_broadcast_oneshot_available(void)
+{
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
+}
+
 #endif
@@ -51,7 +51,11 @@ int tick_is_oneshot_available(void)
 {
 	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
-	return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
+	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+		return 0;
+	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
+		return 1;
+	return tick_broadcast_oneshot_available();
 }
 
 /*
|
@@ -36,6 +36,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
|
||||
extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
|
||||
extern int tick_broadcast_oneshot_active(void);
|
||||
extern void tick_check_oneshot_broadcast(int cpu);
|
||||
bool tick_broadcast_oneshot_available(void);
|
||||
# else /* BROADCAST */
|
||||
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
|
||||
{
|
||||
@@ -46,6 +47,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
 static inline void tick_check_oneshot_broadcast(int cpu) { }
+static inline bool tick_broadcast_oneshot_available(void) { return true; }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
@@ -76,6 +78,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 	return 0;
 }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
+static inline bool tick_broadcast_oneshot_available(void) { return false; }
 #endif /* !TICK_ONESHOT */
 
 /*
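Note: the tick hunks above (apparently kernel/time/tick-broadcast.c, tick-common.c and tick-internal.h) turn tick_is_oneshot_available() into a three-way decision: no CLOCK_EVT_FEAT_ONESHOT means oneshot mode is out; a device without CLOCK_EVT_FEAT_C3STOP keeps ticking in deep idle, so oneshot is fine; a device that stops in deep C-states can only use oneshot if the broadcast device is itself oneshot-capable, which the new tick_broadcast_oneshot_available() reports (the header stubs return true when broadcast support is compiled out and false when oneshot is). A toy decision table mirroring that structure (flag values invented; not kernel code):

#include <stdio.h>

#define FEAT_ONESHOT	0x1	/* device supports one-shot programming */
#define FEAT_C3STOP	0x2	/* device stops in deep C-states */

static int broadcast_oneshot;	/* stand-in for tick_broadcast_oneshot_available() */

static int oneshot_available(unsigned int features)
{
	if (!(features & FEAT_ONESHOT))
		return 0;		/* device can't do oneshot at all */
	if (!(features & FEAT_C3STOP))
		return 1;		/* keeps ticking in deep idle: fine */
	return broadcast_oneshot;	/* need a oneshot-capable broadcast device */
}

int main(void)
{
	broadcast_oneshot = 0;	/* e.g. a periodic-only broadcast device */
	printf("%d %d %d\n",
	       oneshot_available(0),				/* 0 */
	       oneshot_available(FEAT_ONESHOT),			/* 1 */
	       oneshot_available(FEAT_ONESHOT | FEAT_C3STOP));	/* 0 */
	return 0;
}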
@@ -1827,21 +1827,5 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 	rwbs[i] = '\0';
 }
 
-void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
-{
-	int rw = rq->cmd_flags & 0x03;
-	int bytes;
-
-	if (rq->cmd_flags & REQ_DISCARD)
-		rw |= REQ_DISCARD;
-
-	if (rq->cmd_flags & REQ_SECURE)
-		rw |= REQ_SECURE;
-
-	bytes = blk_rq_bytes(rq);
-
-	blk_fill_rwbs(rwbs, rw, bytes);
-}
-
 #endif /* CONFIG_EVENT_TRACING */
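Note: the final hunk (apparently kernel/trace/blktrace.c) deletes blk_fill_rwbs_rq(), the wrapper that folded a request's REQ_DISCARD and REQ_SECURE flags into the rw bits before delegating to blk_fill_rwbs(); callers presumably now pass the flags and byte count themselves. For orientation, a simplified sketch of the kind of flag-to-string decoding blk_fill_rwbs performs (flag values and character set invented, not the kernel's REQ_* bits):

#include <stdio.h>

#define DEMO_WRITE	0x01
#define DEMO_DISCARD	0x02
#define DEMO_SECURE	0x04

static void demo_fill_rwbs(char *rwbs, unsigned int rw, int bytes)
{
	int i = 0;

	if (rw & DEMO_DISCARD)
		rwbs[i++] = 'D';
	else if (rw & DEMO_WRITE)
		rwbs[i++] = 'W';
	else if (bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';	/* no data transferred */

	if (rw & DEMO_SECURE)
		rwbs[i++] = 'S';

	rwbs[i] = '\0';
}

int main(void)
{
	char rwbs[8];

	demo_fill_rwbs(rwbs, DEMO_DISCARD | DEMO_SECURE, 4096);
	printf("rwbs = %s\n", rwbs);	/* prints "DS" */
	return 0;
}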