genirq/affinity: Provide smarter irq spreading infrastructure
The current irq spreading infrastructure only looks at a cpumask and tries to spread the interrupts over that mask. That's suboptimal, as it does not take NUMA nodes into account. Change the logic so the interrupts are spread across NUMA nodes first and then inside each node. If a node has more CPUs than vectors, the affinity of a vector is set to several CPUs. If HT siblings are available, that is taken into account so that all siblings of a core end up on the same vector.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: axboe@fb.com
Cc: keith.busch@intel.com
Cc: agordeev@redhat.com
Cc: linux-block@vger.kernel.org
Link: http://lkml.kernel.org/r/1473862739-15032-3-git-send-email-hch@lst.de
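As a minimal user-space sketch of the distribution arithmetic the message describes (even split of vectors across nodes, remainder to the first nodes, several CPUs sharing one vector inside a node), the following illustration is not the kernel implementation; the node/CPU counts and the remainder handling are assumptions chosen only to show the rounding:

/*
 * Simplified illustration of node-aware vector spreading.
 * All numbers are made up; this is not kernel/irq/affinity.c.
 */
#include <stdio.h>

int main(void)
{
	int nvec = 8, nodes = 3, cpus_per_node = 4;
	int vecs_per_node = nvec / nodes;
	int extra = nvec % nodes;

	for (int n = 0; n < nodes; n++) {
		/* Assumed policy: the first 'extra' nodes get one more vector. */
		int vecs = vecs_per_node + (n < extra ? 1 : 0);
		/* Round up: each vector covers this many CPUs of the node. */
		int cpus_per_vec = (cpus_per_node + vecs - 1) / vecs;

		printf("node %d: %d vectors, up to %d CPUs per vector\n",
		       n, vecs, cpus_per_vec);
	}
	return 0;
}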
@@ -279,6 +279,8 @@ extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 
 struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs);
+struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec);
+int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec);
 
 #else /* CONFIG_SMP */
 
@@ -316,6 +318,19 @@ static inline struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs)
 	*nr_vecs = 1;
 	return NULL;
 }
 
+static inline struct cpumask *
+irq_create_affinity_masks(const struct cpumask *affinity, int nvec)
+{
+	return NULL;
+}
+
+static inline int
+irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
+{
+	return maxvec;
+}
+
 #endif /* CONFIG_SMP */
 
 /*
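A hedged sketch of how a consumer of the two declarations above might call them; the surrounding function example_alloc_vectors and its dev_affinity/max_hw_vecs parameters are invented for illustration and are not part of this patch:

#include <linux/interrupt.h>
#include <linux/slab.h>

/*
 * Hypothetical caller (e.g. a driver's vector setup path): clamp the
 * requested vector count, then build one spread mask per vector.
 */
static int example_alloc_vectors(const struct cpumask *dev_affinity,
				 int max_hw_vecs)
{
	struct cpumask *masks;
	int nvec;

	/* Limit the requested vectors to what the CPUs can actually use. */
	nvec = irq_calc_affinity_vectors(dev_affinity, max_hw_vecs);

	/*
	 * One cpumask per vector; a NULL result simply means no per-vector
	 * spreading is applied (e.g. the !CONFIG_SMP stub above).
	 */
	masks = irq_create_affinity_masks(dev_affinity, nvec);

	/* ... hand masks[i] to the interrupt setup for vector i ... */

	kfree(masks);
	return nvec;
}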