iommu/vt-d: Convert IR set_affinity function to remap_ops
The function to set interrupt affinity with interrupt remapping enabled is Intel specific too. So move it to the irq_remap_ops too.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
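For context, the sketch below shows the shape this conversion takes: the remapping driver exposes its affinity handler through the irq_remap_ops table, and a generic wrapper such as intr_set_affinity() (the symbol the call sites in the diff below are switched to) dispatches through it. The struct layout, the remap_ops pointer and the wrapper body here are illustrative assumptions based on this commit message, not a copy of the code added elsewhere in the series.

#include <linux/irq.h>
#include <linux/cpumask.h>

struct irq_remap_ops {
	/* ... other remapping callbacks ... */

	/* Set interrupt affinity for a remapped interrupt (assumed hook). */
	int (*set_affinity)(struct irq_data *data,
			    const struct cpumask *mask, bool force);
};

/* Assumed to point at the Intel implementation once remapping is enabled. */
static struct irq_remap_ops *remap_ops;

/*
 * Generic entry point the IO-APIC code calls in place of the removed,
 * Intel-specific ir_ioapic_set_affinity().
 */
int intr_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{
	if (!remap_ops || !remap_ops->set_affinity)
		return 0;

	return remap_ops->set_affinity(data, mask, force);
}

With such a hook in place, the IO-APIC side no longer needs to know whose remapping hardware is present; it calls intr_set_affinity() and the registered ops perform the atomic IRTE update described in the comment being removed below.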
@@ -2327,71 +2327,6 @@ ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	return ret;
 }
 
-#ifdef CONFIG_IRQ_REMAP
-
-/*
- * Migrate the IO-APIC irq in the presence of intr-remapping.
- *
- * For both level and edge triggered, irq migration is a simple atomic
- * update(of vector and cpu destination) of IRTE and flush the hardware cache.
- *
- * For level triggered, we eliminate the io-apic RTE modification (with the
- * updated vector information), by using a virtual vector (io-apic pin number).
- * Real vector that is used for interrupting cpu will be coming from
- * the interrupt-remapping table entry.
- *
- * As the migration is a simple atomic update of IRTE, the same mechanism
- * is used to migrate MSI irq's in the presence of interrupt-remapping.
- */
-static int
-ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-		       bool force)
-{
-	struct irq_cfg *cfg = data->chip_data;
-	unsigned int dest, irq = data->irq;
-	struct irte irte;
-
-	if (!cpumask_intersects(mask, cpu_online_mask))
-		return -EINVAL;
-
-	if (get_irte(irq, &irte))
-		return -EBUSY;
-
-	if (assign_irq_vector(irq, cfg, mask))
-		return -EBUSY;
-
-	dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
-
-	irte.vector = cfg->vector;
-	irte.dest_id = IRTE_DEST(dest);
-
-	/*
-	 * Atomically updates the IRTE with the new destination, vector
-	 * and flushes the interrupt entry cache.
-	 */
-	modify_irte(irq, &irte);
-
-	/*
-	 * After this point, all the interrupts will start arriving
-	 * at the new destination. So, time to cleanup the previous
-	 * vector allocation.
-	 */
-	if (cfg->move_in_progress)
-		send_cleanup_vector(cfg);
-
-	cpumask_copy(data->affinity, mask);
-	return 0;
-}
-
-#else
-static inline int
-ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-		       bool force)
-{
-	return 0;
-}
-#endif
-
 asmlinkage void smp_irq_move_cleanup_interrupt(void)
 {
 	unsigned vector, me;
@@ -2636,7 +2571,7 @@ static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
 	chip->irq_eoi = ir_ack_apic_level;
 
 #ifdef CONFIG_SMP
-	chip->irq_set_affinity = ir_ioapic_set_affinity;
+	chip->irq_set_affinity = intr_set_affinity;
 #endif
 }
 #endif /* CONFIG_IRQ_REMAP */
@@ -3826,7 +3761,7 @@ void __init setup_ioapic_dest(void)
 			mask = apic->target_cpus();
 
 		if (intr_remapping_enabled)
-			ir_ioapic_set_affinity(idata, mask, false);
+			intr_set_affinity(idata, mask, false);
 		else
 			ioapic_set_affinity(idata, mask, false);
 	}