Merge tag 'iommu-updates-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
 "The updates are mostly about the x86 IOMMUs this time.  Exceptions are
  the groundwork for the PAMU IOMMU from Freescale (for a PPC platform)
  and an extension to the IOMMU group interface.

  On the x86 side this includes a workaround for VT-d to disable
  interrupt remapping on broken chipsets.  On the AMD-Vi side the most
  important new feature is a kernel command-line interface to override
  broken information in IVRS ACPI tables and get interrupt remapping
  working this way.

  Besides that there are small fixes all over the place."

* tag 'iommu-updates-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (24 commits)
  iommu/tegra: Fix printk formats for dma_addr_t
  iommu: Add a function to find an iommu group by id
  iommu/vt-d: Remove warning for HPET scope type
  iommu: Move swap_pci_ref function to drivers/iommu/pci.h.
  iommu/vt-d: Disable translation if already enabled
  iommu/amd: fix error return code in early_amd_iommu_init()
  iommu/AMD: Per-thread IOMMU Interrupt Handling
  iommu: Include linux/err.h
  iommu/amd: Workaround for ERBT1312
  iommu/amd: Document ivrs_ioapic and ivrs_hpet parameters
  iommu/amd: Don't report firmware bugs with cmd-line ivrs overrides
  iommu/amd: Add ioapic and hpet ivrs override
  iommu/amd: Add early maps for ioapic and hpet
  iommu/amd: Extend IVRS special device data structure
  iommu/amd: Move add_special_device() to __init
  iommu: Fix compile warnings with forward declarations
  iommu/amd: Properly initialize irq-table lock
  iommu/amd: Use AMD specific data structure for irq remapping
  iommu/amd: Remove map_sg_no_iommu()
  iommu/vt-d: add quirk for broken interrupt remapping on 55XX chipsets
  ...
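The ivrs_ioapic and ivrs_hpet overrides called out above are plain kernel
command-line parameters. As a sketch of the syntax documented by the
"Document ivrs_ioapic and ivrs_hpet parameters" commit (the IDs and the PCI
address below are illustrative values, not taken from this merge):

    ivrs_ioapic[10]=00:14.0
    ivrs_hpet[0]=00:14.0

Each entry maps a special device ID (IOAPIC or HPET) to the PCI device, given
as bus:dev.fn, that the firmware's IVRS table should have referenced; with a
correct mapping supplied by hand, interrupt remapping can be enabled despite
the broken table.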
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -46,6 +46,7 @@
 #include "amd_iommu_proto.h"
 #include "amd_iommu_types.h"
 #include "irq_remapping.h"
+#include "pci.h"
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
@@ -263,12 +264,6 @@ static bool check_device(struct device *dev)
 	return true;
 }
 
-static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
-{
-	pci_dev_put(*from);
-	*from = to;
-}
-
 static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
 {
 	while (!bus->self) {
@@ -701,9 +696,6 @@ retry:
 static void iommu_poll_events(struct amd_iommu *iommu)
 {
 	u32 head, tail;
-	unsigned long flags;
-
-	spin_lock_irqsave(&iommu->lock, flags);
 
 	head = readl(iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
@@ -714,8 +706,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
 	}
 
 	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
-
-	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
@@ -740,17 +730,11 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
 
 static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 {
-	unsigned long flags;
 	u32 head, tail;
 
 	if (iommu->ppr_log == NULL)
 		return;
 
-	/* enable ppr interrupts again */
-	writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
-	spin_lock_irqsave(&iommu->lock, flags);
-
 	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 
@@ -786,34 +770,50 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
 		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 
-		/*
-		 * Release iommu->lock because ppr-handling might need to
-		 * re-acquire it
-		 */
-		spin_unlock_irqrestore(&iommu->lock, flags);
-
 		/* Handle PPR entry */
 		iommu_handle_ppr_entry(iommu, entry);
 
-		spin_lock_irqsave(&iommu->lock, flags);
-
 		/* Refresh ring-buffer information */
 		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
 	}
-
-	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 irqreturn_t amd_iommu_int_thread(int irq, void *data)
 {
-	struct amd_iommu *iommu;
+	struct amd_iommu *iommu = (struct amd_iommu *) data;
+	u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-	for_each_iommu(iommu) {
-		iommu_poll_events(iommu);
-		iommu_poll_ppr_log(iommu);
+	while (status & (MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK)) {
+		/* Enable EVT and PPR interrupts again */
+		writel((MMIO_STATUS_EVT_INT_MASK | MMIO_STATUS_PPR_INT_MASK),
+			iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+		if (status & MMIO_STATUS_EVT_INT_MASK) {
+			pr_devel("AMD-Vi: Processing IOMMU Event Log\n");
+			iommu_poll_events(iommu);
+		}
+
+		if (status & MMIO_STATUS_PPR_INT_MASK) {
+			pr_devel("AMD-Vi: Processing IOMMU PPR Log\n");
+			iommu_poll_ppr_log(iommu);
+		}
+
+		/*
+		 * Hardware bug: ERBT1312
+		 * When re-enabling interrupt (by writing 1
+		 * to clear the bit), the hardware might also try to set
+		 * the interrupt bit in the event status register.
+		 * In this scenario, the bit will be set, and disable
+		 * subsequent interrupts.
+		 *
+		 * Workaround: The IOMMU driver should read back the
+		 * status register and check if the interrupt bits are cleared.
+		 * If not, driver will need to go through the interrupt handler
+		 * again and re-clear the bits
+		 */
+		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 	}
 
 	return IRQ_HANDLED;
 }
-
@@ -2838,24 +2838,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
 
-/*
- * This is a special map_sg function which is used if we should map a
- * device which is not handled by an AMD IOMMU in the system.
- */
-static int map_sg_no_iommu(struct device *dev, struct scatterlist *sglist,
-			   int nelems, int dir)
-{
-	struct scatterlist *s;
-	int i;
-
-	for_each_sg(sglist, s, nelems, i) {
-		s->dma_address = (dma_addr_t)sg_phys(s);
-		s->dma_length = s->length;
-	}
-
-	return nelems;
-}
-
 /*
  * The exported map_sg function for dma_ops (handles scatter-gather
  * lists).
@@ -2875,9 +2857,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 	INC_STATS_COUNTER(cnt_map_sg);
 
 	domain = get_domain(dev);
-	if (PTR_ERR(domain) == -EINVAL)
-		return map_sg_no_iommu(dev, sglist, nelems, dir);
-	else if (IS_ERR(domain))
+	if (IS_ERR(domain))
 		return 0;
 
 	dma_mask = *dev->dma_mask;
@@ -3410,7 +3390,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
-					  unsigned long iova)
+					  dma_addr_t iova)
 {
 	struct protection_domain *domain = dom->priv;
 	unsigned long offset_mask;
@@ -3947,6 +3927,9 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 	if (!table)
 		goto out;
 
+	/* Initialize table spin-lock */
+	spin_lock_init(&table->lock);
+
 	if (ioapic)
 		/* Keep the first 32 indexes free for IOAPIC interrupts */
 		table->min_index = 32;
@@ -4007,7 +3990,7 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
 			c = 0;
 
 		if (c == count) {
-			struct irq_2_iommu *irte_info;
+			struct irq_2_irte *irte_info;
 
 			for (; c != 0; --c)
 				table->table[index - c + 1] = IRTE_ALLOCATED;
@@ -4015,9 +3998,9 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
 			index -= count - 1;
 
 			cfg->remapped = 1;
-			irte_info = &cfg->irq_2_iommu;
-			irte_info->sub_handle = devid;
-			irte_info->irte_index = index;
+			irte_info = &cfg->irq_2_irte;
+			irte_info->devid = devid;
+			irte_info->index = index;
 
 			goto out;
 		}
@@ -4098,7 +4081,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
 			      struct io_apic_irq_attr *attr)
 {
 	struct irq_remap_table *table;
-	struct irq_2_iommu *irte_info;
+	struct irq_2_irte *irte_info;
 	struct irq_cfg *cfg;
 	union irte irte;
 	int ioapic_id;
@@ -4110,7 +4093,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
 	if (!cfg)
 		return -EINVAL;
 
-	irte_info = &cfg->irq_2_iommu;
+	irte_info = &cfg->irq_2_irte;
 	ioapic_id = mpc_ioapic_id(attr->ioapic);
 	devid = get_ioapic_devid(ioapic_id);
 
@@ -4125,8 +4108,8 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
 
 	/* Setup IRQ remapping info */
 	cfg->remapped = 1;
-	irte_info->sub_handle = devid;
-	irte_info->irte_index = index;
+	irte_info->devid = devid;
+	irte_info->index = index;
 
 	/* Setup IRTE for IOMMU */
 	irte.val = 0;
@@ -4160,7 +4143,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
 static int set_affinity(struct irq_data *data, const struct cpumask *mask,
 			bool force)
 {
-	struct irq_2_iommu *irte_info;
+	struct irq_2_irte *irte_info;
 	unsigned int dest, irq;
 	struct irq_cfg *cfg;
 	union irte irte;
@@ -4171,12 +4154,12 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
 
 	cfg = data->chip_data;
 	irq = data->irq;
-	irte_info = &cfg->irq_2_iommu;
+	irte_info = &cfg->irq_2_irte;
 
 	if (!cpumask_intersects(mask, cpu_online_mask))
 		return -EINVAL;
 
-	if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte))
+	if (get_irte(irte_info->devid, irte_info->index, &irte))
 		return -EBUSY;
 
 	if (assign_irq_vector(irq, cfg, mask))
@@ -4192,7 +4175,7 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
 	irte.fields.vector = cfg->vector;
 	irte.fields.destination = dest;
 
-	modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+	modify_irte(irte_info->devid, irte_info->index, irte);
 
 	if (cfg->move_in_progress)
 		send_cleanup_vector(cfg);
@@ -4204,16 +4187,16 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
 
 static int free_irq(int irq)
 {
-	struct irq_2_iommu *irte_info;
+	struct irq_2_irte *irte_info;
 	struct irq_cfg *cfg;
 
 	cfg = irq_get_chip_data(irq);
 	if (!cfg)
 		return -EINVAL;
 
-	irte_info = &cfg->irq_2_iommu;
+	irte_info = &cfg->irq_2_irte;
 
-	free_irte(irte_info->sub_handle, irte_info->irte_index);
+	free_irte(irte_info->devid, irte_info->index);
 
 	return 0;
 }
@@ -4222,7 +4205,7 @@ static void compose_msi_msg(struct pci_dev *pdev,
 			    unsigned int irq, unsigned int dest,
 			    struct msi_msg *msg, u8 hpet_id)
 {
-	struct irq_2_iommu *irte_info;
+	struct irq_2_irte *irte_info;
 	struct irq_cfg *cfg;
 	union irte irte;
 
@@ -4230,7 +4213,7 @@ static void compose_msi_msg(struct pci_dev *pdev,
 	if (!cfg)
 		return;
 
-	irte_info = &cfg->irq_2_iommu;
+	irte_info = &cfg->irq_2_irte;
 
 	irte.val = 0;
 	irte.fields.vector = cfg->vector;
@@ -4239,11 +4222,11 @@ static void compose_msi_msg(struct pci_dev *pdev,
 	irte.fields.dm = apic->irq_dest_mode;
 	irte.fields.valid = 1;
 
-	modify_irte(irte_info->sub_handle, irte_info->irte_index, irte);
+	modify_irte(irte_info->devid, irte_info->index, irte);
 
 	msg->address_hi = MSI_ADDR_BASE_HI;
 	msg->address_lo = MSI_ADDR_BASE_LO;
-	msg->data = irte_info->irte_index;
+	msg->data = irte_info->index;
 }
 
 static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
@@ -4268,7 +4251,7 @@ static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
 static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
 			 int index, int offset)
 {
-	struct irq_2_iommu *irte_info;
+	struct irq_2_irte *irte_info;
 	struct irq_cfg *cfg;
 	u16 devid;
 
@@ -4283,18 +4266,18 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
 		return 0;
 
 	devid = get_device_id(&pdev->dev);
-	irte_info = &cfg->irq_2_iommu;
+	irte_info = &cfg->irq_2_irte;
 
 	cfg->remapped = 1;
-	irte_info->sub_handle = devid;
-	irte_info->irte_index = index + offset;
+	irte_info->devid = devid;
+	irte_info->index = index + offset;
 
 	return 0;
 }
 
 static int setup_hpet_msi(unsigned int irq, unsigned int id)
 {
-	struct irq_2_iommu *irte_info;
+	struct irq_2_irte *irte_info;
 	struct irq_cfg *cfg;
 	int index, devid;
 
@@ -4302,7 +4285,7 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
 	if (!cfg)
 		return -EINVAL;
 
-	irte_info = &cfg->irq_2_iommu;
+	irte_info = &cfg->irq_2_irte;
 	devid = get_hpet_devid(id);
 	if (devid < 0)
 		return devid;
@@ -4312,8 +4295,8 @@ static int setup_hpet_msi(unsigned int irq, unsigned int id)
 		return index;
 
 	cfg->remapped = 1;
-	irte_info->sub_handle = devid;
-	irte_info->irte_index = index;
+	irte_info->devid = devid;
+	irte_info->index = index;
 
 	return 0;
 }