Merge branches 'iommu/fixes', 'arm/omap', 'arm/smmu', 'arm/shmobile', 'x86/amd', 'arm/exynos', 'arm/renesas', 'ppc/pamu' and 'arm/msm' into next

parent 286f600bc8
parent 645d6c8722
parent 9db4241e83
parent 3dbc260853
parent 365409db5d
parent 720b0cef71
parent d6a71bf79d
parent afba256b14
commit c0981b863a
@@ -178,13 +178,13 @@ config TEGRA_IOMMU_SMMU

 config EXYNOS_IOMMU
 	bool "Exynos IOMMU Support"
-	depends on ARCH_EXYNOS && EXYNOS_DEV_SYSMMU
+	depends on ARCH_EXYNOS
 	select IOMMU_API
 	help
-	  Support for the IOMMU(System MMU) of Samsung Exynos application
-	  processor family. This enables H/W multimedia accellerators to see
-	  non-linear physical memory chunks as a linear memory in their
-	  address spaces
+	  Support for the IOMMU (System MMU) of Samsung Exynos application
+	  processor family. This enables H/W multimedia accelerators to see
+	  non-linear physical memory chunks as linear memory in their
+	  address space.

 	  If unsure, say N here.

@@ -193,9 +193,9 @@ config EXYNOS_IOMMU_DEBUG
 	depends on EXYNOS_IOMMU
 	help
 	  Select this to see the detailed log message that shows what
-	  happens in the IOMMU driver
+	  happens in the IOMMU driver.

-	  Say N unless you need kernel log message for IOMMU debugging
+	  Say N unless you need kernel log message for IOMMU debugging.

 config SHMOBILE_IPMMU
 	bool
@@ -272,6 +272,18 @@ config SHMOBILE_IOMMU_L1SIZE
 	default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB
 	default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB

+config IPMMU_VMSA
+	bool "Renesas VMSA-compatible IPMMU"
+	depends on ARM_LPAE
+	depends on ARCH_SHMOBILE || COMPILE_TEST
+	select IOMMU_API
+	select ARM_DMA_USE_IOMMU
+	help
+	  Support for the Renesas VMSA-compatible IPMMU Renesas found in the
+	  R-Mobile APE6 and R-Car H2/M2 SoCs.
+
+	  If unsure, say N.
+
 config SPAPR_TCE_IOMMU
 	bool "sPAPR TCE IOMMU Support"
 	depends on PPC_POWERNV || PPC_PSERIES
@@ -7,6 +7,7 @@ obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
 obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
+obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
 obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
@@ -3499,8 +3499,6 @@ int __init amd_iommu_init_passthrough(void)
 {
 	struct iommu_dev_data *dev_data;
 	struct pci_dev *dev = NULL;
-	struct amd_iommu *iommu;
-	u16 devid;
 	int ret;

 	ret = alloc_passthrough_domain();
@@ -3514,12 +3512,6 @@ int __init amd_iommu_init_passthrough(void)
 		dev_data = get_dev_data(&dev->dev);
 		dev_data->passthrough = true;

-		devid = get_device_id(&dev->dev);
-
-		iommu = amd_iommu_rlookup_table[devid];
-		if (!iommu)
-			continue;
-
 		attach_device(&dev->dev, pt_domain);
 	}

@@ -3999,7 +3991,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 	iommu_flush_dte(iommu, devid);
 	if (devid != alias) {
 		irq_lookup_table[alias] = table;
-		set_dte_irq_entry(devid, table);
+		set_dte_irq_entry(alias, table);
 		iommu_flush_dte(iommu, alias);
 	}

@@ -788,7 +788,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 	 * per device. But we can enable the exclusion range per
 	 * device. This is done here
 	 */
-	set_dev_entry_bit(m->devid, DEV_ENTRY_EX);
+	set_dev_entry_bit(devid, DEV_ENTRY_EX);
 	iommu->exclusion_start = m->range_start;
 	iommu->exclusion_length = m->range_length;
 }
@@ -45,6 +45,8 @@ struct pri_queue {
 struct pasid_state {
 	struct list_head list;			/* For global state-list */
 	atomic_t count;				/* Reference count */
+	atomic_t mmu_notifier_count;		/* Counting nested mmu_notifier
+						   calls */
 	struct task_struct *task;		/* Task bound to this PASID */
 	struct mm_struct *mm;			/* mm_struct for the faults */
 	struct mmu_notifier mn;			/* mmu_otifier handle */
@@ -56,6 +58,8 @@ struct pasid_state {
 };

 struct device_state {
+	struct list_head list;
+	u16 devid;
 	atomic_t count;
 	struct pci_dev *pdev;
 	struct pasid_state **states;
@@ -81,13 +85,9 @@ struct fault {
 	u16 flags;
 };

-static struct device_state **state_table;
+static LIST_HEAD(state_list);
 static spinlock_t state_lock;

-/* List and lock for all pasid_states */
-static LIST_HEAD(pasid_state_list);
-static DEFINE_SPINLOCK(ps_lock);
-
 static struct workqueue_struct *iommu_wq;

 /*
@@ -99,7 +99,6 @@ static u64 *empty_page_table;

 static void free_pasid_states(struct device_state *dev_state);
 static void unbind_pasid(struct device_state *dev_state, int pasid);
-static int task_exit(struct notifier_block *nb, unsigned long e, void *data);

 static u16 device_id(struct pci_dev *pdev)
 {
@@ -111,13 +110,25 @@ static u16 device_id(struct pci_dev *pdev)
 	return devid;
 }

+static struct device_state *__get_device_state(u16 devid)
+{
+	struct device_state *dev_state;
+
+	list_for_each_entry(dev_state, &state_list, list) {
+		if (dev_state->devid == devid)
+			return dev_state;
+	}
+
+	return NULL;
+}
+
 static struct device_state *get_device_state(u16 devid)
 {
 	struct device_state *dev_state;
 	unsigned long flags;

 	spin_lock_irqsave(&state_lock, flags);
-	dev_state = state_table[devid];
+	dev_state = __get_device_state(devid);
 	if (dev_state != NULL)
 		atomic_inc(&dev_state->count);
 	spin_unlock_irqrestore(&state_lock, flags);
@@ -158,29 +169,6 @@ static void put_device_state_wait(struct device_state *dev_state)
 	free_device_state(dev_state);
 }

-static struct notifier_block profile_nb = {
-	.notifier_call = task_exit,
-};
-
-static void link_pasid_state(struct pasid_state *pasid_state)
-{
-	spin_lock(&ps_lock);
-	list_add_tail(&pasid_state->list, &pasid_state_list);
-	spin_unlock(&ps_lock);
-}
-
-static void __unlink_pasid_state(struct pasid_state *pasid_state)
-{
-	list_del(&pasid_state->list);
-}
-
-static void unlink_pasid_state(struct pasid_state *pasid_state)
-{
-	spin_lock(&ps_lock);
-	__unlink_pasid_state(pasid_state);
-	spin_unlock(&ps_lock);
-}
-
 /* Must be called under dev_state->lock */
 static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
 						  int pasid, bool alloc)
@@ -337,7 +325,6 @@ static void unbind_pasid(struct device_state *dev_state, int pasid)
 	if (pasid_state == NULL)
 		return;

-	unlink_pasid_state(pasid_state);
 	__unbind_pasid(pasid_state);
 	put_pasid_state_wait(pasid_state); /* Reference taken in this function */
 }
@@ -379,7 +366,12 @@ static void free_pasid_states(struct device_state *dev_state)
 			continue;

 		put_pasid_state(pasid_state);
-		unbind_pasid(dev_state, i);
+
+		/*
+		 * This will call the mn_release function and
+		 * unbind the PASID
+		 */
+		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
 	}

 	if (dev_state->pasid_levels == 2)
@@ -443,8 +435,11 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn,
 	pasid_state = mn_to_state(mn);
 	dev_state = pasid_state->device_state;

-	amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
-				  __pa(empty_page_table));
+	if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) {
+		amd_iommu_domain_set_gcr3(dev_state->domain,
+					  pasid_state->pasid,
+					  __pa(empty_page_table));
+	}
 }

 static void mn_invalidate_range_end(struct mmu_notifier *mn,
@@ -457,11 +452,31 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn,
 	pasid_state = mn_to_state(mn);
 	dev_state = pasid_state->device_state;

-	amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
-				  __pa(pasid_state->mm->pgd));
+	if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) {
+		amd_iommu_domain_set_gcr3(dev_state->domain,
+					  pasid_state->pasid,
+					  __pa(pasid_state->mm->pgd));
+	}
 }

+static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+	struct pasid_state *pasid_state;
+	struct device_state *dev_state;
+
+	might_sleep();
+
+	pasid_state = mn_to_state(mn);
+	dev_state = pasid_state->device_state;
+
+	if (pasid_state->device_state->inv_ctx_cb)
+		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
+
+	unbind_pasid(dev_state, pasid_state->pasid);
+}
+
 static struct mmu_notifier_ops iommu_mn = {
+	.release = mn_release,
 	.clear_flush_young = mn_clear_flush_young,
 	.change_pte = mn_change_pte,
 	.invalidate_page = mn_invalidate_page,
@@ -504,8 +519,10 @@ static void do_fault(struct work_struct *work)

 	write = !!(fault->flags & PPR_FAULT_WRITE);

+	down_read(&fault->state->mm->mmap_sem);
 	npages = get_user_pages(fault->state->task, fault->state->mm,
 				fault->address, 1, write, 0, &page, NULL);
+	up_read(&fault->state->mm->mmap_sem);

 	if (npages == 1) {
 		put_page(page);
@@ -604,53 +621,6 @@ static struct notifier_block ppr_nb = {
 	.notifier_call = ppr_notifier,
 };

-static int task_exit(struct notifier_block *nb, unsigned long e, void *data)
-{
-	struct pasid_state *pasid_state;
-	struct task_struct *task;
-
-	task = data;
-
-	/*
-	 * Using this notifier is a hack - but there is no other choice
-	 * at the moment. What I really want is a sleeping notifier that
-	 * is called when an MM goes down. But such a notifier doesn't
-	 * exist yet. The notifier needs to sleep because it has to make
-	 * sure that the device does not use the PASID and the address
-	 * space anymore before it is destroyed. This includes waiting
-	 * for pending PRI requests to pass the workqueue. The
-	 * MMU-Notifiers would be a good fit, but they use RCU and so
-	 * they are not allowed to sleep. Lets see how we can solve this
-	 * in a more intelligent way in the future.
-	 */
-again:
-	spin_lock(&ps_lock);
-	list_for_each_entry(pasid_state, &pasid_state_list, list) {
-		struct device_state *dev_state;
-		int pasid;
-
-		if (pasid_state->task != task)
-			continue;
-
-		/* Drop Lock and unbind */
-		spin_unlock(&ps_lock);
-
-		dev_state = pasid_state->device_state;
-		pasid = pasid_state->pasid;
-
-		if (pasid_state->device_state->inv_ctx_cb)
-			dev_state->inv_ctx_cb(dev_state->pdev, pasid);
-
-		unbind_pasid(dev_state, pasid);
-
-		/* Task may be in the list multiple times */
-		goto again;
-	}
-	spin_unlock(&ps_lock);
-
-	return NOTIFY_OK;
-}
-
 int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
 			 struct task_struct *task)
 {
@@ -680,6 +650,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
 		goto out;

 	atomic_set(&pasid_state->count, 1);
+	atomic_set(&pasid_state->mmu_notifier_count, 0);
 	init_waitqueue_head(&pasid_state->wq);
 	spin_lock_init(&pasid_state->lock);

@@ -703,8 +674,6 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
 	if (ret)
 		goto out_clear_state;

-	link_pasid_state(pasid_state);
-
 	return 0;

 out_clear_state:
@@ -725,6 +694,7 @@ EXPORT_SYMBOL(amd_iommu_bind_pasid);

 void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
 {
+	struct pasid_state *pasid_state;
 	struct device_state *dev_state;
 	u16 devid;

@@ -741,7 +711,17 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
 	if (pasid < 0 || pasid >= dev_state->max_pasids)
 		goto out;

-	unbind_pasid(dev_state, pasid);
+	pasid_state = get_pasid_state(dev_state, pasid);
+	if (pasid_state == NULL)
+		goto out;
+	/*
+	 * Drop reference taken here. We are safe because we still hold
+	 * the reference taken in the amd_iommu_bind_pasid function.
+	 */
+	put_pasid_state(pasid_state);
+
+	/* This will call the mn_release function and unbind the PASID */
+	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

 out:
 	put_device_state(dev_state);
@@ -771,7 +751,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)

 	spin_lock_init(&dev_state->lock);
 	init_waitqueue_head(&dev_state->wq);
-	dev_state->pdev = pdev;
+	dev_state->pdev  = pdev;
+	dev_state->devid = devid;

 	tmp = pasids;
 	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
@@ -801,13 +782,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)

 	spin_lock_irqsave(&state_lock, flags);

-	if (state_table[devid] != NULL) {
+	if (__get_device_state(devid) != NULL) {
 		spin_unlock_irqrestore(&state_lock, flags);
 		ret = -EBUSY;
 		goto out_free_domain;
 	}

-	state_table[devid] = dev_state;
+	list_add_tail(&dev_state->list, &state_list);

 	spin_unlock_irqrestore(&state_lock, flags);

@@ -839,13 +820,13 @@ void amd_iommu_free_device(struct pci_dev *pdev)

 	spin_lock_irqsave(&state_lock, flags);

-	dev_state = state_table[devid];
+	dev_state = __get_device_state(devid);
 	if (dev_state == NULL) {
 		spin_unlock_irqrestore(&state_lock, flags);
 		return;
 	}

-	state_table[devid] = NULL;
+	list_del(&dev_state->list);

 	spin_unlock_irqrestore(&state_lock, flags);

@@ -872,7 +853,7 @@ int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
 	spin_lock_irqsave(&state_lock, flags);

 	ret = -EINVAL;
-	dev_state = state_table[devid];
+	dev_state = __get_device_state(devid);
 	if (dev_state == NULL)
 		goto out_unlock;

@@ -903,7 +884,7 @@ int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
 	spin_lock_irqsave(&state_lock, flags);

 	ret = -EINVAL;
-	dev_state = state_table[devid];
+	dev_state = __get_device_state(devid);
 	if (dev_state == NULL)
 		goto out_unlock;

@@ -920,7 +901,6 @@ EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);

 static int __init amd_iommu_v2_init(void)
 {
-	size_t state_table_size;
 	int ret;

 	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
@@ -936,16 +916,10 @@ static int __init amd_iommu_v2_init(void)

 	spin_lock_init(&state_lock);

-	state_table_size = MAX_DEVICES * sizeof(struct device_state *);
-	state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					       get_order(state_table_size));
-	if (state_table == NULL)
-		return -ENOMEM;
-
 	ret = -ENOMEM;
 	iommu_wq = create_workqueue("amd_iommu_v2");
 	if (iommu_wq == NULL)
-		goto out_free;
+		goto out;

 	ret = -ENOMEM;
 	empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
@@ -953,29 +927,24 @@ static int __init amd_iommu_v2_init(void)
 		goto out_destroy_wq;

 	amd_iommu_register_ppr_notifier(&ppr_nb);
-	profile_event_register(PROFILE_TASK_EXIT, &profile_nb);

 	return 0;

 out_destroy_wq:
 	destroy_workqueue(iommu_wq);

-out_free:
-	free_pages((unsigned long)state_table, get_order(state_table_size));
-
+out:
 	return ret;
 }

 static void __exit amd_iommu_v2_exit(void)
 {
 	struct device_state *dev_state;
-	size_t state_table_size;
 	int i;

 	if (!amd_iommu_v2_supported())
 		return;

-	profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb);
 	amd_iommu_unregister_ppr_notifier(&ppr_nb);

 	flush_workqueue(iommu_wq);
@@ -998,9 +967,6 @@ static void __exit amd_iommu_v2_exit(void)

 	destroy_workqueue(iommu_wq);

-	state_table_size = MAX_DEVICES * sizeof(struct device_state *);
-	free_pages((unsigned long)state_table, get_order(state_table_size));
-
 	free_page((unsigned long)empty_page_table);
 }
@@ -1167,7 +1167,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 	for (i = 0; i < master->num_streamids; ++i) {
 		u32 idx, s2cr;
 		idx = master->smrs ? master->smrs[i].idx : master->streamids[i];
-		s2cr = (S2CR_TYPE_TRANS << S2CR_TYPE_SHIFT) |
+		s2cr = S2CR_TYPE_TRANS |
 		       (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT);
 		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
 	}
@@ -1381,7 +1381,7 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,

 	do {
 		next = pmd_addr_end(addr, end);
-		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
+		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
 					      prot, stage);
 		phys += next - addr;
 	} while (pmd++, addr = next, addr < end);
@@ -1499,7 +1499,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,

 	ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
 	arm_smmu_tlb_inv_context(&smmu_domain->root_cfg);
-	return ret ? ret : size;
+	return ret ? 0 : size;
 }

 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
@@ -1804,7 +1804,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	 * allocation (PTRS_PER_PGD).
 	 */
 #ifdef CONFIG_64BIT
-	smmu->s1_output_size = min(39UL, size);
+	smmu->s1_output_size = min((unsigned long)VA_BITS, size);
 #else
 	smmu->s1_output_size = min(32UL, size);
 #endif
@@ -152,7 +152,8 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
 	info->seg = pci_domain_nr(dev->bus);
 	info->level = level;
 	if (event == BUS_NOTIFY_ADD_DEVICE) {
-		for (tmp = dev, level--; tmp; tmp = tmp->bus->self) {
+		for (tmp = dev; tmp; tmp = tmp->bus->self) {
+			level--;
 			info->path[level].device = PCI_SLOT(tmp->devfn);
 			info->path[level].function = PCI_FUNC(tmp->devfn);
 			if (pci_is_root_bus(tmp->bus))
(Diff for one file suppressed because it is too large to display.)
@@ -592,8 +592,7 @@ found_cpu_node:
 		/* advance to next node in cache hierarchy */
 		node = of_find_node_by_phandle(*prop);
 		if (!node) {
-			pr_debug("Invalid node for cache hierarchy %s\n",
-				 node->full_name);
+			pr_debug("Invalid node for cache hierarchy\n");
 			return ~(u32)0;
 		}
 	}
@@ -1009,11 +1009,13 @@ static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
 	if (level == 1)
 		return freelist;

-	for (pte = page_address(pg); !first_pte_in_page(pte); pte++) {
+	pte = page_address(pg);
+	do {
 		if (dma_pte_present(pte) && !dma_pte_superpage(pte))
 			freelist = dma_pte_list_pagetables(domain, level - 1,
 							   pte, freelist);
-	}
+		pte++;
+	} while (!first_pte_in_page(pte));

 	return freelist;
 }
@@ -2235,7 +2237,9 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
 		bridge_devfn = dev_tmp->devfn;
 	}
 	spin_lock_irqsave(&device_domain_lock, flags);
-	info = dmar_search_domain_by_dev_info(segment, bus, devfn);
+	info = dmar_search_domain_by_dev_info(segment,
+					      bridge_bus,
+					      bridge_devfn);
 	if (info) {
 		iommu = info->iommu;
 		domain = info->domain;
drivers/iommu/ipmmu-vmsa.c: 1255 lines (normal file).
(Diff suppressed because the file is too large to display.)
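The 1255 lines of the new IPMMU driver are not reproduced in this view. As a rough orientation only, and as a hypothetical sketch rather than the actual contents of ipmmu-vmsa.c, an IOMMU driver of this kernel generation fills in a struct iommu_ops and registers it for the bus its client devices sit on:

/* Illustrative sketch only -- not taken from ipmmu-vmsa.c. */
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static int sketch_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	/* Point the device's translation context at this domain's page tables. */
	return 0;
}

static struct iommu_ops sketch_iommu_ops = {
	.attach_dev	= sketch_attach_dev,
	/* .domain_init, .domain_destroy, .map, .unmap, .iova_to_phys, ... */
	.pgsize_bitmap	= SZ_4K,	/* smallest page size the sketch claims to support */
};

static int __init sketch_iommu_init(void)
{
	/* Let the generic IOMMU API route platform devices through these ops. */
	return bus_set_iommu(&platform_bus_type, &sketch_iommu_ops);
}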
@@ -127,13 +127,12 @@ static void msm_iommu_reset(void __iomem *base, int ncb)

 static int msm_iommu_probe(struct platform_device *pdev)
 {
-	struct resource *r, *r2;
+	struct resource *r;
 	struct clk *iommu_clk;
 	struct clk *iommu_pclk;
 	struct msm_iommu_drvdata *drvdata;
 	struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data;
 	void __iomem *regs_base;
-	resource_size_t len;
 	int ret, irq, par;

 	if (pdev->id == -1) {
@@ -178,35 +177,16 @@ static int msm_iommu_probe(struct platform_device *pdev)
 	iommu_clk = NULL;

 	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase");
-
-	if (!r) {
-		ret = -ENODEV;
+	regs_base = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(regs_base)) {
+		ret = PTR_ERR(regs_base);
 		goto fail_clk;
 	}

-	len = resource_size(r);
-
-	r2 = request_mem_region(r->start, len, r->name);
-	if (!r2) {
-		pr_err("Could not request memory region: start=%p, len=%d\n",
-		       (void *) r->start, len);
-		ret = -EBUSY;
-		goto fail_clk;
-	}
-
-	regs_base = ioremap(r2->start, len);
-
-	if (!regs_base) {
-		pr_err("Could not ioremap: start=%p, len=%d\n",
-		       (void *) r2->start, len);
-		ret = -EBUSY;
-		goto fail_mem;
-	}
-
 	irq = platform_get_irq_byname(pdev, "secure_irq");
 	if (irq < 0) {
 		ret = -ENODEV;
-		goto fail_io;
+		goto fail_clk;
 	}

 	msm_iommu_reset(regs_base, iommu_dev->ncb);
@@ -222,14 +202,14 @@ static int msm_iommu_probe(struct platform_device *pdev)
 	if (!par) {
 		pr_err("%s: Invalid PAR value detected\n", iommu_dev->name);
 		ret = -ENODEV;
-		goto fail_io;
+		goto fail_clk;
 	}

 	ret = request_irq(irq, msm_iommu_fault_handler, 0,
 		"msm_iommu_secure_irpt_handler", drvdata);
 	if (ret) {
 		pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
-		goto fail_io;
+		goto fail_clk;
 	}

@@ -250,10 +230,6 @@ static int msm_iommu_probe(struct platform_device *pdev)
 	clk_disable(iommu_pclk);

 	return 0;
-fail_io:
-	iounmap(regs_base);
-fail_mem:
-	release_mem_region(r->start, len);
 fail_clk:
 	if (iommu_clk) {
 		clk_disable(iommu_clk);
@@ -94,11 +94,6 @@ static int ipmmu_probe(struct platform_device *pdev)
 	struct resource *res;
 	struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data;

-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "cannot get platform resources\n");
-		return -ENOENT;
-	}
 	ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL);
 	if (!ipmmu) {
 		dev_err(&pdev->dev, "cannot allocate device data\n");
@@ -106,19 +101,18 @@ static int ipmmu_probe(struct platform_device *pdev)
 	}
 	spin_lock_init(&ipmmu->flush_lock);
 	ipmmu->dev = &pdev->dev;
-	ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
-						 resource_size(res));
-	if (!ipmmu->ipmmu_base) {
-		dev_err(&pdev->dev, "ioremap_nocache failed\n");
-		return -ENOMEM;
-	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ipmmu->ipmmu_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ipmmu->ipmmu_base))
+		return PTR_ERR(ipmmu->ipmmu_base);
+
 	ipmmu->dev_names = pdata->dev_names;
 	ipmmu->num_dev_names = pdata->num_dev_names;
 	platform_set_drvdata(pdev, ipmmu);
 	ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */
 	ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */
-	ipmmu_iommu_init(ipmmu);
-	return 0;
+	return ipmmu_iommu_init(ipmmu);
 }

 static struct platform_driver ipmmu_driver = {
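Both probe() conversions above (msm_iommu_probe() and ipmmu_probe()) replace open-coded resource checking, request_mem_region() and ioremap() with devm_ioremap_resource(), which validates the resource, claims and maps it, and releases the mapping automatically when the driver detaches. A minimal, hypothetical sketch of that idiom (the names are illustrative and not taken from this diff):

/* Hypothetical driver probe -- only illustrates the devm_ioremap_resource() idiom. */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int sketch_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* Checks res, requests the memory region, and ioremaps it, with managed cleanup. */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... program the hardware through 'base' ... */
	return 0;
}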