Merge branch 'api-features' into x86/vt-d
@@ -2608,7 +2608,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 
         /* Everything is mapped - write the right values into s->dma_address */
         for_each_sg(sglist, s, nelems, i) {
-                s->dma_address += address + s->offset;
+                /*
+                 * Add in the remaining piece of the scatter-gather offset that
+                 * was masked out when we were determining the physical address
+                 * via (sg_phys(s) & PAGE_MASK) earlier.
+                 */
+                s->dma_address += address + (s->offset & ~PAGE_MASK);
                 s->dma_length = s->length;
         }
 
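
For illustration only (not part of the commit), the sub-page remainder that the hunk above adds back can be checked with a small standalone program; the 4 KiB page size and the sg->offset value used here are assumptions:

/*
 * Editorial sketch: the page-aligned part of the offset is already covered
 * by the whole pages mapped earlier, so only the low bits that were dropped
 * by (sg_phys(s) & PAGE_MASK) still need to be added to s->dma_address.
 */
#include <stdio.h>

#define EX_PAGE_SIZE 0x1000UL                   /* assumed 4 KiB pages */
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))      /* mirrors PAGE_MASK */

int main(void)
{
        unsigned long offset = 0x2340;  /* hypothetical sg->offset > PAGE_SIZE */

        /* Prints 0x340: the piece the new code re-adds. */
        printf("remainder added back: %#lx\n", offset & ~EX_PAGE_MASK);
        return 0;
}

With an offset of 0x2340, the 0x2000 bytes' worth of whole pages are already part of the mapped range, so only 0x340 is added back, which is what the replacement line above computes.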
@@ -3164,21 +3169,24 @@ static void amd_iommu_get_resv_regions(struct device *dev,
                 return;
 
         list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+                int type, prot = 0;
                 size_t length;
-                int prot = 0;
 
                 if (devid < entry->devid_start || devid > entry->devid_end)
                         continue;
 
+                type = IOMMU_RESV_DIRECT;
                 length = entry->address_end - entry->address_start;
                 if (entry->prot & IOMMU_PROT_IR)
                         prot |= IOMMU_READ;
                 if (entry->prot & IOMMU_PROT_IW)
                         prot |= IOMMU_WRITE;
+                if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
+                        /* Exclusion range */
+                        type = IOMMU_RESV_RESERVED;
 
                 region = iommu_alloc_resv_region(entry->address_start,
-                                                 length, prot,
-                                                 IOMMU_RESV_DIRECT);
+                                                 length, prot, type);
                 if (!region) {
                         dev_err(dev, "Out of memory allocating dm-regions\n");
                         return;
@@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m)
         if (e == NULL)
                 return -ENOMEM;
 
+        if (m->flags & IVMD_FLAG_EXCL_RANGE)
+                init_exclusion_range(m);
+
         switch (m->type) {
         default:
                 kfree(e);
@@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
 
         while (p < end) {
                 m = (struct ivmd_header *)p;
-                if (m->flags & IVMD_FLAG_EXCL_RANGE)
-                        init_exclusion_range(m);
-                else if (m->flags & IVMD_FLAG_UNITY_MAP)
+                if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
                         init_unity_map_range(m);
 
                 p += m->length;
@@ -374,6 +374,8 @@
 #define IOMMU_PROT_IR 0x01
 #define IOMMU_PROT_IW 0x02
 
+#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2)
+
 /* IOMMU capabilities */
 #define IOMMU_CAP_IOTLB 24
 #define IOMMU_CAP_NPCACHE 26
@@ -1538,6 +1538,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
         u32 pmen;
         unsigned long flags;
 
+        if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
+                return;
+
         raw_spin_lock_irqsave(&iommu->register_lock, flags);
         pmen = readl(iommu->reg + DMAR_PMEN_REG);
         pmen &= ~DMA_PMEN_EPM;
@@ -5332,7 +5335,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
 
         ctx_lo = context[0].lo;
 
-        sdev->did = domain->iommu_did[iommu->seq_id];
+        sdev->did = FLPT_DEFAULT_DID;
         sdev->sid = PCI_DEVID(info->bus, info->devfn);
 
         if (!(ctx_lo & CONTEXT_PASIDE)) {
@@ -160,6 +160,14 @@
 
 #define ARM_V7S_TCR_PD1 BIT(5)
 
+#ifdef CONFIG_ZONE_DMA32
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
+#else
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
+#endif
+
 typedef u32 arm_v7s_iopte;
 
 static bool selftest_running;
@@ -197,13 +205,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
         void *table = NULL;
 
         if (lvl == 1)
-                table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
+                table = (void *)__get_free_pages(
+                        __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
         else if (lvl == 2)
-                table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
+                table = kmem_cache_zalloc(data->l2_tables, gfp);
         phys = virt_to_phys(table);
-        if (phys != (arm_v7s_iopte)phys)
+        if (phys != (arm_v7s_iopte)phys) {
                 /* Doesn't fit in PTE */
+                dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
                 goto out_free;
+        }
         if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
                 dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
                 if (dma_mapping_error(dev, dma))
@@ -733,7 +744,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
         data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
                                             ARM_V7S_TABLE_SIZE(2),
                                             ARM_V7S_TABLE_SIZE(2),
-                                            SLAB_CACHE_DMA, NULL);
+                                            ARM_V7S_TABLE_SLAB_FLAGS, NULL);
         if (!data->l2_tables)
                 goto out_free_data;
 
@@ -1105,10 +1105,12 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 
         dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
         if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
-                dev_warn(dev,
-                         "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
-                         iommu_def_domain_type);
                 dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
+                if (dom) {
+                        dev_warn(dev,
+                                 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
+                                 iommu_def_domain_type);
+                }
         }
 
         group->default_domain = dom;
@@ -2037,3 +2039,203 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
         return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
+
+/*
+ * Per device IOMMU features.
+ */
+bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
+{
+        const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+        if (ops && ops->dev_has_feat)
+                return ops->dev_has_feat(dev, feat);
+
+        return false;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
+
+int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+        const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+        if (ops && ops->dev_enable_feat)
+                return ops->dev_enable_feat(dev, feat);
+
+        return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
+
+/*
+ * The device drivers should do the necessary cleanups before calling this.
+ * For example, before disabling the aux-domain feature, the device driver
+ * should detach all aux-domains. Otherwise, this will return -EBUSY.
+ */
+int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+        const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+        if (ops && ops->dev_disable_feat)
+                return ops->dev_disable_feat(dev, feat);
+
+        return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
+
+bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
+{
+        const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+        if (ops && ops->dev_feat_enabled)
+                return ops->dev_feat_enabled(dev, feat);
+
+        return false;
+}
+EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
+
+/*
+ * Aux-domain specific attach/detach.
+ *
+ * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
+ * true. Also, as long as domains are attached to a device through this
+ * interface, any tries to call iommu_attach_device() should fail
+ * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
+ * This should make us safe against a device being attached to a guest as a
+ * whole while there are still pasid users on it (aux and sva).
+ */
+int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
+{
+        int ret = -ENODEV;
+
+        if (domain->ops->aux_attach_dev)
+                ret = domain->ops->aux_attach_dev(domain, dev);
+
+        if (!ret)
+                trace_attach_device_to_domain(dev);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
+
+void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
+{
+        if (domain->ops->aux_detach_dev) {
+                domain->ops->aux_detach_dev(domain, dev);
+                trace_detach_device_from_domain(dev);
+        }
+}
+EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
+
+int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+{
+        int ret = -ENODEV;
+
+        if (domain->ops->aux_get_pasid)
+                ret = domain->ops->aux_get_pasid(domain, dev);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
+
+/**
+ * iommu_sva_bind_device() - Bind a process address space to a device
+ * @dev: the device
+ * @mm: the mm to bind, caller must hold a reference to it
+ *
+ * Create a bond between device and address space, allowing the device to access
+ * the mm using the returned PASID. If a bond already exists between @device and
+ * @mm, it is returned and an additional reference is taken. Caller must call
+ * iommu_sva_unbind_device() to release each reference.
+ *
+ * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
+ * initialize the required SVA features.
+ *
+ * On error, returns an ERR_PTR value.
+ */
+struct iommu_sva *
+iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
+{
+        struct iommu_group *group;
+        struct iommu_sva *handle = ERR_PTR(-EINVAL);
+        const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+        if (!ops || !ops->sva_bind)
+                return ERR_PTR(-ENODEV);
+
+        group = iommu_group_get(dev);
+        if (!group)
+                return ERR_PTR(-ENODEV);
+
+        /* Ensure device count and domain don't change while we're binding */
+        mutex_lock(&group->mutex);
+
+        /*
+         * To keep things simple, SVA currently doesn't support IOMMU groups
+         * with more than one device. Existing SVA-capable systems are not
+         * affected by the problems that required IOMMU groups (lack of ACS
+         * isolation, device ID aliasing and other hardware issues).
+         */
+        if (iommu_group_device_count(group) != 1)
+                goto out_unlock;
+
+        handle = ops->sva_bind(dev, mm, drvdata);
+
+out_unlock:
+        mutex_unlock(&group->mutex);
+        iommu_group_put(group);
+
+        return handle;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
+
+/**
+ * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
+ * @handle: the handle returned by iommu_sva_bind_device()
+ *
+ * Put reference to a bond between device and address space. The device should
+ * not be issuing any more transaction for this PASID. All outstanding page
+ * requests for this PASID must have been flushed to the IOMMU.
+ *
+ * Returns 0 on success, or an error value
+ */
+void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+        struct iommu_group *group;
+        struct device *dev = handle->dev;
+        const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+        if (!ops || !ops->sva_unbind)
+                return;
+
+        group = iommu_group_get(dev);
+        if (!group)
+                return;
+
+        mutex_lock(&group->mutex);
+        ops->sva_unbind(handle);
+        mutex_unlock(&group->mutex);
+
+        iommu_group_put(group);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
+
+int iommu_sva_set_ops(struct iommu_sva *handle,
+                      const struct iommu_sva_ops *sva_ops)
+{
+        if (handle->ops && handle->ops != sva_ops)
+                return -EEXIST;
+
+        handle->ops = sva_ops;
+        return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
+
+int iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+        const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
+
+        if (!ops || !ops->sva_get_pasid)
+                return IOMMU_PASID_INVALID;
+
+        return ops->sva_get_pasid(handle);
+}
+EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
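
The kernel-doc above spells out the calling convention for the new SVA interface. As a minimal consumer sketch (editorial, not part of the commit; the function name and surrounding driver context are hypothetical, only the iommu_* calls and the IOMMU_DEV_FEAT_SVA / IOMMU_PASID_INVALID symbols come from the API added above):

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>

static struct iommu_sva *example_bind_current_mm(struct device *dev)
{
        struct iommu_sva *handle;
        int ret, pasid;

        /* Per the kernel-doc, SVA must be enabled on the device first. */
        ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
        if (ret)
                return ERR_PTR(ret);

        /* Bind the current process address space; the mm must stay referenced. */
        handle = iommu_sva_bind_device(dev, current->mm, NULL);
        if (IS_ERR(handle)) {
                iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
                return handle;
        }

        pasid = iommu_sva_get_pasid(handle);
        if (pasid == IOMMU_PASID_INVALID) {
                iommu_sva_unbind_device(handle);
                iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
                return ERR_PTR(-ENODEV);
        }

        /* Program @pasid into the device; drop the bond later with
         * iommu_sva_unbind_device(handle) once DMA for it has stopped.
         */
        return handle;
}

The handle carries the PASID for as long as the bond exists; per the unbind kernel-doc, the device must have quiesced all transactions and page requests for that PASID before the reference is released.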
@@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
                 curr_iova = rb_entry(curr, struct iova, node);
         } while (curr && new_pfn <= curr_iova->pfn_hi);
 
-        if (limit_pfn < size || new_pfn < iovad->start_pfn)
+        if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+                iovad->max32_alloc_size = size;
                 goto iova32_full;
+        }
 
         /* pfn_lo will point to size aligned address if size_aligned is set */
         new->pfn_lo = new_pfn;
@@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
         return 0;
 
 iova32_full:
-        iovad->max32_alloc_size = size;
         spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
         return -ENOMEM;
 }
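
Returning to the aux-domain interface added in the IOMMU core hunks above, its comment block describes the intended attach/detach discipline. A minimal sketch of that pattern (editorial, not part of the commit; the caller and the unmanaged domain passed in are hypothetical, only the iommu_* calls come from the new interface):

#include <linux/iommu.h>

static int example_attach_aux_domain(struct device *dev,
                                     struct iommu_domain *aux_dom)
{
        int ret, pasid;

        ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
        if (ret)
                return ret;

        ret = iommu_aux_attach_device(aux_dom, dev);
        if (ret)
                goto err_disable;

        pasid = iommu_aux_get_pasid(aux_dom, dev);
        if (pasid < 0) {
                ret = pasid;
                goto err_detach;
        }

        /* Tag DMA from the mediated device context with @pasid here. */
        return 0;

err_detach:
        iommu_aux_detach_device(aux_dom, dev);
err_disable:
        /* Detach before disabling the feature. */
        iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX);
        return ret;
}

Detaching before disabling mirrors the note on iommu_dev_disable_feature(): with aux-domains still attached, disabling the feature is expected to return -EBUSY.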