Merge branches 'arm/renesas', 'arm/smmu', 'ppc/pamu', 'x86/vt-d', 'x86/amd' and 'core' into next
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -55,6 +55,9 @@ struct iommu_dma_cookie {
 	};
 	struct list_head	msi_page_list;
 	spinlock_t		msi_lock;
+
+	/* Domain for flush queue callback; NULL if flush queue not in use */
+	struct iommu_domain	*fq_domain;
 };
 
 static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
@@ -257,6 +260,20 @@ static int iova_reserve_iommu_regions(struct device *dev,
 	return ret;
 }
 
+static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
+{
+	struct iommu_dma_cookie *cookie;
+	struct iommu_domain *domain;
+
+	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
+	domain = cookie->fq_domain;
+	/*
+	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
+	 * implies that ops->flush_iotlb_all must be non-NULL.
+	 */
+	domain->ops->flush_iotlb_all(domain);
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
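A note on the callback above: the flush-queue code hands it only the iova_domain, so the cookie (and from it the fq_domain set at init time) is recovered with container_of() on the embedded iovad member. A minimal standalone sketch of that generic pattern, with illustrative names rather than the kernel's definitions:

#include <stddef.h>

/* Simplified container_of: map a pointer to an embedded member back
 * to its enclosing structure. The structs here are hypothetical. */
#define container_of(ptr, type, field) \
	((type *)((char *)(ptr) - offsetof(type, field)))

struct outer {
	int data;
	struct inner { int x; } member;	/* embedded member */
};

static struct outer *outer_of(struct inner *p)
{
	return container_of(p, struct outer, member);
}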
@@ -275,6 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 	unsigned long order, base_pfn, end_pfn;
+	int attr;
 
 	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
 		return -EINVAL;
@@ -308,6 +326,13 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	}
 
 	init_iova_domain(iovad, 1UL << order, base_pfn);
+
+	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
+			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
+		cookie->fq_domain = domain;
+		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
+	}
+
 	if (!dev)
 		return 0;
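The flush queue is only set up when the driver opts in: iommu_domain_get_attr() must succeed for DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE and report a non-zero value. A minimal sketch of how a driver's domain_get_attr callback might report this; the struct layout and the non_strict field are illustrative, not any specific driver's code:

/* Hypothetical driver-side attribute handler (sketch). */
struct my_smmu_domain {
	struct iommu_domain domain;
	bool non_strict;	/* set when lazy invalidation is enabled */
};

static int my_smmu_domain_get_attr(struct iommu_domain *domain,
				   enum iommu_attr attr, void *data)
{
	struct my_smmu_domain *smmu_domain =
		container_of(domain, struct my_smmu_domain, domain);

	switch (attr) {
	case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
		*(int *)data = smmu_domain->non_strict;
		return 0;
	default:
		return -ENODEV;
	}
}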
@@ -393,6 +418,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 	/* The MSI case is only ever cleaning up its most recent allocation */
 	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
 		cookie->msi_iova -= size;
+	else if (cookie->fq_domain)	/* non-strict mode */
+		queue_iova(iovad, iova_pfn(iovad, iova),
+				size >> iova_shift(iovad), 0);
 	else
 		free_iova_fast(iovad, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad));
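The effect of the new branch: in non-strict mode a freed IOVA is parked in a flush queue rather than returned immediately, so one domain-wide IOTLB flush can cover a whole batch of unmaps. A conceptual sketch of that amortisation, not the kernel's actual flush-queue structures:

/* Conceptual model only: queued entries are drained with a single
 * flush_iotlb_all(), after which every IOVA is safe to reuse. */
struct fq_entry {
	unsigned long pfn;
	unsigned long pages;
};

static void fq_drain(struct iommu_domain *domain, struct iova_domain *iovad,
		     struct fq_entry *fq, unsigned int n)
{
	domain->ops->flush_iotlb_all(domain);	/* one flush for the batch */
	while (n--)
		free_iova_fast(iovad, fq[n].pfn, fq[n].pages);
}

The trade-off is a window during which stale IOTLB entries can still translate freed IOVAs, in exchange for far fewer synchronous invalidations on the unmap path.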
@@ -408,7 +436,9 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
 	dma_addr -= iova_off;
 	size = iova_align(iovad, size + iova_off);
 
-	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
+	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
+	if (!cookie->fq_domain)
+		iommu_tlb_sync(domain);
 	iommu_dma_free_iova(cookie, dma_addr, size);
 }
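Splitting iommu_unmap() into iommu_unmap_fast() plus an explicit iommu_tlb_sync() keeps strict mode behaviourally unchanged while letting flush-queue domains skip the synchronous invalidation, which then happens later in iommu_dma_flush_iotlb_all(). A sketch of the strict-mode invariant this relies on, assuming the unmap-fast/sync split described above:

/* Sketch: with no flush queue, unmap-fast followed by a sync is
 * equivalent to the old iommu_unmap() call - the IOVA must not be
 * reused until the IOTLB no longer translates it. */
static void strict_unmap(struct iommu_domain *domain,
			 dma_addr_t dma_addr, size_t size)
{
	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	iommu_tlb_sync(domain);
}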
@@ -491,7 +521,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count,
 void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
 		dma_addr_t *handle)
 {
-	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
+	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
 	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 	*handle = IOMMU_MAPPING_ERROR;
 }
@@ -518,7 +548,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 		unsigned long attrs, int prot, dma_addr_t *handle,
 		void (*flush_page)(struct device *, const void *, phys_addr_t))
 {
-	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 	struct page **pages;
@@ -606,9 +636,8 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-		size_t size, int prot)
+		size_t size, int prot, struct iommu_domain *domain)
 {
-	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	size_t iova_off = 0;
 	dma_addr_t iova;
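Passing the domain into __iommu_dma_map() lets callers that already hold it (the MSI path in the last hunk) skip a lookup, and lets the DMA API entry points use iommu_get_dma_domain(), which avoids the group refcounting that iommu_get_domain_for_dev() performs. A sketch of the fast lookup, assuming the device is attached to its group's default DMA domain, which every caller here guarantees:

/* Sketch of the fast-path lookup introduced alongside this change:
 * a plain pointer chase, no reference counting. */
static inline struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
	return dev->iommu_group->default_domain;
}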
@@ -632,13 +661,14 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, int prot)
 {
-	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
+	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
+			iommu_get_dma_domain(dev));
 }
 
 void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
 		enum dma_data_direction dir, unsigned long attrs)
 {
-	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
+	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
 }
 
 /*
@@ -726,7 +756,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
 int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		int nents, int prot)
 {
-	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 	struct scatterlist *s, *prev = NULL;
@@ -811,20 +841,21 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		sg = tmp;
 	}
 	end = sg_dma_address(sg) + sg_dma_len(sg);
-	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
+	__iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
 }
 
 dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	return __iommu_dma_map(dev, phys, size,
-			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
+			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+			iommu_get_dma_domain(dev));
 }
 
 void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
+	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
 }
 
 int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -850,7 +881,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = __iommu_dma_map(dev, msi_addr, size, prot);
+	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
 	if (iommu_dma_mapping_error(dev, iova))
 		goto out_free_page;
 