Merge branch 'iommu/page-sizes' into x86/amd
Conflicts:
	drivers/iommu/amd_iommu.c
drivers/iommu/intel-iommu.c
@@ -78,6 +78,24 @@
 #define LEVEL_STRIDE		(9)
 #define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware support
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
+
 static inline int agaw_to_level(int agaw)
 {
 	return agaw + 2;
@@ -3979,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 
 static int intel_iommu_map(struct iommu_domain *domain,
 			   unsigned long iova, phys_addr_t hpa,
-			   int gfp_order, int iommu_prot)
+			   size_t size, int iommu_prot)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int prot = 0;
-	size_t size;
 	int ret;
 
 	if (iommu_prot & IOMMU_READ)
@@ -3994,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
 		prot |= DMA_PTE_SNP;
 
-	size = PAGE_SIZE << gfp_order;
 	max_addr = iova + size;
 	if (dmar_domain->max_addr < max_addr) {
 		u64 end;
@@ -4017,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	return ret;
 }
 
-static int intel_iommu_unmap(struct iommu_domain *domain,
-			     unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+				unsigned long iova, size_t size)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
-	size_t size = PAGE_SIZE << gfp_order;
 	int order;
 
 	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
-	return order;
+	return PAGE_SIZE << order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4069,6 +4084,7 @@ static struct iommu_ops intel_iommu_ops = {
 	.unmap		= intel_iommu_unmap,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.domain_has_cap	= intel_iommu_domain_has_cap,
+	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
 };
 
 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
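Note (not part of the commit): the INTEL_IOMMU_PGSIZES comment added above describes the new contract between an IOMMU driver and the IOMMU core: the driver advertises the page sizes it can map in pgsize_bitmap, and the core splits a physically contiguous region into naturally aligned chunks of those sizes before calling the driver's map callback. The sketch below is a minimal, self-contained illustration of that splitting step; pick_largest_pgsize() is a hypothetical helper written for this note, not the kernel's iommu_map().

/*
 * Illustrative sketch only (assumed helper, not kernel code): pick the
 * largest page size advertised in 'pgsize_bitmap' that can be used for
 * the current chunk, in the spirit of how the IOMMU core splits a
 * contiguous region before handing it to the driver.
 */
#include <stddef.h>
#include <stdint.h>

static size_t pick_largest_pgsize(unsigned long pgsize_bitmap,
				  uint64_t iova, uint64_t paddr, size_t size)
{
	/* Any set low bit in iova|paddr limits the usable alignment. */
	uint64_t addr_merge = iova | paddr;
	unsigned int bit = sizeof(unsigned long) * 8;

	while (bit-- > 0) {
		size_t pgsize = (size_t)1 << bit;

		if (!(pgsize_bitmap & pgsize))
			continue;	/* size not advertised by the driver */
		if (pgsize > size)
			continue;	/* larger than what is left to map */
		if (addr_merge & (pgsize - 1))
			continue;	/* would break natural alignment */
		return pgsize;		/* largest usable size for this step */
	}
	return 0;			/* nothing advertised fits */
}

With a bitmap of ~0xFFFUL every power of two from 4KiB upward is advertised, so an 8KiB region at an 8KiB-aligned address would be handled in a single step, while the same region starting on an odd 4KiB boundary would fall back to two 4KiB steps.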