Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (53 commits)
  iommu/amd: Set IOTLB invalidation timeout
  iommu/amd: Init stats for iommu=pt
  iommu/amd: Remove unnecessary cache flushes in amd_iommu_resume
  iommu/amd: Add invalidate-context call-back
  iommu/amd: Add amd_iommu_device_info() function
  iommu/amd: Adapt IOMMU driver to PCI register name changes
  iommu/amd: Add invalid_ppr callback
  iommu/amd: Implement notifiers for IOMMUv2
  iommu/amd: Implement IO page-fault handler
  iommu/amd: Add routines to bind/unbind a pasid
  iommu/amd: Implement device aquisition code for IOMMUv2
  iommu/amd: Add driver stub for AMD IOMMUv2 support
  iommu/amd: Add stat counter for IOMMUv2 events
  iommu/amd: Add device errata handling
  iommu/amd: Add function to get IOMMUv2 domain for pdev
  iommu/amd: Implement function to send PPR completions
  iommu/amd: Implement functions to manage GCR3 table
  iommu/amd: Implement IOMMUv2 TLB flushing routines
  iommu/amd: Add support for IOMMUv2 domain mode
  iommu/amd: Add amd_iommu_domain_direct_map function
  ...
@@ -79,6 +79,24 @@
 #define LEVEL_STRIDE        (9)
 #define LEVEL_MASK          (((u64)1 << LEVEL_STRIDE) - 1)
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
+
 static inline int agaw_to_level(int agaw)
 {
     return agaw + 2;
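The new INTEL_IOMMU_PGSIZES bitmap uses the IOMMU core's convention that a set bit k advertises support for 2^k-byte pages, so ~0xFFFUL claims every power-of-two size of 4KiB and up. Below is a minimal user-space sketch (not kernel code; best_pgsize is a hypothetical helper) of how a mapping loop could pick a step size from such a bitmap:

#include <stdio.h>

#define INTEL_IOMMU_PGSIZES (~0xFFFUL)

/* Largest advertised page size that fits the remaining length and the
 * alignment of the current address; 0 if none fits. */
static unsigned long best_pgsize(unsigned long bitmap,
                                 unsigned long addr, unsigned long len)
{
    unsigned long size = 0;

    for (unsigned int k = 12; k < 8 * sizeof(bitmap); k++) {
        unsigned long sz = 1UL << k;

        if ((bitmap & sz) && sz <= len && (addr & (sz - 1)) == 0)
            size = sz;
    }
    return size;
}

int main(void)
{
    /* 6MiB at a 2MiB-aligned address: a 4MiB step would violate the
     * alignment test, so the loop settles on 2MiB (prints 2097152). */
    printf("%lu\n", best_pgsize(INTEL_IOMMU_PGSIZES, 0x200000, 6UL << 20));
    return 0;
}

With ~0xFFFUL every power-of-two size of at least 4KiB passes the bitmap test, which is exactly the "all page sizes that are an order of 4KiB" behavior the comment describes.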
@@ -3979,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 
 static int intel_iommu_map(struct iommu_domain *domain,
                unsigned long iova, phys_addr_t hpa,
-               int gfp_order, int iommu_prot)
+               size_t size, int iommu_prot)
 {
     struct dmar_domain *dmar_domain = domain->priv;
     u64 max_addr;
     int prot = 0;
-    size_t size;
     int ret;
 
     if (iommu_prot & IOMMU_READ)
@@ -3994,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
     if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
         prot |= DMA_PTE_SNP;
 
-    size = PAGE_SIZE << gfp_order;
     max_addr = iova + size;
     if (dmar_domain->max_addr < max_addr) {
         u64 end;
@@ -4017,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
     return ret;
 }
 
-static int intel_iommu_unmap(struct iommu_domain *domain,
-                 unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+                unsigned long iova, size_t size)
 {
     struct dmar_domain *dmar_domain = domain->priv;
-    size_t size = PAGE_SIZE << gfp_order;
     int order;
 
     order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
     if (dmar_domain->max_addr == iova + size)
         dmar_domain->max_addr = iova;
 
-    return order;
+    return PAGE_SIZE << order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
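The conversion above swaps the old 4KiB-page-order parameter for a byte count; the two encodings are related by size == PAGE_SIZE << order, which is also why the reworked intel_iommu_unmap returns PAGE_SIZE << order instead of the raw order. A standalone sketch of that equivalence (not kernel code; assumes 4KiB pages as on x86):

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL /* assumed page size for this sketch */

/* What old callers computed before handing a size to map/unmap. */
static size_t order_to_bytes(int order)
{
    return PAGE_SIZE << order;
}

int main(void)
{
    assert(order_to_bytes(0) == 4096);            /* one 4KiB page  */
    assert(order_to_bytes(9) == 2 * 1024 * 1024); /* 2MiB superpage */
    return 0;
}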
@@ -4060,6 +4075,54 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
     return 0;
 }
 
+/*
+ * Group numbers are arbitrary.  Devices with the same group number
+ * indicate the iommu cannot differentiate between them.  To avoid
+ * tracking used groups we just use the seg|bus|devfn of the lowest
+ * level at which we're able to differentiate devices.
+ */
+static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+    struct pci_dev *pdev = to_pci_dev(dev);
+    struct pci_dev *bridge;
+    union {
+        struct {
+            u8 devfn;
+            u8 bus;
+            u16 segment;
+        } pci;
+        u32 group;
+    } id;
+
+    if (iommu_no_mapping(dev))
+        return -ENODEV;
+
+    id.pci.segment = pci_domain_nr(pdev->bus);
+    id.pci.bus = pdev->bus->number;
+    id.pci.devfn = pdev->devfn;
+
+    if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
+        return -ENODEV;
+
+    bridge = pci_find_upstream_pcie_bridge(pdev);
+    if (bridge) {
+        if (pci_is_pcie(bridge)) {
+            id.pci.bus = bridge->subordinate->number;
+            id.pci.devfn = 0;
+        } else {
+            id.pci.bus = bridge->bus->number;
+            id.pci.devfn = bridge->devfn;
+        }
+    }
+
+    if (!pdev->is_virtfn && iommu_group_mf)
+        id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
+
+    *groupid = id.group;
+
+    return 0;
+}
+
 static struct iommu_ops intel_iommu_ops = {
     .domain_init    = intel_iommu_domain_init,
     .domain_destroy = intel_iommu_domain_destroy,
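The union in intel_iommu_device_group relies on a PCI segment (16 bits), bus (8 bits), and devfn (8 bits) packing exactly into a u32, so the address triple itself doubles as the group number and no group-allocation tracking is needed. A standalone user-space sketch of the same overlay (not kernel code; the printed value assumes a little-endian layout):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    union {
        struct {
            uint8_t  devfn;   /* slot/function */
            uint8_t  bus;     /* bus number    */
            uint16_t segment; /* PCI domain    */
        } pci;
        uint32_t group;
    } id;

    id.pci.segment = 0;
    id.pci.bus     = 0x3a;
    id.pci.devfn   = 0x08; /* device 1, function 0 */

    /* On a little-endian machine this prints 0x00003a08. */
    printf("group id: 0x%08" PRIx32 "\n", id.group);
    return 0;
}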
@@ -4069,6 +4132,8 @@ static struct iommu_ops intel_iommu_ops = {
     .unmap          = intel_iommu_unmap,
     .iova_to_phys   = intel_iommu_iova_to_phys,
     .domain_has_cap = intel_iommu_domain_has_cap,
+    .device_group   = intel_iommu_device_group,
+    .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
 };
 
 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)