iommu: Add gfp parameter to io_pgtable_ops->map()
Currently the ARM page tables are always allocated with GFP_ATOMIC,
but commit 781ca2de89
("iommu: Add gfp parameter to iommu_ops::map")
added a gfp_t parameter to the iommu_ops->map() function, so
io_pgtable_ops->map() should use the gfp parameter passed down from
iommu_ops->map() to allocate the page tables. This avoids draining the
memory allocator's atomic pools in non-atomic contexts.
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/3093df4cb95497aaf713fca623ce4ecebb197c2e.1591930156.git.baolin.wang@linux.alibaba.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:

committed by
Joerg Roedel

parent
9930264fd9
commit
f34ce7a701
@@ -262,7 +262,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
|
|||||||
while (len) {
|
while (len) {
|
||||||
size_t pgsize = get_pgsize(iova | paddr, len);
|
size_t pgsize = get_pgsize(iova | paddr, len);
|
||||||
|
|
||||||
ops->map(ops, iova, paddr, pgsize, prot);
|
ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
|
||||||
iova += pgsize;
|
iova += pgsize;
|
||||||
paddr += pgsize;
|
paddr += pgsize;
|
||||||
len -= pgsize;
|
len -= pgsize;
|
||||||
|
@@ -2850,7 +2850,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
|
|||||||
if (!ops)
|
if (!ops)
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
return ops->map(ops, iova, paddr, size, prot);
|
return ops->map(ops, iova, paddr, size, prot, gfp);
|
||||||
}
|
}
|
||||||
|
|
||||||
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
|
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
|
||||||
|
@@ -1227,7 +1227,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
|
|||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
arm_smmu_rpm_get(smmu);
|
arm_smmu_rpm_get(smmu);
|
||||||
ret = ops->map(ops, iova, paddr, size, prot);
|
ret = ops->map(ops, iova, paddr, size, prot, gfp);
|
||||||
arm_smmu_rpm_put(smmu);
|
arm_smmu_rpm_put(smmu);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
@@ -470,7 +470,7 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
|
|||||||
|
|
||||||
static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
|
static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
|
||||||
phys_addr_t paddr, size_t size, int prot,
|
phys_addr_t paddr, size_t size, int prot,
|
||||||
int lvl, arm_v7s_iopte *ptep)
|
int lvl, arm_v7s_iopte *ptep, gfp_t gfp)
|
||||||
{
|
{
|
||||||
struct io_pgtable_cfg *cfg = &data->iop.cfg;
|
struct io_pgtable_cfg *cfg = &data->iop.cfg;
|
||||||
arm_v7s_iopte pte, *cptep;
|
arm_v7s_iopte pte, *cptep;
|
||||||
@@ -491,7 +491,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
|
|||||||
/* Grab a pointer to the next level */
|
/* Grab a pointer to the next level */
|
||||||
pte = READ_ONCE(*ptep);
|
pte = READ_ONCE(*ptep);
|
||||||
if (!pte) {
|
if (!pte) {
|
||||||
cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data);
|
cptep = __arm_v7s_alloc_table(lvl + 1, gfp, data);
|
||||||
if (!cptep)
|
if (!cptep)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
@@ -512,11 +512,11 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Rinse, repeat */
|
/* Rinse, repeat */
|
||||||
return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);
|
return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
|
static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
|
||||||
phys_addr_t paddr, size_t size, int prot)
|
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
|
||||||
{
|
{
|
||||||
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
|
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
|
||||||
struct io_pgtable *iop = &data->iop;
|
struct io_pgtable *iop = &data->iop;
|
||||||
@@ -530,7 +530,7 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
|
|||||||
paddr >= (1ULL << data->iop.cfg.oas)))
|
paddr >= (1ULL << data->iop.cfg.oas)))
|
||||||
return -ERANGE;
|
return -ERANGE;
|
||||||
|
|
||||||
ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
|
ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp);
|
||||||
/*
|
/*
|
||||||
* Synchronise all PTE updates for the new mapping before there's
|
* Synchronise all PTE updates for the new mapping before there's
|
||||||
* a chance for anything to kick off a table walk for the new iova.
|
* a chance for anything to kick off a table walk for the new iova.
|
||||||
@@ -922,12 +922,12 @@ static int __init arm_v7s_do_selftests(void)
|
|||||||
if (ops->map(ops, iova, iova, size, IOMMU_READ |
|
if (ops->map(ops, iova, iova, size, IOMMU_READ |
|
||||||
IOMMU_WRITE |
|
IOMMU_WRITE |
|
||||||
IOMMU_NOEXEC |
|
IOMMU_NOEXEC |
|
||||||
IOMMU_CACHE))
|
IOMMU_CACHE, GFP_KERNEL))
|
||||||
return __FAIL(ops);
|
return __FAIL(ops);
|
||||||
|
|
||||||
/* Overlapping mappings */
|
/* Overlapping mappings */
|
||||||
if (!ops->map(ops, iova, iova + size, size,
|
if (!ops->map(ops, iova, iova + size, size,
|
||||||
IOMMU_READ | IOMMU_NOEXEC))
|
IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
|
||||||
return __FAIL(ops);
|
return __FAIL(ops);
|
||||||
|
|
||||||
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
|
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
|
||||||
@@ -946,7 +946,7 @@ static int __init arm_v7s_do_selftests(void)
|
|||||||
return __FAIL(ops);
|
return __FAIL(ops);
|
||||||
|
|
||||||
/* Remap of partial unmap */
|
/* Remap of partial unmap */
|
||||||
if (ops->map(ops, iova_start + size, size, size, IOMMU_READ))
|
if (ops->map(ops, iova_start + size, size, size, IOMMU_READ, GFP_KERNEL))
|
||||||
return __FAIL(ops);
|
return __FAIL(ops);
|
||||||
|
|
||||||
if (ops->iova_to_phys(ops, iova_start + size + 42)
|
if (ops->iova_to_phys(ops, iova_start + size + 42)
|
||||||
@@ -967,7 +967,7 @@ static int __init arm_v7s_do_selftests(void)
|
|||||||
return __FAIL(ops);
|
return __FAIL(ops);
|
||||||
|
|
||||||
/* Remap full block */
|
/* Remap full block */
|
||||||
if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
|
if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
|
||||||
return __FAIL(ops);
|
return __FAIL(ops);
|
||||||
|
|
||||||
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
|
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
|
||||||
|
@@ -355,7 +355,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
|
|||||||
|
|
||||||
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
|
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
|
||||||
phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
|
phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
|
||||||
int lvl, arm_lpae_iopte *ptep)
|
int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
|
||||||
{
|
{
|
||||||
arm_lpae_iopte *cptep, pte;
|
arm_lpae_iopte *cptep, pte;
|
||||||
size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
|
size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
|
||||||
@@ -376,7 +376,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
|
|||||||
/* Grab a pointer to the next level */
|
/* Grab a pointer to the next level */
|
||||||
pte = READ_ONCE(*ptep);
|
pte = READ_ONCE(*ptep);
|
||||||
if (!pte) {
|
if (!pte) {
|
||||||
cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
|
cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
|
||||||
if (!cptep)
|
if (!cptep)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
@@ -396,7 +396,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Rinse, repeat */
|
/* Rinse, repeat */
|
||||||
return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
|
return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
|
||||||
}
|
}
|
||||||
|
|
||||||
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
|
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
|
||||||
@@ -461,7 +461,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
|
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
|
||||||
phys_addr_t paddr, size_t size, int iommu_prot)
|
phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
|
||||||
{
|
{
|
||||||
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
|
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
|
||||||
struct io_pgtable_cfg *cfg = &data->iop.cfg;
|
struct io_pgtable_cfg *cfg = &data->iop.cfg;
|
||||||
@@ -483,7 +483,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
|
|||||||
return -ERANGE;
|
return -ERANGE;
|
||||||
|
|
||||||
prot = arm_lpae_prot_to_pte(data, iommu_prot);
|
prot = arm_lpae_prot_to_pte(data, iommu_prot);
|
||||||
ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
|
ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
|
||||||
/*
|
/*
|
||||||
* Synchronise all PTE updates for the new mapping before there's
|
* Synchronise all PTE updates for the new mapping before there's
|
||||||
* a chance for anything to kick off a table walk for the new iova.
|
* a chance for anything to kick off a table walk for the new iova.
|
||||||
@@ -1178,12 +1178,12 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
|
|||||||
if (ops->map(ops, iova, iova, size, IOMMU_READ |
|
if (ops->map(ops, iova, iova, size, IOMMU_READ |
|
||||||
IOMMU_WRITE |
|
IOMMU_WRITE |
|
||||||
IOMMU_NOEXEC |
|
IOMMU_NOEXEC |
|
||||||
IOMMU_CACHE))
|
IOMMU_CACHE, GFP_KERNEL))
|
||||||
return __FAIL(ops, i);
|
return __FAIL(ops, i);
|
||||||
|
|
||||||
/* Overlapping mappings */
|
/* Overlapping mappings */
|
||||||
if (!ops->map(ops, iova, iova + size, size,
|
if (!ops->map(ops, iova, iova + size, size,
|
||||||
IOMMU_READ | IOMMU_NOEXEC))
|
IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
|
||||||
return __FAIL(ops, i);
|
return __FAIL(ops, i);
|
||||||
|
|
||||||
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
|
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
|
||||||
@@ -1198,7 +1198,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
|
|||||||
return __FAIL(ops, i);
|
return __FAIL(ops, i);
|
||||||
|
|
||||||
/* Remap of partial unmap */
|
/* Remap of partial unmap */
|
||||||
if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
|
if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
|
||||||
return __FAIL(ops, i);
|
return __FAIL(ops, i);
|
||||||
|
|
||||||
if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
|
if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
|
||||||
@@ -1216,7 +1216,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
|
|||||||
return __FAIL(ops, i);
|
return __FAIL(ops, i);
|
||||||
|
|
||||||
/* Remap full block */
|
/* Remap full block */
|
||||||
if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
|
if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
|
||||||
return __FAIL(ops, i);
|
return __FAIL(ops, i);
|
||||||
|
|
||||||
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
|
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
|
||||||
|
@@ -687,7 +687,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
|
|||||||
if (!domain)
|
if (!domain)
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
return domain->iop->map(domain->iop, iova, paddr, size, prot);
|
return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
|
||||||
}
|
}
|
||||||
|
|
||||||
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
|
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
|
||||||
|
@@ -491,7 +491,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
|
|||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
spin_lock_irqsave(&priv->pgtlock, flags);
|
spin_lock_irqsave(&priv->pgtlock, flags);
|
||||||
ret = priv->iop->map(priv->iop, iova, pa, len, prot);
|
ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
|
||||||
spin_unlock_irqrestore(&priv->pgtlock, flags);
|
spin_unlock_irqrestore(&priv->pgtlock, flags);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
@@ -397,7 +397,7 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
|
|||||||
paddr |= BIT_ULL(32);
|
paddr |= BIT_ULL(32);
|
||||||
|
|
||||||
/* Synchronize with the tlb_lock */
|
/* Synchronize with the tlb_lock */
|
||||||
return dom->iop->map(dom->iop, iova, paddr, size, prot);
|
return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
|
||||||
}
|
}
|
||||||
|
|
||||||
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
|
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
|
||||||
|
@@ -441,7 +441,7 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
|
|||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
|
spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
|
||||||
ret = ops->map(ops, iova, paddr, size, prot);
|
ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC);
|
||||||
spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
|
spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@@ -155,7 +155,7 @@ struct io_pgtable_cfg {
|
|||||||
*/
|
*/
|
||||||
struct io_pgtable_ops {
|
struct io_pgtable_ops {
|
||||||
int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
|
int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
|
||||||
phys_addr_t paddr, size_t size, int prot);
|
phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
|
||||||
size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
|
size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
|
||||||
size_t size, struct iommu_iotlb_gather *gather);
|
size_t size, struct iommu_iotlb_gather *gather);
|
||||||
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
|
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
|
||||||
|
Reference in New Issue
Block a user