iommu/io-pgtable: Replace ->tlb_add_flush() with ->tlb_add_page()

The ->tlb_add_flush() callback in the io-pgtable API now looks a bit
silly:

  - It takes a size and a granule, which are always the same
  - It takes a 'bool leaf', which is always true
  - It only ever flushes a single page

With that in mind, replace it with an optional ->tlb_add_page() callback
that drops the useless parameters.

Signed-off-by: Will Deacon <will@kernel.org>
This commit is contained in:
Will Deacon
2019-07-02 16:44:41 +01:00
parent 10b7a7d912
commit abfd6fe0cd
10 changed files with 105 additions and 71 deletions
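For reference, the shape of the callback change can be sketched as follows. This is reconstructed from the arm-smmu hunks below, not copied from the io-pgtable.h side of the patch, which this page does not show:

  /* Before: every call site passed size == granule and leaf == true. */
  void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
			bool leaf, void *cookie);

  /* After: an optional callback that flushes exactly one page. */
  void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie);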

drivers/iommu/arm-smmu.c

@@ -248,10 +248,16 @@ enum arm_smmu_domain_stage {
 	ARM_SMMU_DOMAIN_BYPASS,
 };
 
+struct arm_smmu_flush_ops {
+	struct iommu_flush_ops		tlb;
+	void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
+			      bool leaf, void *cookie);
+};
+
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
 	struct io_pgtable_ops		*pgtbl_ops;
-	const struct iommu_flush_ops	*tlb_ops;
+	const struct arm_smmu_flush_ops	*flush_ops;
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
 	bool				non_strict;
@@ -551,42 +557,62 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
 				  size_t granule, void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
+	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
 
-	smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, false, cookie);
-	smmu_domain->tlb_ops->tlb_sync(cookie);
+	ops->tlb_inv_range(iova, size, granule, false, cookie);
+	ops->tlb.tlb_sync(cookie);
 }
 
 static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
 				  size_t granule, void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
+	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
 
-	smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, true, cookie);
-	smmu_domain->tlb_ops->tlb_sync(cookie);
+	ops->tlb_inv_range(iova, size, granule, true, cookie);
+	ops->tlb.tlb_sync(cookie);
 }
 
-static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = {
-	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
-	.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
-	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
-	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
-	.tlb_sync	= arm_smmu_tlb_sync_context,
+static void arm_smmu_tlb_add_page(unsigned long iova, size_t granule,
+				  void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+
+	ops->tlb_inv_range(iova, granule, granule, true, cookie);
+}
+
+static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
+	.tlb = {
+		.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
+		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
+		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
+		.tlb_add_page	= arm_smmu_tlb_add_page,
+		.tlb_sync	= arm_smmu_tlb_sync_context,
+	},
+	.tlb_inv_range	= arm_smmu_tlb_inv_range_nosync,
 };
 
-static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
-	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
-	.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
-	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
-	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
-	.tlb_sync	= arm_smmu_tlb_sync_context,
+static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
+	.tlb = {
+		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
+		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
+		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
+		.tlb_add_page	= arm_smmu_tlb_add_page,
+		.tlb_sync	= arm_smmu_tlb_sync_context,
+	},
+	.tlb_inv_range	= arm_smmu_tlb_inv_range_nosync,
 };
 
-static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
-	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
-	.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
-	.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
-	.tlb_add_flush	= arm_smmu_tlb_inv_vmid_nosync,
-	.tlb_sync	= arm_smmu_tlb_sync_vmid,
+static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
+	.tlb = {
+		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
+		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
+		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
+		.tlb_add_page	= arm_smmu_tlb_add_page,
+		.tlb_sync	= arm_smmu_tlb_sync_vmid,
+	},
+	.tlb_inv_range	= arm_smmu_tlb_inv_vmid_nosync,
 };
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -866,7 +892,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 			ias = min(ias, 32UL);
 			oas = min(oas, 32UL);
 		}
-		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
+		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
 		break;
 	case ARM_SMMU_DOMAIN_NESTED:
 		/*
@@ -886,9 +912,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 			oas = min(oas, 40UL);
 		}
 		if (smmu->version == ARM_SMMU_V2)
-			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
+			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
 		else
-			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
+			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
 		break;
 	default:
 		ret = -EINVAL;
@@ -917,7 +943,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		.ias		= ias,
 		.oas		= oas,
 		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
-		.tlb		= smmu_domain->tlb_ops,
+		.tlb		= &smmu_domain->flush_ops->tlb,
 		.iommu_dev	= smmu->dev,
 	};
 
@@ -1346,9 +1372,9 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-	if (smmu_domain->tlb_ops) {
+	if (smmu_domain->flush_ops) {
 		arm_smmu_rpm_get(smmu);
-		smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+		smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain);
 		arm_smmu_rpm_put(smmu);
 	}
 }
@@ -1359,9 +1385,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-	if (smmu_domain->tlb_ops) {
+	if (smmu_domain->flush_ops) {
 		arm_smmu_rpm_get(smmu);
-		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+		smmu_domain->flush_ops->tlb.tlb_sync(smmu_domain);
 		arm_smmu_rpm_put(smmu);
 	}
 }
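A usage note on the "optional" part: callers in the page-table code are expected to skip the hook when a driver leaves it NULL. A minimal sketch, assuming a wrapper in the style of the existing io_pgtable_tlb_*() helpers (the helper name and field layout are illustrative; the io-pgtable.h side of the patch is not shown on this page):

  static inline void io_pgtable_tlb_add_page(struct io_pgtable *iop,
					     unsigned long iova, size_t granule)
  {
	/* ->tlb_add_page() is optional; drivers may leave it NULL. */
	if (iop->cfg.tlb->tlb_add_page)
		iop->cfg.tlb->tlb_add_page(iova, granule, iop->cookie);
  }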