Merge branch 'arm/smmu' into arm/mediatek

Author: Joerg Roedel
Date:   2019-08-30 16:12:10 +02:00
29 changed files with 1965 additions and 1065 deletions

include/linux/io-pgtable.h

@@ -1,7 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __IO_PGTABLE_H
 #define __IO_PGTABLE_H
+
 #include <linux/bitops.h>
+#include <linux/iommu.h>
 
 /*
  * Public API for use by IOMMU drivers
@@ -17,22 +19,31 @@ enum io_pgtable_fmt {
 };
 
 /**
- * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
+ * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
  *
- * @tlb_flush_all: Synchronously invalidate the entire TLB context.
- * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
- * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
- *                 any corresponding page table updates are visible to the
- *                 IOMMU.
+ * @tlb_flush_all:  Synchronously invalidate the entire TLB context.
+ * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
+ *                  (sometimes referred to as the "walk cache") for a virtual
+ *                  address range.
+ * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual
+ *                  address range.
+ * @tlb_add_page:   Optional callback to queue up leaf TLB invalidation for a
+ *                  single page.  IOMMUs that cannot batch TLB invalidation
+ *                  operations efficiently will typically issue them here, but
+ *                  others may decide to update the iommu_iotlb_gather structure
+ *                  and defer the invalidation until iommu_tlb_sync() instead.
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
  */
-struct iommu_gather_ops {
+struct iommu_flush_ops {
 	void (*tlb_flush_all)(void *cookie);
-	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
-			      bool leaf, void *cookie);
-	void (*tlb_sync)(void *cookie);
+	void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
+			       void *cookie);
+	void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
+			       void *cookie);
+	void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
+			     unsigned long iova, size_t granule, void *cookie);
 };
 
 /**
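
For illustration, a minimal sketch of the driver-side shape of the new ops (the my_* identifiers, including the hardware-invalidation helpers, are hypothetical; the real conversions in this series live in drivers such as arm-smmu and msm_iommu):

/* Hypothetical hardware-invalidation helpers, declared only so the
 * sketch is self-contained; stand-ins for real register pokes. */
static void my_hw_inv_all(void *cookie);
static void my_hw_inv_range(void *cookie, unsigned long iova, size_t size,
			    size_t granule);

static void my_tlb_flush_all(void *cookie)
{
	my_hw_inv_all(cookie);
}

static void my_tlb_flush_walk(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{
	my_hw_inv_range(cookie, iova, size, granule);
}

static void my_tlb_flush_leaf(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{
	my_hw_inv_range(cookie, iova, size, granule);
}

static void my_tlb_add_page(struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t granule, void *cookie)
{
	/* This hardware cannot batch, so invalidate the page right away;
	 * a batching driver would update *gather and flush on sync. */
	my_hw_inv_range(cookie, iova, granule, granule);
}

static const struct iommu_flush_ops my_flush_ops = {
	.tlb_flush_all	= my_tlb_flush_all,
	.tlb_flush_walk	= my_tlb_flush_walk,
	.tlb_flush_leaf	= my_tlb_flush_leaf,
	.tlb_add_page	= my_tlb_add_page,	/* optional */
};

Note that tlb_flush_walk and tlb_flush_leaf remain synchronous; tlb_add_page is the only hook allowed to defer its work into the gather structure.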
@@ -83,7 +94,7 @@ struct io_pgtable_cfg {
 	unsigned int			ias;
 	unsigned int			oas;
 	bool				coherent_walk;
-	const struct iommu_gather_ops	*tlb;
+	const struct iommu_flush_ops	*tlb;
 	struct device			*iommu_dev;
 
 	/* Low-level data specific to the table format */
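
On the config side the change is purely the type rename; hooking the ops up is unchanged. A sketch, reusing the hypothetical my_flush_ops above (the sizes, address widths and the dev/cookie variables are illustrative):

	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.coherent_walk	= true,
		.tlb		= &my_flush_ops,	/* now iommu_flush_ops */
		.iommu_dev	= dev,
	};
	struct io_pgtable_ops *ops;

	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);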
@@ -127,7 +138,7 @@ struct io_pgtable_ops {
 	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
 		   phys_addr_t paddr, size_t size, int prot);
 	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
-			size_t size);
+			size_t size, struct iommu_iotlb_gather *gather);
 	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
 				    unsigned long iova);
 };
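
The extra parameter means unmap callers now thread a gather structure through, so that per-page invalidations can be batched and flushed once at the end. A sketch of the calling pattern, assuming ops came from alloc_io_pgtable_ops() and domain is the owning iommu_domain:

	struct iommu_iotlb_gather gather;
	size_t unmapped;

	iommu_iotlb_gather_init(&gather);
	unmapped = ops->unmap(ops, iova, SZ_4K, &gather);
	/* Flush anything that tlb_add_page() queued up in the gather. */
	iommu_tlb_sync(domain, &gather);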
@@ -183,15 +194,27 @@ static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
 	iop->cfg.tlb->tlb_flush_all(iop->cookie);
 }
 
-static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
-		unsigned long iova, size_t size, size_t granule, bool leaf)
+static inline void
+io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
+			  size_t size, size_t granule)
 {
-	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
+	iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
 }
 
-static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
+static inline void
+io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
+			  size_t size, size_t granule)
 {
-	iop->cfg.tlb->tlb_sync(iop->cookie);
+	iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
 }
 
+static inline void
+io_pgtable_tlb_add_page(struct io_pgtable *iop,
+			struct iommu_iotlb_gather * gather, unsigned long iova,
+			size_t granule)
+{
+	if (iop->cfg.tlb->tlb_add_page)
+		iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
+}
+
 /**
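
Taken together, the intended call pattern inside the page-table code is roughly the following (condensed from the unmap path of drivers/iommu/io-pgtable-arm.c; the leaf, granule, iova, size, iop and gather variables stand in for its locals):

	if (leaf) {
		/* A leaf entry went away: queue a per-page invalidation.
		 * This is a no-op if the driver left tlb_add_page unset. */
		io_pgtable_tlb_add_page(iop, gather, iova, granule);
	} else {
		/* A whole table went away: purge the walk cache too. */
		io_pgtable_tlb_flush_walk(iop, iova, size, granule);
	}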