powerpc/iommu: Move tce_xxx callbacks from ppc_md to iommu_table
This adds an iommu_table_ops struct and puts a pointer to it into the iommu_table struct. This moves the tce_build/tce_free/tce_get/tce_flush callbacks from ppc_md to the new struct, where they really belong.

This adds the requirement for @it_ops to be initialized before calling iommu_init_table(), to make sure that we do not leave any IOMMU table with iommu_table_ops uninitialized. It is not a parameter of iommu_init_table(), though, as there will be cases when iommu_init_table() is not called on TCE tables, for example VFIO.

This does s/tce_build/set/ and s/tce_free/clear/, removing the redundant "tce_" prefixes.

This removes the tce_xxx_rm handlers from ppc_md but does not add them to iommu_table_ops; that will be done later if we decide to support TCE hypercalls in real mode. This removes the _vm callbacks, since only virtual mode is supported for now, and with them the @rm parameter.

For pSeries, this always uses tce_buildmulti_pSeriesLP/tce_freemulti_pSeriesLP and changes the multi callbacks to fall back to tce_build_pSeriesLP/tce_free_pSeriesLP if FW_FEATURE_MULTITCE is not present. The reason is that we still have to support the "multitce=off" boot parameter in disable_multitce(), and we do not want to walk through all IOMMU tables in the system replacing "multi" callbacks with single ones.

For powernv, this defines one _ops per PHB type (P5IOC2/IODA1/IODA2) and makes their callbacks public. Later patches will extend the callbacks for IODA1/2.

No change in behaviour is expected.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit da004c3600, parent 10b35b2b74, committed by Michael Ellerman
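For orientation before the diff: the new callback struct has roughly this shape. This is a sketch only, with the signatures inferred from the call sites visible below (set/clear/get/flush replacing ppc_md.tce_build/tce_free/tce_get/tce_flush); the authoritative definition is the patch's change to arch/powerpc/include/asm/iommu.h.

    /* Per-table callbacks; an iommu_table carries a pointer to one of
     * these in its new @it_ops field. Signatures inferred from the
     * converted call sites in the diff below. */
    struct iommu_table_ops {
            int (*set)(struct iommu_table *tbl,
                            long index, long npages,
                            unsigned long uaddr,
                            enum dma_data_direction direction,
                            struct dma_attrs *attrs);
            void (*clear)(struct iommu_table *tbl,
                            long index, long npages);
            unsigned long (*get)(struct iommu_table *tbl, long index);
            void (*flush)(struct iommu_table *tbl);
    };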
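Each platform then wires its callbacks into an ops table and points @it_ops at it before iommu_init_table() runs; the BUG_ON added in the iommu_init_table hunk below enforces this. A minimal sketch using the pSeries LPAR naming from this patch (the powernv side does the same with one ops struct per PHB type; "nid" stands in for the platform's NUMA node argument):

    /* pSeries LPAR: the "multi" callbacks are now installed
     * unconditionally (see the fallback sketch that follows). */
    struct iommu_table_ops iommu_table_lpar_multi_ops = {
            .set    = tce_buildmulti_pSeriesLP,
            .clear  = tce_freemulti_pSeriesLP,
            .get    = tce_get_pSeriesLP
    };

            /* ...at table setup time, before initialization: */
            tbl->it_ops = &iommu_table_lpar_multi_ops;
            iommu_init_table(tbl, nid);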
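The "multitce=off" fallback lives inside the multi callback itself, so no table's ops ever need rewriting. A trimmed sketch of the build side (the free side mirrors it); firmware_has_feature() and FW_FEATURE_MULTITCE are the existing pSeries feature-check symbols, and the batched body is elided:

    static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                        long npages, unsigned long uaddr,
                                        enum dma_data_direction direction,
                                        struct dma_attrs *attrs)
    {
            /* disable_multitce() clears this feature bit for "multitce=off",
             * degrading the always-installed multi callback to the
             * single-TCE H_PUT_TCE path. */
            if (!firmware_has_feature(FW_FEATURE_MULTITCE))
                    return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
                                               direction, attrs);

            /* ... batched H_PUT_TCE_INDIRECT path continues as before ... */
            return 0;
    }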
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -322,11 +322,11 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
 
 	/* Put the TCEs in the HW table */
-	build_fail = ppc_md.tce_build(tbl, entry, npages,
+	build_fail = tbl->it_ops->set(tbl, entry, npages,
 				      (unsigned long)page &
 				      IOMMU_PAGE_MASK(tbl), direction, attrs);
 
-	/* ppc_md.tce_build() only returns non-zero for transient errors.
+	/* tbl->it_ops->set() only returns non-zero for transient errors.
 	 * Clean up the table bitmap in this case and return
 	 * DMA_ERROR_CODE. For all other errors the functionality is
 	 * not altered.
@@ -337,8 +337,8 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 	}
 
 	/* Flush/invalidate TLB caches if necessary */
-	if (ppc_md.tce_flush)
-		ppc_md.tce_flush(tbl);
+	if (tbl->it_ops->flush)
+		tbl->it_ops->flush(tbl);
 
 	/* Make sure updates are seen by hardware */
 	mb();
@@ -408,7 +408,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	if (!iommu_free_check(tbl, dma_addr, npages))
 		return;
 
-	ppc_md.tce_free(tbl, entry, npages);
+	tbl->it_ops->clear(tbl, entry, npages);
 
 	spin_lock_irqsave(&(pool->lock), flags);
 	bitmap_clear(tbl->it_map, free_entry, npages);
@@ -424,8 +424,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	 * not do an mb() here on purpose, it is not needed on any of
 	 * the current platforms.
 	 */
-	if (ppc_md.tce_flush)
-		ppc_md.tce_flush(tbl);
+	if (tbl->it_ops->flush)
+		tbl->it_ops->flush(tbl);
 }
 
 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
@@ -495,7 +495,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			    npages, entry, dma_addr);
 
 		/* Insert into HW table */
-		build_fail = ppc_md.tce_build(tbl, entry, npages,
+		build_fail = tbl->it_ops->set(tbl, entry, npages,
 					      vaddr & IOMMU_PAGE_MASK(tbl),
 					      direction, attrs);
 		if(unlikely(build_fail))
@@ -534,8 +534,8 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 	}
 
 	/* Flush/invalidate TLB caches if necessary */
-	if (ppc_md.tce_flush)
-		ppc_md.tce_flush(tbl);
+	if (tbl->it_ops->flush)
+		tbl->it_ops->flush(tbl);
 
 	DBG("mapped %d elements:\n", outcount);
 
@@ -600,8 +600,8 @@ void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	 * do not do an mb() here, the affected platforms do not need it
 	 * when freeing.
 	 */
-	if (ppc_md.tce_flush)
-		ppc_md.tce_flush(tbl);
+	if (tbl->it_ops->flush)
+		tbl->it_ops->flush(tbl);
 }
 
 static void iommu_table_clear(struct iommu_table *tbl)
@@ -613,17 +613,17 @@ static void iommu_table_clear(struct iommu_table *tbl)
 	 */
 	if (!is_kdump_kernel() || is_fadump_active()) {
 		/* Clear the table in case firmware left allocations in it */
-		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
+		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
 		return;
 	}
 
 #ifdef CONFIG_CRASH_DUMP
-	if (ppc_md.tce_get) {
+	if (tbl->it_ops->get) {
 		unsigned long index, tceval, tcecount = 0;
 
 		/* Reserve the existing mappings left by the first kernel. */
 		for (index = 0; index < tbl->it_size; index++) {
-			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
+			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
 			/*
 			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
 			 */
@@ -657,6 +657,8 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 	unsigned int i;
 	struct iommu_pool *p;
 
+	BUG_ON(!tbl->it_ops);
+
 	/* number of bytes needed for the bitmap */
 	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 
@@ -929,8 +931,8 @@ EXPORT_SYMBOL_GPL(iommu_tce_direction);
 void iommu_flush_tce(struct iommu_table *tbl)
 {
 	/* Flush/invalidate TLB caches if necessary */
-	if (ppc_md.tce_flush)
-		ppc_md.tce_flush(tbl);
+	if (tbl->it_ops->flush)
+		tbl->it_ops->flush(tbl);
 
 	/* Make sure updates are seen by hardware */
 	mb();
@@ -941,7 +943,7 @@ int iommu_tce_clear_param_check(struct iommu_table *tbl,
 		unsigned long ioba, unsigned long tce_value,
 		unsigned long npages)
 {
-	/* ppc_md.tce_free() does not support any value but 0 */
+	/* tbl->it_ops->clear() does not support any value but 0 */
 	if (tce_value)
 		return -EINVAL;
 
@@ -989,9 +991,9 @@ unsigned long iommu_clear_tce(struct iommu_table *tbl, unsigned long entry)
 
 	spin_lock(&(pool->lock));
 
-	oldtce = ppc_md.tce_get(tbl, entry);
+	oldtce = tbl->it_ops->get(tbl, entry);
 	if (oldtce & (TCE_PCI_WRITE | TCE_PCI_READ))
-		ppc_md.tce_free(tbl, entry, 1);
+		tbl->it_ops->clear(tbl, entry, 1);
 	else
 		oldtce = 0;
 
@@ -1014,10 +1016,10 @@ int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
 
 	spin_lock(&(pool->lock));
 
-	oldtce = ppc_md.tce_get(tbl, entry);
+	oldtce = tbl->it_ops->get(tbl, entry);
 	/* Add new entry if it is not busy */
 	if (!(oldtce & (TCE_PCI_WRITE | TCE_PCI_READ)))
-		ret = ppc_md.tce_build(tbl, entry, 1, hwaddr, direction, NULL);
+		ret = tbl->it_ops->set(tbl, entry, 1, hwaddr, direction, NULL);
 
 	spin_unlock(&(pool->lock));
 