powerpc/iommu: Update constant names to reflect their hardcoded page size
The powerpc iommu uses a hardcoded page size of 4K. This patch changes the name of the IOMMU_PAGE_* macros to reflect the hardcoded values. A future patch will use the existing names to support dynamic page sizes. Signed-off-by: Alistair Popple <alistair@popple.id.au> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
This commit is contained in:

committed by
Benjamin Herrenschmidt

parent
fee26f6d5d
commit
e589a4404f
@@ -520,14 +520,14 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
|
||||
struct vio_dev *viodev = to_vio_dev(dev);
|
||||
dma_addr_t ret = DMA_ERROR_CODE;
|
||||
|
||||
if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
|
||||
if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K))) {
|
||||
atomic_inc(&viodev->cmo.allocs_failed);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
|
||||
if (unlikely(dma_mapping_error(dev, ret))) {
|
||||
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
|
||||
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
|
||||
atomic_inc(&viodev->cmo.allocs_failed);
|
||||
}
|
||||
|
||||
@@ -543,7 +543,7 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
|
||||
|
||||
dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
|
||||
|
||||
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
|
||||
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
|
||||
}
|
||||
|
||||
static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
|
||||
@@ -556,7 +556,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
|
||||
size_t alloc_size = 0;
|
||||
|
||||
for (sgl = sglist; count < nelems; count++, sgl++)
|
||||
alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);
|
||||
alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE_4K);
|
||||
|
||||
if (vio_cmo_alloc(viodev, alloc_size)) {
|
||||
atomic_inc(&viodev->cmo.allocs_failed);
|
||||
@@ -572,7 +572,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
|
||||
}
|
||||
|
||||
for (sgl = sglist, count = 0; count < ret; count++, sgl++)
|
||||
alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
|
||||
alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
|
||||
if (alloc_size)
|
||||
vio_cmo_dealloc(viodev, alloc_size);
|
||||
|
||||
@@ -590,7 +590,7 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
|
||||
int count = 0;
|
||||
|
||||
for (sgl = sglist; count < nelems; count++, sgl++)
|
||||
alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
|
||||
alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
|
||||
|
||||
dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
|
||||
|
||||
@@ -736,7 +736,8 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
|
||||
viodev->cmo.desired =
|
||||
IOMMU_PAGE_ALIGN_4K(viodrv->get_desired_dma(viodev));
|
||||
if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
|
||||
viodev->cmo.desired = VIO_CMO_MIN_ENT;
|
||||
size = VIO_CMO_MIN_ENT;
|
||||
@@ -1176,9 +1177,9 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
|
||||
&tbl->it_index, &offset, &size);
|
||||
|
||||
/* TCE table size - measured in tce entries */
|
||||
tbl->it_size = size >> IOMMU_PAGE_SHIFT;
|
||||
tbl->it_size = size >> IOMMU_PAGE_SHIFT_4K;
|
||||
/* offset for VIO should always be 0 */
|
||||
tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
|
||||
tbl->it_offset = offset >> IOMMU_PAGE_SHIFT_4K;
|
||||
tbl->it_busno = 0;
|
||||
tbl->it_type = TCE_VB;
|
||||
tbl->it_blocksize = 16;
|
||||
|
Reference in New Issue
Block a user