powerpc: Use the newly added get_required_mask dma_map_ops hook

Now that the generic code has dma_map_ops set, instead of having a
messy ifdef & if block in the base dma_get_required_mask hook, push
the computation into the dma ops.

If the ops does not provide the get_required_mask hook, default to
the width of dma_addr_t.
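
Concretely, the base hook in arch/powerpc/kernel/dma.c reduces to roughly
the following dispatch (see the dma.c hunk below):

	u64 dma_get_required_mask(struct device *dev)
	{
		struct dma_map_ops *dma_ops = get_dma_ops(dev);

		if (ppc_md.dma_get_required_mask)
			return ppc_md.dma_get_required_mask(dev);

		if (unlikely(dma_ops == NULL))
			return 0;

		/* ask the bus/iommu specific dma_map_ops if it knows better */
		if (dma_ops->get_required_mask)
			return dma_ops->get_required_mask(dev);

		/* otherwise fall back to the width of dma_addr_t */
		return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
	}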

This also corrects ibmebus's ibmebus_dma_supported to require a 64-bit
mask.  I doubt anything is checking or setting the dma mask on that
bus.
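
In practice that means dma_set_mask() on an ibmebus device now only
succeeds for the full 64-bit mask, e.g.:

	dma_set_mask(dev, DMA_BIT_MASK(64));	/* accepted */
	dma_set_mask(dev, DMA_BIT_MASK(32));	/* now rejected (-EIO) */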

Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-kernel@vger.kernel.org
Cc: benh@kernel.crashing.org
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
commit d24f9c6999 (parent 3a8f7558e4)
Author:    Milton Miller
Date:      2011-06-24 09:05:24 +00:00
Committer: Benjamin Herrenschmidt

10 changed files with 68 additions and 34 deletions

arch/powerpc/kernel/dma-iommu.c

@@ -90,7 +90,7 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 		return 1;
 }
 
-u64 dma_iommu_get_required_mask(struct device *dev)
+static u64 dma_iommu_get_required_mask(struct device *dev)
 {
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 	u64 mask;
@@ -111,5 +111,6 @@ struct dma_map_ops dma_iommu_ops = {
 	.dma_supported = dma_iommu_dma_supported,
 	.map_page = dma_iommu_map_page,
 	.unmap_page = dma_iommu_unmap_page,
+	.get_required_mask = dma_iommu_get_required_mask,
 };
 EXPORT_SYMBOL(dma_iommu_ops);

arch/powerpc/kernel/dma-swiotlb.c

@@ -24,6 +24,21 @@
 
 unsigned int ppc_swiotlb_enable;
 
+static u64 swiotlb_powerpc_get_required(struct device *dev)
+{
+	u64 end, mask, max_direct_dma_addr = dev->archdata.max_direct_dma_addr;
+
+	end = memblock_end_of_DRAM();
+	if (max_direct_dma_addr && end > max_direct_dma_addr)
+		end = max_direct_dma_addr;
+	end += get_dma_offset(dev);
+
+	mask = 1ULL << (fls64(end) - 1);
+	mask += mask - 1;
+
+	return mask;
+}
+
 /*
  * At the moment, all platforms that use this code only require
  * swiotlb to be used if we're operating on HIGHMEM.  Since
@@ -44,6 +59,7 @@ struct dma_map_ops swiotlb_dma_ops = {
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
 	.mapping_error = swiotlb_dma_mapping_error,
+	.get_required_mask = swiotlb_powerpc_get_required,
 };
 
 void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
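
The two-step computation above (shared with dma_direct_get_required_mask
below) turns the bus address of the end of DRAM into an all-ones DMA mask
wide enough to cover it.  A standalone sketch of the same math, using
__builtin_clzll as a userspace stand-in for the kernel's fls64():

	#include <stdint.h>
	#include <stdio.h>

	/* stand-in for the kernel's fls64(): 1-based index of the highest set bit */
	static int fls64(uint64_t x)
	{
		return x ? 64 - __builtin_clzll(x) : 0;
	}

	int main(void)
	{
		uint64_t end = 0x100000000ULL;			/* say DRAM ends at 4 GiB */
		uint64_t mask = 1ULL << (fls64(end) - 1);	/* highest power of two <= end */

		mask += mask - 1;				/* widen to an all-ones mask */
		printf("end %#llx -> required mask %#llx\n",
		       (unsigned long long)end, (unsigned long long)mask);
		return 0;	/* prints ... 0x1ffffffff, i.e. a 33-bit mask */
	}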

arch/powerpc/kernel/dma.c

@@ -96,6 +96,18 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
 #endif
 }
 
+static u64 dma_direct_get_required_mask(struct device *dev)
+{
+	u64 end, mask;
+
+	end = memblock_end_of_DRAM() + get_dma_offset(dev);
+
+	mask = 1ULL << (fls64(end) - 1);
+	mask += mask - 1;
+
+	return mask;
+}
+
 static inline dma_addr_t dma_direct_map_page(struct device *dev,
 					     struct page *page,
 					     unsigned long offset,
@@ -144,6 +156,7 @@ struct dma_map_ops dma_direct_ops = {
 	.dma_supported = dma_direct_dma_supported,
 	.map_page = dma_direct_map_page,
 	.unmap_page = dma_direct_unmap_page,
+	.get_required_mask = dma_direct_get_required_mask,
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	.sync_single_for_cpu = dma_direct_sync_single,
 	.sync_single_for_device = dma_direct_sync_single,
@@ -173,7 +186,6 @@ EXPORT_SYMBOL(dma_set_mask);
 u64 dma_get_required_mask(struct device *dev)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-	u64 mask, end = 0;
 
 	if (ppc_md.dma_get_required_mask)
 		return ppc_md.dma_get_required_mask(dev);
@@ -181,31 +193,10 @@ u64 dma_get_required_mask(struct device *dev)
 	if (unlikely(dma_ops == NULL))
 		return 0;
 
-#ifdef CONFIG_PPC64
-	else if (dma_ops == &dma_iommu_ops)
-		return dma_iommu_get_required_mask(dev);
-#endif
-#ifdef CONFIG_SWIOTLB
-	else if (dma_ops == &swiotlb_dma_ops) {
-		u64 max_direct_dma_addr = dev->archdata.max_direct_dma_addr;
-
-		end = memblock_end_of_DRAM();
-		if (max_direct_dma_addr && end > max_direct_dma_addr)
-			end = max_direct_dma_addr;
-
-		end += get_dma_offset(dev);
-	}
-#endif
-	else if (dma_ops == &dma_direct_ops)
-		end = memblock_end_of_DRAM() + get_dma_offset(dev);
-	else {
-		WARN_ONCE(1, "%s: unknown ops %p\n", __func__, dma_ops);
-		end = memblock_end_of_DRAM();
-	}
-
-	mask = 1ULL << (fls64(end) - 1);
-	mask += mask - 1;
-	return mask;
+	if (dma_ops->get_required_mask)
+		return dma_ops->get_required_mask(dev);
+
+	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
 
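
For context, the typical consumer of dma_get_required_mask() is a driver
deciding whether 64-bit descriptors are worth enabling.  A hypothetical
sketch of that pattern (example_pick_dma_mask and its policy are
illustrative, not part of this patch):

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/pci.h>

	/* hypothetical helper: only ask for a 64-bit mask when the platform
	 * says the memory the device must reach needs more than 32 bits */
	static int example_pick_dma_mask(struct pci_dev *pdev)
	{
		if (dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32) &&
		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
			return 64;

		return pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ? -EIO : 32;
	}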

arch/powerpc/kernel/ibmebus.c

@@ -125,7 +125,12 @@ static void ibmebus_unmap_sg(struct device *dev,
 
 static int ibmebus_dma_supported(struct device *dev, u64 mask)
 {
-	return 1;
+	return mask == DMA_BIT_MASK(64);
 }
 
+static u64 ibmebus_dma_get_required_mask(struct device *dev)
+{
+	return DMA_BIT_MASK(64);
+}
+
 static struct dma_map_ops ibmebus_dma_ops = {
@@ -134,6 +139,7 @@ static struct dma_map_ops ibmebus_dma_ops = {
 	.map_sg = ibmebus_map_sg,
 	.unmap_sg = ibmebus_unmap_sg,
 	.dma_supported = ibmebus_dma_supported,
+	.get_required_mask = ibmebus_dma_get_required_mask,
 	.map_page = ibmebus_map_page,
 	.unmap_page = ibmebus_unmap_page,
 };

arch/powerpc/kernel/vio.c

@@ -605,6 +605,11 @@ static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
 	return dma_iommu_ops.dma_supported(dev, mask);
 }
 
+static u64 vio_dma_get_required_mask(struct device *dev)
+{
+	return dma_iommu_ops.get_required_mask(dev);
+}
+
 struct dma_map_ops vio_dma_mapping_ops = {
 	.alloc_coherent = vio_dma_iommu_alloc_coherent,
 	.free_coherent = vio_dma_iommu_free_coherent,
@@ -613,7 +618,7 @@ struct dma_map_ops vio_dma_mapping_ops = {
 	.map_page = vio_dma_iommu_map_page,
 	.unmap_page = vio_dma_iommu_unmap_page,
 	.dma_supported = vio_dma_iommu_dma_supported,
-
+	.get_required_mask = vio_dma_get_required_mask,
 };
 
 /**