Merge tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping infrastructure from Christoph Hellwig:
 "This is the first pull request for the new dma-mapping subsystem.

  In this new subsystem we'll try to properly maintain all the generic
  code related to dma-mapping, and will further consolidate arch code
  into common helpers.

  This pull request contains:

   - removal of the DMA_ERROR_CODE macro, replacing it with calls to
     ->mapping_error so that the dma_map_ops instances are more self
     contained and can be shared across architectures (me)

   - removal of the ->set_dma_mask method, which duplicates the
     ->dma_capable one in terms of functionality, but requires more
     duplicate code.

   - various updates for the coherent dma pool and related arm code
     (Vladimir)

   - various smaller cleanups (me)"

* tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping: (56 commits)
  ARM: dma-mapping: Remove traces of NOMMU code
  ARM: NOMMU: Set ARM_DMA_MEM_BUFFERABLE for M-class cpus
  ARM: NOMMU: Introduce dma operations for noMMU
  drivers: dma-mapping: allow dma_common_mmap() for NOMMU
  drivers: dma-coherent: Introduce default DMA pool
  drivers: dma-coherent: Account dma_pfn_offset when used with device tree
  dma: Take into account dma_pfn_offset
  dma-mapping: replace dmam_alloc_noncoherent with dmam_alloc_attrs
  dma-mapping: remove dmam_free_noncoherent
  crypto: qat - avoid an uninitialized variable warning
  au1100fb: remove a bogus dma_free_nonconsistent call
  MAINTAINERS: add entry for dma mapping helpers
  powerpc: merge __dma_set_mask into dma_set_mask
  dma-mapping: remove the set_dma_mask method
  powerpc/cell: use the dma_supported method for ops switching
  powerpc/cell: clean up fixed mapping dma_ops initialization
  tile: remove dma_supported and mapping_error methods
  xen-swiotlb: remove xen_swiotlb_set_dma_mask
  arm: implement ->dma_supported instead of ->set_dma_mask
  mips/loongson64: implement ->dma_supported instead of ->set_dma_mask
  ...
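For drivers, the practical effect of dropping DMA_ERROR_CODE is that mapping failure is detected through dma_mapping_error(), which dispatches to the architecture's ->mapping_error callback, rather than by comparing against a per-arch magic constant. A minimal driver-side sketch of that pattern follows; the helper name and the -ENOMEM return are illustrative, not taken from this series:

#include <linux/dma-mapping.h>

/* Illustrative helper: map a buffer and report failure without any
 * reference to a magic error address such as DMA_ERROR_CODE. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
                              dma_addr_t *handle)
{
        dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, addr))
                return -ENOMEM;         /* arch-specific check via ->mapping_error */

        *handle = addr;
        return 0;
}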
@@ -14,6 +14,8 @@
 #include <linux/pci.h>
 #include <asm/pci_dma.h>
 
+#define S390_MAPPING_ERROR	(~(dma_addr_t) 0x0)
+
 static struct kmem_cache *dma_region_table_cache;
 static struct kmem_cache *dma_page_table_cache;
 static int s390_iommu_strict;
@@ -281,7 +283,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
 
 out_error:
 	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
-	return DMA_ERROR_CODE;
+	return S390_MAPPING_ERROR;
 }
 
 static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
@@ -329,7 +331,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 	/* This rounds up number of pages based on size and offset */
 	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
 	dma_addr = dma_alloc_address(dev, nr_pages);
-	if (dma_addr == DMA_ERROR_CODE) {
+	if (dma_addr == S390_MAPPING_ERROR) {
 		ret = -ENOSPC;
 		goto out_err;
 	}
@@ -352,7 +354,7 @@ out_free:
 out_err:
 	zpci_err("map error:\n");
 	zpci_err_dma(ret, pa);
-	return DMA_ERROR_CODE;
+	return S390_MAPPING_ERROR;
 }
 
 static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
@@ -429,7 +431,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	int ret;
 
 	dma_addr_base = dma_alloc_address(dev, nr_pages);
-	if (dma_addr_base == DMA_ERROR_CODE)
+	if (dma_addr_base == S390_MAPPING_ERROR)
 		return -ENOMEM;
 
 	dma_addr = dma_addr_base;
@@ -476,7 +478,7 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	for (i = 1; i < nr_elements; i++) {
 		s = sg_next(s);
 
-		s->dma_address = DMA_ERROR_CODE;
+		s->dma_address = S390_MAPPING_ERROR;
 		s->dma_length = 0;
 
 		if (s->offset || (size & ~PAGE_MASK) ||
@@ -525,6 +527,11 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 		s->dma_length = 0;
 	}
 }
 
+static int s390_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_addr == S390_MAPPING_ERROR;
+}
+
 int zpci_dma_init_device(struct zpci_dev *zdev)
 {
@@ -659,6 +666,7 @@ const struct dma_map_ops s390_pci_dma_ops = {
 	.unmap_sg = s390_dma_unmap_sg,
 	.map_page = s390_dma_map_pages,
 	.unmap_page = s390_dma_unmap_pages,
+	.mapping_error = s390_mapping_error,
 	/* if we support direct DMA this must be conditional */
 	.is_phys = 0,
 	/* dma_supported is unconditionally true without a callback */
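For context, the generic dma_mapping_error() helper is what ends up calling the new s390_mapping_error() callback registered above. A simplified sketch of that dispatch, assuming the 4.13-era dma-mapping interfaces (the real helper also invokes the dma-debug hook), not part of this diff:

/* Simplified sketch of how the core consults a dma_map_ops
 * ->mapping_error callback; the real helper also calls
 * debug_dma_mapping_error(). */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);
        return 0;       /* without a callback, mappings are assumed to succeed */
}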