Merge tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping
Pull DMA mapping updates from Christoph Hellwig:
 "A huge update this time, but a lot of that is just consolidating or
  removing code:

   - provide a common DMA_MAPPING_ERROR definition and avoid indirect
     calls for dma_map_* error checking

   - use direct calls for the DMA direct mapping case, avoiding huge
     retpoline overhead for high performance workloads

   - merge the swiotlb dma_map_ops into dma-direct

   - provide a generic remapping DMA consistent allocator for
     architectures that have devices that perform DMA that is not cache
     coherent. Based on the existing arm64 implementation and also used
     for csky now.

   - improve the dma-debug infrastructure, including dynamic allocation
     of entries (Robin Murphy)

   - default to providing chaining scatterlist everywhere, with opt-outs
     for the few architectures (alpha, parisc, most arm32 variants) that
     can't cope with it

   - misc sparc32 dma-related cleanups

   - remove the dma_mark_clean arch hook used by swiotlb on ia64 and
     replace it with the generic noncoherent infrastructure

   - fix the return type of dma_set_max_seg_size (Niklas Söderlund)

   - move the dummy dma ops for not DMA capable devices from arm64 to
     common code (Robin Murphy)

   - ensure dma_alloc_coherent returns zeroed memory to avoid kernel
     data leaks through userspace. We already did this for most common
     architectures, but this ensures we do it everywhere.
     dma_zalloc_coherent has been deprecated and can hopefully be
     removed after -rc1 with a coccinelle script"

* tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping: (73 commits)
  dma-mapping: fix inverted logic in dma_supported
  dma-mapping: deprecate dma_zalloc_coherent
  dma-mapping: zero memory returned from dma_alloc_*
  sparc/iommu: fix ->map_sg return value
  sparc/io-unit: fix ->map_sg return value
  arm64: default to the direct mapping in get_arch_dma_ops
  PCI: Remove unused attr variable in pci_dma_configure
  ia64: only select ARCH_HAS_DMA_COHERENT_TO_PFN if swiotlb is enabled
  dma-mapping: bypass indirect calls for dma-direct
  vmd: use the proper dma_* APIs instead of direct methods calls
  dma-direct: merge swiotlb_dma_ops into the dma_direct code
  dma-direct: use dma_direct_map_page to implement dma_direct_map_sg
  dma-direct: improve addressability error reporting
  swiotlb: remove dma_mark_clean
  swiotlb: remove SWIOTLB_MAP_ERROR
  ACPI / scan: Refactor _CCA enforcement
  dma-mapping: factor out dummy DMA ops
  dma-mapping: always build the direct mapping code
  dma-mapping: move dma_cache_sync out of line
  dma-mapping: move various slow path functions out of line
  ...
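As a rough, illustrative sketch (not part of this merge), the driver-facing side of the DMA_MAPPING_ERROR and zeroed-allocation changes looks roughly as follows. The helpers example_map_buffer() and example_alloc() and their arguments are hypothetical; dma_map_single(), dma_mapping_error(), dma_alloc_coherent() and DMA_TO_DEVICE are the existing DMA API.

#include <linux/dma-mapping.h>

/* Hypothetical helper showing the consolidated error-check idiom. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/*
	 * dma_mapping_error() now compares the handle against the common
	 * DMA_MAPPING_ERROR value instead of making an indirect
	 * ->mapping_error() call into the dma_map_ops.
	 */
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}

/*
 * dma_alloc_coherent() is now guaranteed to return zeroed memory on
 * every architecture, so the deprecated dma_zalloc_coherent() wrapper
 * adds nothing.
 */
static void *example_alloc(struct device *dev, size_t size, dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

Existing dma_zalloc_coherent() callers keep working until the coccinelle conversion mentioned above removes them.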
@@ -291,7 +291,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	   use direct_map above, it now must be considered an error. */
 	if (! alpha_mv.mv_pci_tbi) {
 		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
-		return 0;
+		return DMA_MAPPING_ERROR;
 	}
 
 	arena = hose->sg_pci;
@@ -307,7 +307,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
 	if (dma_ofs < 0) {
 		printk(KERN_WARNING "pci_map_single failed: "
 		       "could not allocate dma page tables\n");
-		return 0;
+		return DMA_MAPPING_ERROR;
 	}
 
 	paddr &= PAGE_MASK;
@@ -443,7 +443,7 @@ static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
 	gfp &= ~GFP_DMA;
 
 try_again:
-	cpu_addr = (void *)__get_free_pages(gfp, order);
+	cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
 	if (! cpu_addr) {
 		printk(KERN_INFO "pci_alloc_consistent: "
 		       "get_free_pages failed from %pf\n",
@@ -455,7 +455,7 @@ try_again:
 	memset(cpu_addr, 0, size);
 
 	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
-	if (*dma_addrp == 0) {
+	if (*dma_addrp == DMA_MAPPING_ERROR) {
 		free_pages((unsigned long)cpu_addr, order);
 		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
 			return NULL;
@@ -671,7 +671,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 		sg->dma_address
 		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
 				     sg->length, dac_allowed);
-		return sg->dma_address != 0;
+		return sg->dma_address != DMA_MAPPING_ERROR;
 	}
 
 	start = sg;
@@ -935,11 +935,6 @@ iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
 	return 0;
 }
 
-static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return dma_addr == 0;
-}
-
 const struct dma_map_ops alpha_pci_ops = {
 	.alloc			= alpha_pci_alloc_coherent,
 	.free			= alpha_pci_free_coherent,
@@ -947,7 +942,6 @@ const struct dma_map_ops alpha_pci_ops = {
 	.unmap_page		= alpha_pci_unmap_page,
 	.map_sg			= alpha_pci_map_sg,
 	.unmap_sg		= alpha_pci_unmap_sg,
-	.mapping_error		= alpha_pci_mapping_error,
 	.dma_supported		= alpha_pci_supported,
 };
 EXPORT_SYMBOL(alpha_pci_ops);