Merge tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping infrastructure from Christoph Hellwig:
 "This is the first pull request for the new dma-mapping subsystem.

  In this new subsystem we'll try to properly maintain all the generic
  code related to dma-mapping, and will further consolidate arch code
  into common helpers.

  This pull request contains:

   - removal of the DMA_ERROR_CODE macro, replacing it with calls to
     ->mapping_error so that the dma_map_ops instances are more self
     contained and can be shared across architectures (me)

   - removal of the ->set_dma_mask method, which duplicates the
     ->dma_capable one in terms of functionality, but requires more
     duplicate code

   - various updates for the coherent dma pool and related arm code
     (Vladimir)

   - various smaller cleanups (me)"

* tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping: (56 commits)
  ARM: dma-mapping: Remove traces of NOMMU code
  ARM: NOMMU: Set ARM_DMA_MEM_BUFFERABLE for M-class cpus
  ARM: NOMMU: Introduce dma operations for noMMU
  drivers: dma-mapping: allow dma_common_mmap() for NOMMU
  drivers: dma-coherent: Introduce default DMA pool
  drivers: dma-coherent: Account dma_pfn_offset when used with device tree
  dma: Take into account dma_pfn_offset
  dma-mapping: replace dmam_alloc_noncoherent with dmam_alloc_attrs
  dma-mapping: remove dmam_free_noncoherent
  crypto: qat - avoid an uninitialized variable warning
  au1100fb: remove a bogus dma_free_nonconsistent call
  MAINTAINERS: add entry for dma mapping helpers
  powerpc: merge __dma_set_mask into dma_set_mask
  dma-mapping: remove the set_dma_mask method
  powerpc/cell: use the dma_supported method for ops switching
  powerpc/cell: clean up fixed mapping dma_ops initialization
  tile: remove dma_supported and mapping_error methods
  xen-swiotlb: remove xen_swiotlb_set_dma_mask
  arm: implement ->dma_supported instead of ->set_dma_mask
  mips/loongson64: implement ->dma_supported instead of ->set_dma_mask
  ...
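As a rough illustration of the two interface changes described above, here is a minimal sketch (not taken from this series; the example_* names and EXAMPLE_MAPPING_ERROR sentinel are hypothetical, while struct dma_map_ops, dma_map_single(), dma_mapping_error() and DMA_BIT_MASK() are the real kernel interfaces): each dma_map_ops instance now reports mapping failures through its own .mapping_error callback instead of a global DMA_ERROR_CODE, and mask handling goes through .dma_supported instead of a separate ->set_dma_mask method. Drivers are unchanged; they keep calling dma_mapping_error().

    /*
     * Hedged sketch of the ->mapping_error / ->dma_supported pattern.
     * Names containing "example" are made up for illustration only.
     */
    #include <linux/dma-mapping.h>

    /* per-ops error sentinel, analogous to ARM_MAPPING_ERROR below */
    #define EXAMPLE_MAPPING_ERROR	(~(dma_addr_t)0)

    static dma_addr_t example_map_page(struct device *dev, struct page *page,
    				   unsigned long offset, size_t size,
    				   enum dma_data_direction dir,
    				   unsigned long attrs)
    {
    	/* ... on failure, return the ops-private sentinel ... */
    	return EXAMPLE_MAPPING_ERROR;
    }

    static int example_mapping_error(struct device *dev, dma_addr_t dma_addr)
    {
    	/* the core compares through this hook, not against DMA_ERROR_CODE */
    	return dma_addr == EXAMPLE_MAPPING_ERROR;
    }

    static int example_dma_supported(struct device *dev, u64 mask)
    {
    	/* replaces the old ->set_dma_mask method: report which masks work */
    	return mask >= DMA_BIT_MASK(32);
    }

    static const struct dma_map_ops example_dma_ops = {
    	.map_page	= example_map_page,
    	.mapping_error	= example_mapping_error,
    	.dma_supported	= example_dma_supported,
    };

    /* Driver side is unchanged: */
    static int example_driver_map(struct device *dev, void *buf, size_t len)
    {
    	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

    	if (dma_mapping_error(dev, addr))
    		return -ENOMEM;
    	/* ... use addr ... */
    	return 0;
    }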
arch/arm/mm/Kconfig
@@ -1045,8 +1045,8 @@ config ARM_L1_CACHE_SHIFT
 	default 5

 config ARM_DMA_MEM_BUFFERABLE
-	bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K) && !CPU_V7
-	default y if CPU_V6 || CPU_V6K || CPU_V7
+	bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K || CPU_V7M) && !CPU_V7
+	default y if CPU_V6 || CPU_V6K || CPU_V7 || CPU_V7M
 	help
 	  Historically, the kernel has used strongly ordered mappings to
 	  provide DMA coherent memory. With the advent of ARMv7, mapping
@@ -1061,6 +1061,10 @@ config ARM_DMA_MEM_BUFFERABLE
 	  and therefore turning this on may result in unpredictable driver
 	  behaviour. Therefore, we offer this as an option.

+	  On some of the beefier ARMv7-M machines (with DMA and write
+	  buffers) you likely want this enabled, while those that
+	  didn't need it until now also won't need it in the future.
+
 	  You are recommended say 'Y' here and debug any affected drivers.

 config ARM_HEAVY_MB
arch/arm/mm/Makefile
@@ -2,9 +2,8 @@
 # Makefile for the linux arm-specific parts of the memory manager.
 #

-obj-y			:= dma-mapping.o extable.o fault.o init.o \
-			   iomap.o
-
+obj-y			:= extable.o fault.o init.o iomap.o
+obj-y			+= dma-mapping$(MMUEXT).o
 obj-$(CONFIG_MMU)	+= fault-armv.o flush.o idmap.o ioremap.o \
 			   mmap.o pgd.o mmu.o pageattr.o

arch/arm/mm/dma-mapping-nommu.c (new file, 228 lines)
@@ -0,0 +1,228 @@
/*
 *  Based on linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/cp15.h>

#include "dma.h"

/*
 *  dma_noop_ops is used if
 *   - MMU/MPU is off
 *   - cpu is v7m w/o cache support
 *   - device is coherent
 *  otherwise arm_nommu_dma_ops is used.
 *
 *  arm_nommu_dma_ops rely on consistent DMA memory (please, refer to
 *  [1] on how to declare such memory).
 *
 *  [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */

static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)

{
	const struct dma_map_ops *ops = &dma_noop_ops;

	/*
	 * We are here because:
	 * - no consistent DMA region has been defined, so we can't
	 *   continue.
	 * - there is no space left in consistent DMA region, so we
	 *   only can fallback to generic allocator if we are
	 *   advertised that consistency is not required.
	 */

	if (attrs & DMA_ATTR_NON_CONSISTENT)
		return ops->alloc(dev, size, dma_handle, gfp, attrs);

	WARN_ON_ONCE(1);
	return NULL;
}

static void arm_nommu_dma_free(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr,
			       unsigned long attrs)
{
	const struct dma_map_ops *ops = &dma_noop_ops;

	if (attrs & DMA_ATTR_NON_CONSISTENT)
		ops->free(dev, size, cpu_addr, dma_addr, attrs);
	else
		WARN_ON_ONCE(1);

	return;
}

static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	dmac_map_area(__va(paddr), size, dir);

	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
}

static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);
		dmac_unmap_area(__va(paddr), size, dir);
	}
}

static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	__dma_page_cpu_to_dev(handle, size, dir);

	return handle;
}

static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}


static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
	}

	return nents;
}

static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
					     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
					  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

const struct dma_map_ops arm_nommu_dma_ops = {
	.alloc			= arm_nommu_dma_alloc,
	.free			= arm_nommu_dma_free,
	.map_page		= arm_nommu_dma_map_page,
	.unmap_page		= arm_nommu_dma_unmap_page,
	.map_sg			= arm_nommu_dma_map_sg,
	.unmap_sg		= arm_nommu_dma_unmap_sg,
	.sync_single_for_device	= arm_nommu_dma_sync_single_for_device,
	.sync_single_for_cpu	= arm_nommu_dma_sync_single_for_cpu,
	.sync_sg_for_device	= arm_nommu_dma_sync_sg_for_device,
	.sync_sg_for_cpu	= arm_nommu_dma_sync_sg_for_cpu,
};
EXPORT_SYMBOL(arm_nommu_dma_ops);

static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
{
	return coherent ? &dma_noop_ops : &arm_nommu_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	const struct dma_map_ops *dma_ops;

	if (IS_ENABLED(CONFIG_CPU_V7M)) {
		/*
		 * Cache support for v7m is optional, so can be treated as
		 * coherent if no cache has been detected. Note that it is not
		 * enough to check if MPU is in use or not since in absense of
		 * MPU system memory map is used.
		 */
		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
	} else {
		/*
		 * Assume coherent DMA in case MMU/MPU has not been set up.
		 */
		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
	}

	dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);

	set_dma_ops(dev, dma_ops);
}

void arch_teardown_dma_ops(struct device *dev)
{
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
core_initcall(dma_debug_do_init);
arch/arm/mm/dma-mapping.c
@@ -180,6 +180,11 @@ static void arm_dma_sync_single_for_device(struct device *dev,
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }

+static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_addr == ARM_MAPPING_ERROR;
+}
+
 const struct dma_map_ops arm_dma_ops = {
 	.alloc			= arm_dma_alloc,
 	.free			= arm_dma_free,
@@ -193,6 +198,8 @@ const struct dma_map_ops arm_dma_ops = {
 	.sync_single_for_device	= arm_dma_sync_single_for_device,
 	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
 	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
+	.mapping_error		= arm_dma_mapping_error,
+	.dma_supported		= arm_dma_supported,
 };
 EXPORT_SYMBOL(arm_dma_ops);

@@ -211,6 +218,8 @@ const struct dma_map_ops arm_coherent_dma_ops = {
 	.get_sgtable		= arm_dma_get_sgtable,
 	.map_page		= arm_coherent_dma_map_page,
 	.map_sg			= arm_dma_map_sg,
+	.mapping_error		= arm_dma_mapping_error,
+	.dma_supported		= arm_dma_supported,
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);

@@ -344,8 +353,6 @@ static void __dma_free_buffer(struct page *page, size_t size)
 	}
 }

-#ifdef CONFIG_MMU
-
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
 				     const void *caller, bool want_vaddr,
@@ -647,22 +654,6 @@ static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
 	return prot;
 }

-#define nommu() 0
-
-#else	/* !CONFIG_MMU */
-
-#define nommu() 1
-
-#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
-#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
-#define __alloc_from_pool(size, ret_page)			NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag, gfp)	NULL
-#define __free_from_pool(cpu_addr, size)			do { } while (0)
-#define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
-#define __dma_free_remap(cpu_addr, size)			do { } while (0)
-
-#endif	/* CONFIG_MMU */
-
 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 				   struct page **ret_page)
 {
@@ -799,13 +790,13 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	gfp &= ~(__GFP_COMP);
 	args.gfp = gfp;

-	*handle = DMA_ERROR_CODE;
+	*handle = ARM_MAPPING_ERROR;
 	allowblock = gfpflags_allow_blocking(gfp);
 	cma = allowblock ? dev_get_cma_area(dev) : false;

 	if (cma)
 		buf->allocator = &cma_allocator;
-	else if (nommu() || is_coherent)
+	else if (is_coherent)
 		buf->allocator = &simple_allocator;
 	else if (allowblock)
 		buf->allocator = &remap_allocator;
@@ -854,8 +845,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		 unsigned long attrs)
 {
-	int ret = -ENXIO;
-#ifdef CONFIG_MMU
+	int ret;
 	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long pfn = dma_to_pfn(dev, dma_addr);
@@ -870,10 +860,6 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 				      vma->vm_end - vma->vm_start,
 				      vma->vm_page_prot);
 	}
-#else
-	ret = vm_iomap_memory(vma, vma->vm_start,
-			      (vma->vm_end - vma->vm_start));
-#endif	/* CONFIG_MMU */

 	return ret;
 }
@@ -892,9 +878,7 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		 unsigned long attrs)
 {
-#ifdef CONFIG_MMU
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-#endif	/* CONFIG_MMU */
 	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }

@@ -1177,11 +1161,10 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  * during bus mastering, then you would pass 0x00ffffff as the mask
  * to this function.
  */
-int dma_supported(struct device *dev, u64 mask)
+int arm_dma_supported(struct device *dev, u64 mask)
 {
 	return __dma_supported(dev, mask, false);
 }
-EXPORT_SYMBOL(dma_supported);

 #define PREALLOC_DMA_DEBUG_ENTRIES	4096

@@ -1254,7 +1237,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 	if (i == mapping->nr_bitmaps) {
 		if (extend_iommu_mapping(mapping)) {
 			spin_unlock_irqrestore(&mapping->lock, flags);
-			return DMA_ERROR_CODE;
+			return ARM_MAPPING_ERROR;
 		}

 		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
@@ -1262,7 +1245,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,

 		if (start > mapping->bits) {
 			spin_unlock_irqrestore(&mapping->lock, flags);
-			return DMA_ERROR_CODE;
+			return ARM_MAPPING_ERROR;
 		}

 		bitmap_set(mapping->bitmaps[i], start, count);
@@ -1445,7 +1428,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
 	int i;

 	dma_addr = __alloc_iova(mapping, size);
-	if (dma_addr == DMA_ERROR_CODE)
+	if (dma_addr == ARM_MAPPING_ERROR)
 		return dma_addr;

 	iova = dma_addr;
@@ -1472,7 +1455,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
 fail:
 	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
 	__free_iova(mapping, dma_addr, size);
-	return DMA_ERROR_CODE;
+	return ARM_MAPPING_ERROR;
 }

 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
@@ -1533,7 +1516,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
 		return NULL;

 	*handle = __iommu_create_mapping(dev, &page, size, attrs);
-	if (*handle == DMA_ERROR_CODE)
+	if (*handle == ARM_MAPPING_ERROR)
 		goto err_mapping;

 	return addr;
@@ -1561,7 +1544,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	struct page **pages;
 	void *addr = NULL;

-	*handle = DMA_ERROR_CODE;
+	*handle = ARM_MAPPING_ERROR;
 	size = PAGE_ALIGN(size);

 	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
@@ -1582,7 +1565,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 		return NULL;

 	*handle = __iommu_create_mapping(dev, pages, size, attrs);
-	if (*handle == DMA_ERROR_CODE)
+	if (*handle == ARM_MAPPING_ERROR)
 		goto err_buffer;

 	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
@@ -1732,10 +1715,10 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 	int prot;

 	size = PAGE_ALIGN(size);
-	*handle = DMA_ERROR_CODE;
+	*handle = ARM_MAPPING_ERROR;

 	iova_base = iova = __alloc_iova(mapping, size);
-	if (iova == DMA_ERROR_CODE)
+	if (iova == ARM_MAPPING_ERROR)
 		return -ENOMEM;

 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
@@ -1775,7 +1758,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 1; i < nents; i++) {
 		s = sg_next(s);

-		s->dma_address = DMA_ERROR_CODE;
+		s->dma_address = ARM_MAPPING_ERROR;
 		s->dma_length = 0;

 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
@@ -1950,7 +1933,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	int ret, prot, len = PAGE_ALIGN(size + offset);

 	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == DMA_ERROR_CODE)
+	if (dma_addr == ARM_MAPPING_ERROR)
 		return dma_addr;

 	prot = __dma_info_to_prot(dir, attrs);
@@ -1962,7 +1945,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	return dma_addr + offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
-	return DMA_ERROR_CODE;
+	return ARM_MAPPING_ERROR;
 }

 /**
@@ -2056,7 +2039,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
 	size_t len = PAGE_ALIGN(size + offset);

 	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == DMA_ERROR_CODE)
+	if (dma_addr == ARM_MAPPING_ERROR)
 		return dma_addr;

 	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
@@ -2068,7 +2051,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
 	return dma_addr + offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
-	return DMA_ERROR_CODE;
+	return ARM_MAPPING_ERROR;
 }

 /**
@@ -2140,6 +2123,9 @@ const struct dma_map_ops iommu_ops = {

 	.map_resource		= arm_iommu_map_resource,
 	.unmap_resource		= arm_iommu_unmap_resource,
+
+	.mapping_error		= arm_dma_mapping_error,
+	.dma_supported		= arm_dma_supported,
 };

 const struct dma_map_ops iommu_coherent_ops = {
@@ -2156,6 +2142,9 @@ const struct dma_map_ops iommu_coherent_ops = {

 	.map_resource	= arm_iommu_map_resource,
 	.unmap_resource	= arm_iommu_unmap_resource,
+
+	.mapping_error	= arm_dma_mapping_error,
+	.dma_supported	= arm_dma_supported,
 };

 /**