Merge tag 'dma-mapping-5.3' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - move the USB special case that bounced DMA through a device bar into
   the USB code instead of handling it in the common DMA code (Laurentiu
   Tudor and Fredrik Noring)

 - don't dip into the global CMA pool for single page allocations
   (Nicolin Chen)

 - fix a crash when allocating memory for the atomic pool failed during
   boot (Florian Fainelli)

 - move support for MIPS-style uncached segments to the common code and
   use that for MIPS and nios2 (me)

 - make support for DMA_ATTR_NON_CONSISTENT and
   DMA_ATTR_NO_KERNEL_MAPPING generic (me)

 - convert nds32 to the generic remapping allocator (me)

* tag 'dma-mapping-5.3' of git://git.infradead.org/users/hch/dma-mapping: (29 commits)
  dma-mapping: mark dma_alloc_need_uncached as __always_inline
  MIPS: only select ARCH_HAS_UNCACHED_SEGMENT for non-coherent platforms
  usb: host: Fix excessive alignment restriction for local memory allocations
  lib/genalloc.c: Add algorithm, align and zeroed family of DMA allocators
  nios2: use the generic uncached segment support in dma-direct
  nds32: use the generic remapping allocator for coherent DMA allocations
  arc: use the generic remapping allocator for coherent DMA allocations
  dma-direct: handle DMA_ATTR_NO_KERNEL_MAPPING in common code
  dma-direct: handle DMA_ATTR_NON_CONSISTENT in common code
  dma-mapping: add a dma_alloc_need_uncached helper
  openrisc: remove the partial DMA_ATTR_NON_CONSISTENT support
  arc: remove the partial DMA_ATTR_NON_CONSISTENT support
  arm-nommu: remove the partial DMA_ATTR_NON_CONSISTENT support
  ARM: dma-mapping: allow larger DMA mask than supported
  dma-mapping: truncate dma masks to what dma_addr_t can hold
  iommu/dma: Apply dma_{alloc,free}_contiguous functions
  dma-remap: Avoid de-referencing NULL atomic_pool
  MIPS: use the generic uncached segment support in dma-direct
  dma-direct: provide generic support for uncached kernel segments
  au1100fb: fix DMA API abuse
  ...
@@ -214,6 +214,62 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
         return cma_release(dev_get_cma_area(dev), pages, count);
 }
 
+/**
+ * dma_alloc_contiguous() - allocate contiguous pages
+ * @dev: Pointer to device for which the allocation is performed.
+ * @size: Requested allocation size.
+ * @gfp: Allocation flags.
+ *
+ * This function allocates a contiguous memory buffer for the specified
+ * device. It first tries the device-specific contiguous memory area if
+ * available, or the default global one, then falls back to normal pages.
+ *
+ * Note that it bypasses one-page allocations from the global area as the
+ * addresses within one page are always contiguous, so there is no need
+ * to waste CMA pages for that kind; it also helps reduce fragmentation.
+ */
+struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
+{
+        int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
+        size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+        size_t align = get_order(PAGE_ALIGN(size));
+        struct page *page = NULL;
+        struct cma *cma = NULL;
+
+        if (dev && dev->cma_area)
+                cma = dev->cma_area;
+        else if (count > 1)
+                cma = dma_contiguous_default_area;
+
+        /* CMA can be used only in the context which permits sleeping */
+        if (cma && gfpflags_allow_blocking(gfp)) {
+                align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
+                page = cma_alloc(cma, count, align, gfp & __GFP_NOWARN);
+        }
+
+        /* Fallback allocation of normal pages */
+        if (!page)
+                page = alloc_pages_node(node, gfp, align);
+        return page;
+}
+
+/**
+ * dma_free_contiguous() - release allocated pages
+ * @dev: Pointer to device for which the pages were allocated.
+ * @page: Pointer to the allocated pages.
+ * @size: Size of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_contiguous(). As
+ * cma_release() returns false when the provided pages do not belong to a
+ * contiguous area, and true otherwise, this function falls back to
+ * __free_pages() upon a false return.
+ */
+void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
+{
+        if (!cma_release(dev_get_cma_area(dev), page, size >> PAGE_SHIFT))
+                __free_pages(page, get_order(size));
+}
+
 /*
  * Support for reserved memory regions defined in device tree
  */
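A minimal sketch (not part of the patch) of how a caller inside the DMA core pairs the two new helpers. dma_coherent_ok() mirrors the addressability check used in the dma-direct hunk below; example_alloc() and example_free() are hypothetical names.

/* Hypothetical caller, for illustration only */
static struct page *example_alloc(struct device *dev, size_t size, gfp_t gfp)
{
        /* single-page requests skip the global CMA area entirely */
        struct page *page = dma_alloc_contiguous(dev, size, gfp);

        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                /* the device cannot address these pages: hand them back */
                dma_free_contiguous(dev, page, size);
                page = NULL;
        }
        return page;
}

static void example_free(struct device *dev, struct page *page, size_t size)
{
        /* falls back to __free_pages() when the pages are not CMA pages */
        dma_free_contiguous(dev, page, size);
}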
@@ -96,8 +96,6 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-        int page_order = get_order(size);
         struct page *page = NULL;
         u64 phys_mask;
 
@@ -109,20 +107,9 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
         gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                         &phys_mask);
 again:
-        /* CMA can be used only in the context which permits sleeping */
-        if (gfpflags_allow_blocking(gfp)) {
-                page = dma_alloc_from_contiguous(dev, count, page_order,
-                                gfp & __GFP_NOWARN);
-                if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-                        dma_release_from_contiguous(dev, page, count);
-                        page = NULL;
-                }
-        }
-        if (!page)
-                page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
-
+        page = dma_alloc_contiguous(dev, size, gfp);
         if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-                __free_pages(page, page_order);
+                dma_free_contiguous(dev, page, size);
                 page = NULL;
 
                 if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
@@ -151,10 +138,18 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
         if (!page)
                 return NULL;
 
+        if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+                /* remove any dirty cache lines on the kernel alias */
+                if (!PageHighMem(page))
+                        arch_dma_prep_coherent(page, size);
+                /* return the page pointer as the opaque cookie */
+                return page;
+        }
+
         if (PageHighMem(page)) {
                 /*
                  * Depending on the cma= arguments and per-arch setup
-                 * dma_alloc_from_contiguous could return highmem pages.
+                 * dma_alloc_contiguous could return highmem pages.
                  * Without remapping there is no way to return them here,
                  * so log an error and fail.
                  */
@@ -171,15 +166,19 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
                 *dma_handle = phys_to_dma(dev, page_to_phys(page));
         }
         memset(ret, 0, size);
+
+        if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+            dma_alloc_need_uncached(dev, attrs)) {
+                arch_dma_prep_coherent(page, size);
+                ret = uncached_kernel_address(ret);
+        }
+
         return ret;
 }
 
 void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
 {
-        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-        if (!dma_release_from_contiguous(dev, page, count))
-                __free_pages(page, get_order(size));
+        dma_free_contiguous(dev, page, size);
 }
 
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
@@ -187,15 +186,26 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 {
         unsigned int page_order = get_order(size);
 
+        if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+                /* cpu_addr is a struct page cookie, not a kernel address */
+                __dma_direct_free_pages(dev, size, cpu_addr);
+                return;
+        }
+
         if (force_dma_unencrypted())
                 set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+
+        if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+            dma_alloc_need_uncached(dev, attrs))
+                cpu_addr = cached_kernel_address(cpu_addr);
         __dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
 }
 
 void *dma_direct_alloc(struct device *dev, size_t size,
                 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-        if (!dev_is_dma_coherent(dev))
+        if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+            dma_alloc_need_uncached(dev, attrs))
                 return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
         return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
 }
@@ -203,7 +213,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 void dma_direct_free(struct device *dev, size_t size,
                 void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
-        if (!dev_is_dma_coherent(dev))
+        if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+            dma_alloc_need_uncached(dev, attrs))
                 arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
         else
                 dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
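A hedged driver-side sketch (not from this series) of what the DMA_ATTR_NO_KERNEL_MAPPING handling above enables: the CPU-side return value is only an opaque cookie to hand back at free time, never an address the kernel may dereference. example_buf_roundtrip() is a hypothetical helper.

/* Hypothetical driver snippet, for illustration only */
static int example_buf_roundtrip(struct device *dev, size_t size)
{
        dma_addr_t dma;
        void *cookie;

        /* no kernel mapping is created; only the device address is usable */
        cookie = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
                                 DMA_ATTR_NO_KERNEL_MAPPING);
        if (!cookie)
                return -ENOMEM;

        /* ... program 'dma' into the device; never dereference 'cookie' ... */

        dma_free_attrs(dev, size, cookie, dma, DMA_ATTR_NO_KERNEL_MAPPING);
        return 0;
}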
@@ -317,6 +317,12 @@ void arch_dma_set_mask(struct device *dev, u64 mask);
 
 int dma_set_mask(struct device *dev, u64 mask)
 {
+        /*
+         * Truncate the mask to the actually supported dma_addr_t width to
+         * avoid generating unsupportable addresses.
+         */
+        mask = (dma_addr_t)mask;
+
         if (!dev->dma_mask || !dma_supported(dev, mask))
                 return -EIO;
 
@@ -330,6 +336,12 @@ EXPORT_SYMBOL(dma_set_mask);
 #ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
+        /*
+         * Truncate the mask to the actually supported dma_addr_t width to
+         * avoid generating unsupportable addresses.
+         */
+        mask = (dma_addr_t)mask;
+
         if (!dma_supported(dev, mask))
                 return -EIO;
 
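A worked illustration of the truncation added in the two hunks above, assuming a configuration where dma_addr_t is 32 bits wide; example_truncate() is a hypothetical helper, not kernel code.

/* Illustration only, assuming a 32-bit dma_addr_t */
static u64 example_truncate(u64 mask)
{
        /*
         * DMA_BIT_MASK(64) == 0xffffffffffffffff narrows to 0xffffffff,
         * the widest mask a 32-bit dma_addr_t can actually represent,
         * so dma_supported() is asked about an achievable mask.
         */
        return (dma_addr_t)mask;
}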
@@ -158,6 +158,9 @@ out:
 
 bool dma_in_atomic_pool(void *start, size_t size)
 {
+        if (unlikely(!atomic_pool))
+                return false;
+
         return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
 }
 
@@ -199,8 +202,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 
         size = PAGE_ALIGN(size);
 
-        if (!gfpflags_allow_blocking(flags) &&
-            !(attrs & DMA_ATTR_NO_KERNEL_MAPPING)) {
+        if (!gfpflags_allow_blocking(flags)) {
                 ret = dma_alloc_from_pool(size, &page, flags);
                 if (!ret)
                         return NULL;
@@ -214,11 +216,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
         /* remove any dirty cache lines on the kernel alias */
         arch_dma_prep_coherent(page, size);
 
-        if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
-                ret = page; /* opaque cookie */
-                goto done;
-        }
-
         /* create a coherent mapping */
         ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
                         arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
@@ -237,10 +234,7 @@ done:
 void arch_dma_free(struct device *dev, size_t size, void *vaddr,
                 dma_addr_t dma_handle, unsigned long attrs)
 {
-        if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
-                /* vaddr is a struct page cookie, not a kernel address */
-                __dma_direct_free_pages(dev, size, vaddr);
-        } else if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
+        if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
                 phys_addr_t phys = dma_to_phys(dev, dma_handle);
                 struct page *page = pfn_to_page(__phys_to_pfn(phys));
 