dma-mapping: add a new dma_alloc_pages API
This API is the equivalent of alloc_pages, except that the returned memory
is guaranteed to be DMA addressable by the passed-in device.  The
implementation will also be used to provide a more sensible replacement
for the DMA_ATTR_NON_CONSISTENT flag.

Additionally, dma_alloc_noncoherent is switched over to use dma_alloc_pages
as its backend.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de> (MIPS part)
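As a usage sketch of the new pair (the helper name, buffer size, and
transfer direction below are illustrative assumptions, not part of this
patch):

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>
	#include <linux/sizes.h>

	/* hypothetical driver helper, for illustration only */
	static int mydrv_alloc_buffer(struct device *dev)
	{
		dma_addr_t dma_handle;
		struct page *page;

		/* pages are guaranteed to be DMA addressable by @dev */
		page = dma_alloc_pages(dev, SZ_16K, &dma_handle,
				       DMA_BIDIRECTIONAL, GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		/*
		 * ... fill page_address(page) and program dma_handle into
		 * the device; on non-coherent platforms bracket device
		 * access with the dma_sync_single_for_device()/for_cpu()
		 * calls ...
		 */

		dma_free_pages(dev, SZ_16K, page, dma_handle,
			       DMA_BIDIRECTIONAL);
		return 0;
	}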
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (C) 2018 Christoph Hellwig.
+ * Copyright (C) 2018-2020 Christoph Hellwig.
  *
  * DMA operations that map physical memory directly without using an IOMMU.
  */
@@ -292,6 +292,56 @@ void dma_direct_free(struct device *dev, size_t size,
 	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
 }
 
+struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
+{
+	struct page *page;
+	void *ret;
+
+	if (dma_should_alloc_from_pool(dev, gfp, 0)) {
+		page = dma_alloc_from_pool(dev, size, &ret, gfp,
+				dma_coherent_ok);
+		if (!page)
+			return NULL;
+		goto done;
+	}
+
+	page = __dma_direct_alloc_pages(dev, size, gfp);
+	if (!page)
+		return NULL;
+	ret = page_address(page);
+	if (force_dma_unencrypted(dev)) {
+		if (set_memory_decrypted((unsigned long)ret,
+				1 << get_order(size)))
+			goto out_free_pages;
+	}
+	memset(ret, 0, size);
+done:
+	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+	return page;
+out_free_pages:
+	dma_free_contiguous(dev, page, size);
+	return NULL;
+}
+
+void dma_direct_free_pages(struct device *dev, size_t size,
+		struct page *page, dma_addr_t dma_addr,
+		enum dma_data_direction dir)
+{
+	unsigned int page_order = get_order(size);
+	void *vaddr = page_address(page);
+
+	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
+	if (dma_should_free_from_pool(dev, 0) &&
+	    dma_free_from_pool(dev, vaddr, size))
+		return;
+
+	if (force_dma_unencrypted(dev))
+		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
+
+	dma_free_contiguous(dev, page, size);
+}
+
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
 	defined(CONFIG_SWIOTLB)
 void dma_direct_sync_sg_for_device(struct device *dev,
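The dma_alloc_noncoherent switch mentioned in the commit message reduces to
a thin inline wrapper over dma_alloc_pages; a sketch of its shape
(simplified, not the verbatim header change):

	static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
	{
		struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);

		return page ? page_address(page) : NULL;
	}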