dma-mapping: always use VM_DMA_COHERENT for generic DMA remap
Currently the generic DMA remap allocator takes a vm_flags argument from the caller, which is a little confusing. We just introduced a generic vmalloc-level flag (VM_DMA_COHERENT) to identify DMA-coherent allocations, so use it everywhere and remove the now-pointless argument.

Signed-off-by: Christoph Hellwig <hch@lst.de>
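As context, here is a minimal sketch of the calling convention before and after this change, based only on the call sites touched in the diff below; the surrounding caller is hypothetical and the fragment is illustrative rather than complete:

	/* Before: callers passed an explicit vm_flags value such as VM_USERMAP. */
	addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	/* ... use the mapping ... */
	dma_common_free_remap(addr, size, VM_USERMAP);

	/*
	 * After: the vm_flags argument is gone; the remap helpers tag the
	 * vmalloc area with VM_DMA_COHERENT internally, and the free path
	 * checks for that flag instead of a caller-supplied value.
	 */
	addr = dma_common_contiguous_remap(page, size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	/* ... use the mapping ... */
	dma_common_free_remap(addr, size);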
@@ -12,12 +12,11 @@
 #include <linux/vmalloc.h>
 
 static struct vm_struct *__dma_common_pages_remap(struct page **pages,
-		size_t size, unsigned long vm_flags, pgprot_t prot,
-		const void *caller)
+		size_t size, pgprot_t prot, const void *caller)
 {
 	struct vm_struct *area;
 
-	area = get_vm_area_caller(size, vm_flags, caller);
+	area = get_vm_area_caller(size, VM_DMA_COHERENT, caller);
 	if (!area)
 		return NULL;
 
@@ -34,12 +33,11 @@ static struct vm_struct *__dma_common_pages_remap(struct page **pages,
  * Cannot be used in non-sleeping contexts
  */
 void *dma_common_pages_remap(struct page **pages, size_t size,
-		unsigned long vm_flags, pgprot_t prot,
-		const void *caller)
+		pgprot_t prot, const void *caller)
 {
 	struct vm_struct *area;
 
-	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+	area = __dma_common_pages_remap(pages, size, prot, caller);
 	if (!area)
 		return NULL;
 
@@ -53,7 +51,6 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
  * Cannot be used in non-sleeping contexts
  */
 void *dma_common_contiguous_remap(struct page *page, size_t size,
-		unsigned long vm_flags,
 		pgprot_t prot, const void *caller)
 {
 	int i;
@@ -67,7 +64,7 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
 		pages[i] = nth_page(page, i);
 
-	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+	area = __dma_common_pages_remap(pages, size, prot, caller);
 
 	kfree(pages);
 
@@ -79,11 +76,11 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
 /*
  * Unmaps a range previously mapped by dma_common_*_remap
  */
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+void dma_common_free_remap(void *cpu_addr, size_t size)
 {
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
-	if (!area || (area->flags & vm_flags) != vm_flags) {
+	if (!area || area->flags != VM_DMA_COHERENT) {
 		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
 		return;
 	}
@@ -136,7 +133,7 @@ static int __init dma_atomic_pool_init(void)
 	if (!atomic_pool)
 		goto free_page;
 
-	addr = dma_common_contiguous_remap(page, atomic_pool_size, VM_USERMAP,
+	addr = dma_common_contiguous_remap(page, atomic_pool_size,
 					   pgprot_dmacoherent(PAGE_KERNEL),
 					   __builtin_return_address(0));
 	if (!addr)
@@ -153,7 +150,7 @@ static int __init dma_atomic_pool_init(void)
 	return 0;
 
 remove_mapping:
-	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+	dma_common_free_remap(addr, atomic_pool_size);
 destroy_genpool:
 	gen_pool_destroy(atomic_pool);
 	atomic_pool = NULL;
@@ -228,7 +225,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	arch_dma_prep_coherent(page, size);
 
 	/* create a coherent mapping */
-	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
+	ret = dma_common_contiguous_remap(page, size,
 			dma_pgprot(dev, PAGE_KERNEL, attrs),
 			__builtin_return_address(0));
 	if (!ret) {