android_kernel_xiaomi_sm8450/kernel/dma/remap.c
Linus Torvalds 1ee18de929 Merge tag 'dma-mapping-5.8' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - enhance the dma pool to allow atomic allocation on x86 with AMD SEV
   (David Rientjes)

 - two small cleanups (Jason Yan and Peter Collingbourne)

* tag 'dma-mapping-5.8' of git://git.infradead.org/users/hch/dma-mapping:
  dma-contiguous: fix comment for dma_release_from_contiguous
  dma-pool: scale the default DMA coherent pool size with memory capacity
  x86/mm: unencrypted non-blocking DMA allocations use coherent pools
  dma-pool: add pool sizes to debugfs
  dma-direct: atomic allocations must come from atomic coherent pools
  dma-pool: dynamically expanding atomic pools
  dma-pool: add additional coherent pools to map to gfp mask
  dma-remap: separate DMA atomic pools from direct remap code
  dma-debug: make __dma_entry_alloc_check_leak() static
2020-06-06 11:43:23 -07:00

71 lines · 1.6 KiB · C

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 The Linux Foundation
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct page **dma_common_find_pages(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || area->flags != VM_DMA_COHERENT)
		return NULL;
	return area->pages;
}

/*
 * Remaps an array of PAGE_SIZE pages into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			 pgprot_t prot, const void *caller)
{
	void *vaddr;

	vaddr = vmap(pages, size >> PAGE_SHIFT, VM_DMA_COHERENT, prot);
	if (vaddr)
		find_vm_area(vaddr)->pages = pages;
	return vaddr;
}

/*
 * Remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
			pgprot_t prot, const void *caller)
{
	int count = size >> PAGE_SHIFT;
	struct page **pages;
	void *vaddr;
	int i;

	pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return NULL;
	for (i = 0; i < count; i++)
		pages[i] = nth_page(page, i);
	vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);
	kfree(pages);

	return vaddr;
}

/*
 * Unmaps a range previously mapped by dma_common_*_remap
 */
void dma_common_free_remap(void *cpu_addr, size_t size)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || area->flags != VM_DMA_COHERENT) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}
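
For context, a minimal sketch of how a caller might pair these helpers: allocate a physically contiguous region, remap it with an alternate page protection via dma_common_contiguous_remap(), and later tear the mapping down with dma_common_free_remap(). This sketch is not part of remap.c; the helper names remap_example()/unmap_example() and the choice of write-combine protection are assumptions for illustration only.

/*
 * Hypothetical usage sketch (not part of this file). Both helpers
 * may sleep, since the remap path goes through vmap().
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static void *remap_example(size_t size)
{
	struct page *page;
	void *vaddr;

	size = PAGE_ALIGN(size);
	/* Physically contiguous backing allocation. */
	page = alloc_pages(GFP_KERNEL, get_order(size));
	if (!page)
		return NULL;

	/* Remap the region write-combined into a new vm_area. */
	vaddr = dma_common_contiguous_remap(page, size,
			pgprot_writecombine(PAGE_KERNEL),
			__builtin_return_address(0));
	if (!vaddr)
		__free_pages(page, get_order(size));
	return vaddr;
}

static void unmap_example(void *vaddr, size_t size)
{
	/*
	 * Removes only the kernel mapping; the underlying pages are
	 * still owned by the caller and must be freed separately.
	 */
	dma_common_free_remap(vaddr, size);
}

Note the asymmetry this sketch relies on: dma_common_free_remap() only looks up the vm_struct, checks the VM_DMA_COHERENT flag, and unmaps; it never releases the backing pages, so the caller keeps ownership of them throughout.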