dma-direct: provide mmap and get_sgtable method overrides
For dma-direct we know that the DMA address is an encoding of the
physical address that we can trivially decode.  Use that fact to
provide implementations that do not need the arch_dma_coherent_to_pfn
architecture hook.  Note that we can still support mmap of
non-coherent memory only if the architecture provides a way to set an
uncached bit in the page tables.  This must be true for architectures
that use the generic remap helpers, but other architectures can also
manually select it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Filippov <jcmvbkbc@gmail.com>
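The dma_direct_* overrides that the hunks below switch to are added
elsewhere in this commit (in kernel/dma/direct.c) and are not part of
this excerpt. As a sketch of the decoding idea only, with the helper
body approximated rather than quoted from the patch, the get_sgtable
override can look roughly like this:

	/* Sketch: decode the DMA address straight back to a struct page. */
	struct page *dma_direct_to_page(struct device *dev, dma_addr_t dma_addr)
	{
		return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
	}

	int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
	{
		/* One physically contiguous region -> a single-entry table. */
		struct page *page = dma_direct_to_page(dev, dma_addr);
		int ret;

		ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
		if (!ret)
			sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
		return ret;
	}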
@@ -112,24 +112,9 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
 {
-	struct page *page;
+	struct page *page = virt_to_page(cpu_addr);
 	int ret;
 
-	if (!dev_is_dma_coherent(dev)) {
-		unsigned long pfn;
-
-		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
-			return -ENXIO;
-
-		/* If the PFN is not valid, we do not have a struct page */
-		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
-		if (!pfn_valid(pfn))
-			return -ENXIO;
-		page = pfn_to_page(pfn);
-	} else {
-		page = virt_to_page(cpu_addr);
-	}
-
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 	if (!ret)
 		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
@@ -154,7 +139,7 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	if (dma_is_direct(ops))
-		return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr,
+		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
 				size, attrs);
 	if (!ops->get_sgtable)
 		return -ENXIO;
@@ -192,7 +177,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long user_count = vma_pages(vma);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
-	unsigned long pfn;
 	int ret = -ENXIO;
 
 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
@@ -203,19 +187,8 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	if (off >= count || user_count > count - off)
 		return -ENXIO;
 
-	if (!dev_is_dma_coherent(dev)) {
-		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
-			return -ENXIO;
-
-		/* If the PFN is not valid, we do not have a struct page */
-		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
-		if (!pfn_valid(pfn))
-			return -ENXIO;
-	} else {
-		pfn = page_to_pfn(virt_to_page(cpu_addr));
-	}
-
-	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+	return remap_pfn_range(vma, vma->vm_start,
+			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
 			user_count << PAGE_SHIFT, vma->vm_page_prot);
 #else
 	return -ENXIO;
@@ -233,12 +206,8 @@ bool dma_can_mmap(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
-	if (dma_is_direct(ops)) {
-		return IS_ENABLED(CONFIG_MMU) &&
-		       (dev_is_dma_coherent(dev) ||
-			IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN));
-	}
-
+	if (dma_is_direct(ops))
+		return dma_direct_can_mmap(dev);
 	return ops->mmap != NULL;
 }
 EXPORT_SYMBOL_GPL(dma_can_mmap);
@@ -263,7 +232,7 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	if (dma_is_direct(ops))
-		return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size,
+		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
 				attrs);
 	if (!ops->mmap)
 		return -ENXIO;
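For reference, a plausible shape for the dma_direct_can_mmap and
dma_direct_mmap helpers the hunks above now call, reconstructed from
the open-coded logic they remove; the exact guards and Kconfig symbols
in the real implementation may differ:

	bool dma_direct_can_mmap(struct device *dev)
	{
		/*
		 * mmap of non-coherent memory needs an uncached bit in the
		 * page tables, per the commit message above.
		 */
		return IS_ENABLED(CONFIG_MMU) &&
		       (dev_is_dma_coherent(dev) ||
			IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN));
	}

	int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
	{
		unsigned long user_count = vma_pages(vma);
		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
		/* The PFN falls straight out of the DMA address. */
		unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));

		vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

		if (vma->vm_pgoff >= count ||
		    user_count > count - vma->vm_pgoff)
			return -ENXIO;
		return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
				user_count << PAGE_SHIFT, vma->vm_page_prot);
	}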