dma-mapping: merge direct and noncoherent ops

All the cache maintenance is already stubbed out when not enabled,
but merging the two allows us to nicely handle the case where
cache maintenance is required for some devices, but not others.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Paul Burton <paul.burton@mips.com> # MIPS parts
This commit is contained in:
Christoph Hellwig
2018-09-08 11:22:43 +02:00
Parent f3ecc0ff04
Current commit bc3ec75de5
29 files changed, 160 insertions(+), 192 deletions(-)
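For reference, the "stubbed out" behaviour the commit message relies on lives in <linux/dma-noncoherent.h>, which direct.c now includes: when an architecture does not select the matching ARCH_HAS_SYNC_DMA_* option, the arch_sync_dma_* hooks fall back to empty inline stubs, so the common code may call them unconditionally. A simplified sketch of that pattern (not a verbatim copy of the header):

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(struct device *dev,
		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
{
	/* no cache maintenance needed on this configuration */
}
#endif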


@@ -33,18 +33,13 @@ config DMA_DIRECT_OPS
 	bool
 	depends on HAS_DMA
 
-config DMA_NONCOHERENT_OPS
-	bool
-	depends on HAS_DMA
-	select DMA_DIRECT_OPS
-
 config DMA_NONCOHERENT_MMAP
 	bool
-	depends on DMA_NONCOHERENT_OPS
+	depends on DMA_DIRECT_OPS
 
 config DMA_NONCOHERENT_CACHE_SYNC
 	bool
-	depends on DMA_NONCOHERENT_OPS
+	depends on DMA_DIRECT_OPS
 
 config DMA_VIRT_OPS
 	bool


@@ -4,7 +4,6 @@ obj-$(CONFIG_HAS_DMA)			+= mapping.o
 obj-$(CONFIG_DMA_CMA)			+= contiguous.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o
 obj-$(CONFIG_DMA_DIRECT_OPS)		+= direct.o
-obj-$(CONFIG_DMA_NONCOHERENT_OPS)	+= noncoherent.o
 obj-$(CONFIG_DMA_VIRT_OPS)		+= virt.o
 obj-$(CONFIG_DMA_API_DEBUG)		+= debug.o
 obj-$(CONFIG_SWIOTLB)			+= swiotlb.o


@@ -1,13 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * DMA operations that map physical memory directly without using an IOMMU or
- * flushing caches.
+ * Copyright (C) 2018 Christoph Hellwig.
+ *
+ * DMA operations that map physical memory directly without using an IOMMU.
  */
 #include <linux/export.h>
 #include <linux/mm.h>
 #include <linux/dma-direct.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-contiguous.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/pfn.h>
 #include <linux/set_memory.h>
@@ -58,8 +60,8 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 	return addr + size - 1 <= dev->coherent_dma_mask;
 }
 
-void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp, unsigned long attrs)
+void *dma_direct_alloc_pages(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	int page_order = get_order(size);
@@ -124,7 +126,7 @@ again:
  * NOTE: this function must never look at the dma_addr argument, because we want
  * to be able to use it as a helper for iommu implementations as well.
  */
-void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs)
 {
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -136,14 +138,106 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
 		free_pages((unsigned long)cpu_addr, page_order);
 }
 
+void *dma_direct_alloc(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+	if (!dev_is_dma_coherent(dev))
+		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
+	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
+}
+
+void dma_direct_free(struct device *dev, size_t size,
+		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
+{
+	if (!dev_is_dma_coherent(dev))
+		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
+	else
+		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
+}
+
+static int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	if (!dev_is_dma_coherent(dev) &&
+	    IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP))
+		return arch_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+static void dma_direct_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	if (dev_is_dma_coherent(dev))
+		return;
+	arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_direct_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	if (dev_is_dma_coherent(dev))
+		return;
+
+	for_each_sg(sgl, sg, nents, i)
+		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+static void dma_direct_sync_single_for_cpu(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	if (dev_is_dma_coherent(dev))
+		return;
+	arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
+	arch_sync_dma_for_cpu_all(dev);
+}
+
+static void dma_direct_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	int i;
+
+	if (dev_is_dma_coherent(dev))
+		return;
+
+	for_each_sg(sgl, sg, nents, i)
+		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+	arch_sync_dma_for_cpu_all(dev);
+}
+
+static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
+}
+#endif
+
 dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
 {
-	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
+	phys_addr_t phys = page_to_phys(page) + offset;
+	dma_addr_t dma_addr = phys_to_dma(dev, phys);
 
 	if (!check_addr(dev, dma_addr, size, __func__))
 		return DIRECT_MAPPING_ERROR;
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_direct_sync_single_for_device(dev, dma_addr, size, dir);
 	return dma_addr;
 }
@@ -162,6 +256,8 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 		sg_dma_len(sg) = sg->length;
 	}
 
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
 	return nents;
 }
@@ -197,9 +293,22 @@ int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
 const struct dma_map_ops dma_direct_ops = {
 	.alloc			= dma_direct_alloc,
 	.free			= dma_direct_free,
+	.mmap			= dma_direct_mmap,
 	.map_page		= dma_direct_map_page,
 	.map_sg			= dma_direct_map_sg,
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
+	.sync_single_for_device	= dma_direct_sync_single_for_device,
+	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
+#endif
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
+	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
+	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
+	.unmap_page		= dma_direct_unmap_page,
+	.unmap_sg		= dma_direct_unmap_sg,
+#endif
 	.dma_supported		= dma_direct_supported,
 	.mapping_error		= dma_direct_mapping_error,
+	.cache_sync		= arch_dma_cache_sync,
 };
 EXPORT_SYMBOL(dma_direct_ops);
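The dev_is_dma_coherent() checks above are what let a single set of ops serve both coherent and noncoherent devices on the same kernel; the per-device coherence flag they consult was added by the parent commit f3ecc0ff04. Ignoring per-architecture overrides, the helper behaves roughly like this (simplified sketch, not the exact header code):

/* Sketch only: when any cache-maintenance hook is configured, coherence is
 * tracked per device; otherwise every device is treated as coherent.
 */
static inline bool dev_is_dma_coherent(struct device *dev)
{
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	return dev->dma_coherent;
#else
	return true;
#endif
}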


@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2018 Christoph Hellwig.
- *
- * DMA operations that map physical memory directly without providing cache
- * coherence.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/scatterlist.h>
-
-static void dma_noncoherent_sync_single_for_device(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-	arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
-}
-
-static void dma_noncoherent_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
-}
-
-static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	dma_addr_t addr;
-
-	addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
-	if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
-				size, dir);
-	return addr;
-}
-
-static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
-	if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
-	return nents;
-}
-
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
-    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
-static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-	arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
-	arch_sync_dma_for_cpu_all(dev);
-}
-
-static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
-	arch_sync_dma_for_cpu_all(dev);
-}
-
-static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-#endif
-
-const struct dma_map_ops dma_noncoherent_ops = {
-	.alloc			= arch_dma_alloc,
-	.free			= arch_dma_free,
-	.mmap			= arch_dma_mmap,
-	.sync_single_for_device	= dma_noncoherent_sync_single_for_device,
-	.sync_sg_for_device	= dma_noncoherent_sync_sg_for_device,
-	.map_page		= dma_noncoherent_map_page,
-	.map_sg			= dma_noncoherent_map_sg,
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
-    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
-	.sync_single_for_cpu	= dma_noncoherent_sync_single_for_cpu,
-	.sync_sg_for_cpu	= dma_noncoherent_sync_sg_for_cpu,
-	.unmap_page		= dma_noncoherent_unmap_page,
-	.unmap_sg		= dma_noncoherent_unmap_sg,
-#endif
-	.dma_supported		= dma_direct_supported,
-	.mapping_error		= dma_direct_mapping_error,
-	.cache_sync		= arch_dma_cache_sync,
-};
-EXPORT_SYMBOL(dma_noncoherent_ops);