dma-mapping: move all DMA mapping code to kernel/dma
Currently the code is split over various files with dma- prefixes in the lib/ and drivers/base directories, and the number of files keeps growing. Move them into a single directory to keep the code together and remove the file name prefixes. To match the irq infrastructure, this directory is placed under kernel/.

Signed-off-by: Christoph Hellwig <hch@lst.de>
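Since this is pure code movement, the driver-facing DMA API is unchanged. As a reminder of what the moved files implement, here is a minimal, hypothetical driver-side sketch of the streaming-mapping round trip (the function, device, and buffer names are invented for illustration):

#include <linux/dma-mapping.h>

/* Hypothetical helper: map a CPU buffer for a device-bound transfer. */
static int example_map_buffer(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... hand "handle" to the device and run the transfer ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}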
lib/Kconfig (47 lines changed)

--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -420,60 +420,15 @@ config HAS_IOPORT_MAP
 	depends on HAS_IOMEM && !NO_IOPORT_MAP
 	default y
 
-config HAS_DMA
-	bool
-	depends on !NO_DMA
-	default y
+source "kernel/dma/Kconfig"
 
 config SGL_ALLOC
 	bool
 	default n
 
-config NEED_SG_DMA_LENGTH
-	bool
-
-config NEED_DMA_MAP_STATE
-	bool
-
-config ARCH_DMA_ADDR_T_64BIT
-	def_bool 64BIT || PHYS_ADDR_T_64BIT
-
-config IOMMU_HELPER
-	bool
-
-config ARCH_HAS_SYNC_DMA_FOR_DEVICE
-	bool
-
-config ARCH_HAS_SYNC_DMA_FOR_CPU
-	bool
-	select NEED_DMA_MAP_STATE
-
-config DMA_DIRECT_OPS
-	bool
-	depends on HAS_DMA
-
-config DMA_NONCOHERENT_OPS
-	bool
-	depends on HAS_DMA
-	select DMA_DIRECT_OPS
-
-config DMA_NONCOHERENT_MMAP
-	bool
-	depends on DMA_NONCOHERENT_OPS
-
-config DMA_NONCOHERENT_CACHE_SYNC
-	bool
-	depends on DMA_NONCOHERENT_OPS
-
-config DMA_VIRT_OPS
-	bool
-	depends on HAS_DMA
-
-config SWIOTLB
-	bool
-	select DMA_DIRECT_OPS
-	select NEED_DMA_MAP_STATE
-
 config CHECK_SIGNATURE
 	bool
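DMA_DIRECT_OPS (now defined in kernel/dma/Kconfig) gates the generic dma_direct_ops implementation whose lib/ copy is removed below. As an illustration of how the option is consumed, an architecture without an IOMMU typically hands every device the direct ops; this is a hedged sketch only, since the exact location and form of the get_arch_dma_ops() hook varies per architecture:

#include <linux/dma-mapping.h>

/*
 * Illustrative only: many architectures define this hook in
 * <asm/dma-mapping.h>.  Returning dma_direct_ops means "no IOMMU,
 * no bounce buffering" for every bus.
 */
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
        return &dma_direct_ops;
}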
lib/Makefile

--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,9 +29,6 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 lib-$(CONFIG_PRINTK) += dump_stack.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
-obj-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
-obj-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o
-obj-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
 
 lib-y += kobject.o klist.o
 obj-y += lockref.o
@@ -148,7 +145,6 @@ obj-$(CONFIG_SMP) += percpu_counter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
 
-obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
 obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
@@ -169,8 +165,6 @@ obj-$(CONFIG_NLATTR) += nlattr.o
 
 obj-$(CONFIG_LRU_CACHE) += lru_cache.o
 
-obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
-
 obj-$(CONFIG_GENERIC_CSUM) += checksum.o
 
 obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
lib/dma-debug.c (1773 lines changed): file diff suppressed because it is too large.
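lib/dma-debug.c implements CONFIG_DMA_API_DEBUG, which shadows every map/unmap call and warns about misuse. A hypothetical example, with invented names, of the class of bug the checker catches, unmapping with a different size and direction than was mapped:

#include <linux/dma-mapping.h>

static void example_bad_unmap(struct device *dev, void *buf)
{
        dma_addr_t handle = dma_map_single(dev, buf, 4096, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, handle))
                return;

        /*
         * With CONFIG_DMA_API_DEBUG=y the checker warns here: the size
         * (2048 vs 4096) and direction (FROM vs TO) do not match the
         * matching dma_map_single() call above.
         */
        dma_unmap_single(dev, handle, 2048, DMA_FROM_DEVICE);
}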
lib/dma-direct.c (204 lines deleted)

--- a/lib/dma-direct.c
+++ /dev/null
@@ -1,204 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DMA operations that map physical memory directly without using an IOMMU or
- * flushing caches.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-contiguous.h>
-#include <linux/pfn.h>
-#include <linux/set_memory.h>
-
-#define DIRECT_MAPPING_ERROR	0
-
-/*
- * Most architectures use ZONE_DMA for the first 16 Megabytes, but
- * some use it for entirely different regions:
- */
-#ifndef ARCH_ZONE_DMA_BITS
-#define ARCH_ZONE_DMA_BITS 24
-#endif
-
-/*
- * For AMD SEV all DMA must be to unencrypted addresses.
- */
-static inline bool force_dma_unencrypted(void)
-{
-        return sev_active();
-}
-
-static bool
-check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
-                const char *caller)
-{
-        if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
-                if (!dev->dma_mask) {
-                        dev_err(dev,
-                                "%s: call on device without dma_mask\n",
-                                caller);
-                        return false;
-                }
-
-                if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
-                        dev_err(dev,
-                                "%s: overflow %pad+%zu of device mask %llx\n",
-                                caller, &dma_addr, size, *dev->dma_mask);
-                }
-                return false;
-        }
-        return true;
-}
-
-static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
-{
-        dma_addr_t addr = force_dma_unencrypted() ?
-                __phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
-        return addr + size - 1 <= dev->coherent_dma_mask;
-}
-
-void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                gfp_t gfp, unsigned long attrs)
-{
-        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-        int page_order = get_order(size);
-        struct page *page = NULL;
-        void *ret;
-
-        /* we always manually zero the memory once we are done: */
-        gfp &= ~__GFP_ZERO;
-
-        /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
-        if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
-                gfp |= GFP_DMA;
-        if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
-                gfp |= GFP_DMA32;
-
-again:
-        /* CMA can be used only in the context which permits sleeping */
-        if (gfpflags_allow_blocking(gfp)) {
-                page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
-                if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-                        dma_release_from_contiguous(dev, page, count);
-                        page = NULL;
-                }
-        }
-        if (!page)
-                page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
-
-        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-                __free_pages(page, page_order);
-                page = NULL;
-
-                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
-                    dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
-                    !(gfp & (GFP_DMA32 | GFP_DMA))) {
-                        gfp |= GFP_DMA32;
-                        goto again;
-                }
-
-                if (IS_ENABLED(CONFIG_ZONE_DMA) &&
-                    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
-                    !(gfp & GFP_DMA)) {
-                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
-                        goto again;
-                }
-        }
-
-        if (!page)
-                return NULL;
-        ret = page_address(page);
-        if (force_dma_unencrypted()) {
-                set_memory_decrypted((unsigned long)ret, 1 << page_order);
-                *dma_handle = __phys_to_dma(dev, page_to_phys(page));
-        } else {
-                *dma_handle = phys_to_dma(dev, page_to_phys(page));
-        }
-        memset(ret, 0, size);
-        return ret;
-}
-
-/*
- * NOTE: this function must never look at the dma_addr argument, because we want
- * to be able to use it as a helper for iommu implementations as well.
- */
-void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
-                dma_addr_t dma_addr, unsigned long attrs)
-{
-        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-        unsigned int page_order = get_order(size);
-
-        if (force_dma_unencrypted())
-                set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
-        if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
-                free_pages((unsigned long)cpu_addr, page_order);
-}
-
-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
-                unsigned long offset, size_t size, enum dma_data_direction dir,
-                unsigned long attrs)
-{
-        dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
-
-        if (!check_addr(dev, dma_addr, size, __func__))
-                return DIRECT_MAPPING_ERROR;
-        return dma_addr;
-}
-
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-                enum dma_data_direction dir, unsigned long attrs)
-{
-        int i;
-        struct scatterlist *sg;
-
-        for_each_sg(sgl, sg, nents, i) {
-                BUG_ON(!sg_page(sg));
-
-                sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
-                if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
-                        return 0;
-                sg_dma_len(sg) = sg->length;
-        }
-
-        return nents;
-}
-
-int dma_direct_supported(struct device *dev, u64 mask)
-{
-#ifdef CONFIG_ZONE_DMA
-        if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
-                return 0;
-#else
-        /*
-         * Because 32-bit DMA masks are so common we expect every architecture
-         * to be able to satisfy them - either by not supporting more physical
-         * memory, or by providing a ZONE_DMA32.  If neither is the case, the
-         * architecture needs to use an IOMMU instead of the direct mapping.
-         */
-        if (mask < DMA_BIT_MASK(32))
-                return 0;
-#endif
-        /*
-         * Various PCI/PCIe bridges have broken support for > 32bit DMA even
-         * if the device itself might support it.
-         */
-        if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32))
-                return 0;
-        return 1;
-}
-
-int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-        return dma_addr == DIRECT_MAPPING_ERROR;
-}
-
-const struct dma_map_ops dma_direct_ops = {
-        .alloc                  = dma_direct_alloc,
-        .free                   = dma_direct_free,
-        .map_page               = dma_direct_map_page,
-        .map_sg                 = dma_direct_map_sg,
-        .dma_supported          = dma_direct_supported,
-        .mapping_error          = dma_direct_mapping_error,
-};
-EXPORT_SYMBOL(dma_direct_ops);
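For context, dma_direct_ops is consumed through the generic dispatch layer in include/linux/dma-mapping.h. A simplified, not verbatim, sketch of how a mapping call reaches dma_direct_map_page() above (the wrapper name is invented; attrs and debug hooks are omitted):

#include <linux/dma-mapping.h>

static inline dma_addr_t example_dma_map_page(struct device *dev,
                struct page *page, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        /* For a device using dma_direct_ops this calls dma_direct_map_page(). */
        return ops->map_page(dev, page, offset, size, dir, 0);
}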
lib/dma-noncoherent.c (102 lines deleted)

--- a/lib/dma-noncoherent.c
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2018 Christoph Hellwig.
- *
- * DMA operations that map physical memory directly without providing cache
- * coherence.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/scatterlist.h>
-
-static void dma_noncoherent_sync_single_for_device(struct device *dev,
-                dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-        arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
-}
-
-static void dma_noncoherent_sync_sg_for_device(struct device *dev,
-                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-        struct scatterlist *sg;
-        int i;
-
-        for_each_sg(sgl, sg, nents, i)
-                arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
-}
-
-static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
-                unsigned long offset, size_t size, enum dma_data_direction dir,
-                unsigned long attrs)
-{
-        dma_addr_t addr;
-
-        addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
-        if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-                arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
-                                size, dir);
-        return addr;
-}
-
-static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
-                int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-        nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
-        if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-                dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
-        return nents;
-}
-
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
-                dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-        arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
-}
-
-static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
-                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-        struct scatterlist *sg;
-        int i;
-
-        for_each_sg(sgl, sg, nents, i)
-                arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
-}
-
-static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
-                size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-                dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
-                int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-                dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-#endif
-
-const struct dma_map_ops dma_noncoherent_ops = {
-        .alloc                  = arch_dma_alloc,
-        .free                   = arch_dma_free,
-        .mmap                   = arch_dma_mmap,
-        .sync_single_for_device = dma_noncoherent_sync_single_for_device,
-        .sync_sg_for_device     = dma_noncoherent_sync_sg_for_device,
-        .map_page               = dma_noncoherent_map_page,
-        .map_sg                 = dma_noncoherent_map_sg,
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-        .sync_single_for_cpu    = dma_noncoherent_sync_single_for_cpu,
-        .sync_sg_for_cpu        = dma_noncoherent_sync_sg_for_cpu,
-        .unmap_page             = dma_noncoherent_unmap_page,
-        .unmap_sg               = dma_noncoherent_unmap_sg,
-#endif
-        .dma_supported          = dma_direct_supported,
-        .mapping_error          = dma_direct_mapping_error,
-        .cache_sync             = arch_dma_cache_sync,
-};
-EXPORT_SYMBOL(dma_noncoherent_ops);
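dma_noncoherent_ops pushes all cache maintenance and allocation policy to per-architecture hooks. A hedged sketch of what an architecture selecting DMA_NONCOHERENT_OPS provides, with prototypes following <linux/dma-noncoherent.h> of this era; the bodies are illustrative stubs only, not any real architecture's implementation:

#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>

void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        /* e.g. write back (and maybe invalidate) CPU caches for the buffer */
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        /* e.g. invalidate CPU caches before the CPU reads DMA'd data */
}

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
{
        /* typically dma_direct_alloc() plus an uncached remapping */
        return dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
}

void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
{
        dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
}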
lib/dma-virt.c (61 lines deleted)

--- a/lib/dma-virt.c
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * lib/dma-virt.c
- *
- * DMA operations that map to virtual addresses without flushing memory.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-
-static void *dma_virt_alloc(struct device *dev, size_t size,
-                            dma_addr_t *dma_handle, gfp_t gfp,
-                            unsigned long attrs)
-{
-        void *ret;
-
-        ret = (void *)__get_free_pages(gfp, get_order(size));
-        if (ret)
-                *dma_handle = (uintptr_t)ret;
-        return ret;
-}
-
-static void dma_virt_free(struct device *dev, size_t size,
-                          void *cpu_addr, dma_addr_t dma_addr,
-                          unsigned long attrs)
-{
-        free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
-                                    unsigned long offset, size_t size,
-                                    enum dma_data_direction dir,
-                                    unsigned long attrs)
-{
-        return (uintptr_t)(page_address(page) + offset);
-}
-
-static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
-                           int nents, enum dma_data_direction dir,
-                           unsigned long attrs)
-{
-        int i;
-        struct scatterlist *sg;
-
-        for_each_sg(sgl, sg, nents, i) {
-                BUG_ON(!sg_page(sg));
-                sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
-                sg_dma_len(sg) = sg->length;
-        }
-
-        return nents;
-}
-
-const struct dma_map_ops dma_virt_ops = {
-        .alloc                  = dma_virt_alloc,
-        .free                   = dma_virt_free,
-        .map_page               = dma_virt_map_page,
-        .map_sg                 = dma_virt_map_sg,
-};
-EXPORT_SYMBOL(dma_virt_ops);
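dma_virt_ops serves devices whose "DMA" is really CPU memory access (for example software RDMA drivers): a dma_addr_t is simply the kernel virtual address. A hedged sketch of wiring it up; the function and device names are invented, and assigning dev->dma_ops directly is the mechanism of this kernel era:

#include <linux/dma-mapping.h>

static void example_use_virt_dma(struct device *sw_dev)
{
        /* All mappings become plain virtual addresses; no translation occurs. */
        sw_dev->dma_ops = &dma_virt_ops;
}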
lib/swiotlb.c (1087 lines changed): file diff suppressed because it is too large.