Merge tag 'dma-mapping-4.18' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - replace the force_dma flag with a dma_configure bus method (Nipun
   Gupta, although one patch is incorrectly attributed to me due to a
   git rebase bug)

 - use GFP_DMA32 more aggressively in dma-direct (Takashi Iwai)

 - remove PCI_DMA_BUS_IS_PHYS and rely on the dma-mapping API to do the
   right thing for bounce buffering

 - move dma-debug initialization to common code, and apply a few
   cleanups to the dma-debug code

 - cleanup the Kconfig mess around swiotlb selection

 - swiotlb comment fixup (Yisheng Xie)

 - a trivial swiotlb fix (Dan Carpenter)

 - support swiotlb on RISC-V (based on a patch from Palmer Dabbelt)

 - add a new generic dma-noncoherent dma_map_ops implementation and use
   it for arc, c6x and nds32

 - improve scatterlist validity checking in dma-debug (Robin Murphy)

 - add a struct device quirk to limit the dma-mask to 32-bit due to
   bridge/system issues, and switch x86 to use it instead of a local
   hack for VIA bridges

 - handle devices without a dma_mask more gracefully in the dma-direct
   code

* tag 'dma-mapping-4.18' of git://git.infradead.org/users/hch/dma-mapping: (48 commits)
  dma-direct: don't crash on device without dma_mask
  nds32: use generic dma_noncoherent_ops
  nds32: implement the unmap_sg DMA operation
  nds32: consolidate DMA cache maintainance routines
  x86/pci-dma: switch the VIA 32-bit DMA quirk to use the struct device flag
  x86/pci-dma: remove the explicit nodac and allowdac option
  x86/pci-dma: remove the experimental forcesac boot option
  Documentation/x86: remove a stray reference to pci-nommu.c
  core, dma-direct: add a flag 32-bit dma limits
  dma-mapping: remove unused gfp_t parameter to arch_dma_alloc_attrs
  dma-debug: check scatterlist segments
  c6x: use generic dma_noncoherent_ops
  arc: use generic dma_noncoherent_ops
  arc: fix arc_dma_{map,unmap}_page
  arc: fix arc_dma_sync_sg_for_{cpu,device}
  arc: simplify arc_dma_sync_single_for_{cpu,device}
  dma-mapping: provide a generic dma-noncoherent implementation
  dma-mapping: simplify Kconfig dependencies
  riscv: add swiotlb support
  riscv: only enable ZONE_DMA32 for 64-bit
  ...
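(Editor's illustration, not part of this merge: with PCI_DMA_BUS_IS_PHYS
gone, a driver no longer second-guesses whether bus addresses equal
physical addresses; it just calls the dma-mapping API and lets the
backend, whether dma-direct, an IOMMU, or swiotlb bounce buffering, do
the right thing. A minimal, hypothetical driver fragment; my_start_io
and its caller are assumptions:)

#include <linux/dma-mapping.h>

static int my_start_io(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* The backend may bounce, remap, or pass through; we don't care. */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* hand 'dma' to the device and wait for the transfer to finish ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}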
arch/sparc/kernel/Makefile
@@ -59,7 +59,7 @@ obj-$(CONFIG_SPARC32) += leon_pmc.o
 obj-$(CONFIG_SPARC64) += reboot.o
 obj-$(CONFIG_SPARC64) += sysfs.o
-obj-$(CONFIG_SPARC64) += iommu.o
+obj-$(CONFIG_SPARC64) += iommu.o iommu-common.o
 obj-$(CONFIG_SPARC64) += central.o
 obj-$(CONFIG_SPARC64) += starfire.o
 obj-$(CONFIG_SPARC64) += power.o
@@ -74,8 +74,6 @@ obj-$(CONFIG_SPARC64) += pcr.o
 obj-$(CONFIG_SPARC64) += nmi.o
 obj-$(CONFIG_SPARC64_SMP) += cpumap.o
 
-obj-y += dma.o
-
 obj-$(CONFIG_PCIC_PCI) += pcic.o
 obj-$(CONFIG_LEON_PCI) += leon_pci.o
 obj-$(CONFIG_SPARC_GRPCI2)+= leon_pci_grpci2.o
arch/sparc/kernel/dma.c (deleted)
@@ -1,13 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma-debug.h>
-
-#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 15)
-
-static int __init dma_init(void)
-{
-	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
-	return 0;
-}
-fs_initcall(dma_init);
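(Side note, an illustration rather than part of this diff: the
"move dma-debug initialization to common code" patches replace
per-architecture boilerplate like the file deleted above with a single
initcall in the common dma-debug code. Roughly along these lines, with
the entry count and initcall level being assumptions here, not taken
from this diff:)

#include <linux/dma-debug.h>

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)	/* assumed common default */

static int __init dma_debug_do_init(void)
{
	/* one preallocation in common code, instead of one file per arch */
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
core_initcall(dma_debug_do_init);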
arch/sparc/kernel/iommu-common.c (new file, 264 lines)
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IOMMU mmap management and range allocation functions.
+ * Based almost entirely upon the powerpc iommu allocator.
+ */
+
+#include <linux/export.h>
+#include <linux/bitmap.h>
+#include <linux/bug.h>
+#include <linux/iommu-helper.h>
+#include <linux/dma-mapping.h>
+#include <linux/hash.h>
+#include <asm/iommu-common.h>
+
+static unsigned long iommu_large_alloc = 15;
+
+static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
+
+static inline bool need_flush(struct iommu_map_table *iommu)
+{
+	return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
+}
+
+static inline void set_flush(struct iommu_map_table *iommu)
+{
+	iommu->flags |= IOMMU_NEED_FLUSH;
+}
+
+static inline void clear_flush(struct iommu_map_table *iommu)
+{
+	iommu->flags &= ~IOMMU_NEED_FLUSH;
+}
+
+static void setup_iommu_pool_hash(void)
+{
+	unsigned int i;
+	static bool do_once;
+
+	if (do_once)
+		return;
+	do_once = true;
+	for_each_possible_cpu(i)
+		per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
+}
+
+/*
+ * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
+ * is the number of table entries. If `large_pool' is set to true,
+ * the top 1/4 of the table will be set aside for pool allocations
+ * of more than iommu_large_alloc pages.
+ */
+void iommu_tbl_pool_init(struct iommu_map_table *iommu,
+			 unsigned long num_entries,
+			 u32 table_shift,
+			 void (*lazy_flush)(struct iommu_map_table *),
+			 bool large_pool, u32 npools,
+			 bool skip_span_boundary_check)
+{
+	unsigned int start, i;
+	struct iommu_pool *p = &(iommu->large_pool);
+
+	setup_iommu_pool_hash();
+	if (npools == 0)
+		iommu->nr_pools = IOMMU_NR_POOLS;
+	else
+		iommu->nr_pools = npools;
+	BUG_ON(npools > IOMMU_NR_POOLS);
+
+	iommu->table_shift = table_shift;
+	iommu->lazy_flush = lazy_flush;
+	start = 0;
+	if (skip_span_boundary_check)
+		iommu->flags |= IOMMU_NO_SPAN_BOUND;
+	if (large_pool)
+		iommu->flags |= IOMMU_HAS_LARGE_POOL;
+
+	if (!large_pool)
+		iommu->poolsize = num_entries/iommu->nr_pools;
+	else
+		iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
+	for (i = 0; i < iommu->nr_pools; i++) {
+		spin_lock_init(&(iommu->pools[i].lock));
+		iommu->pools[i].start = start;
+		iommu->pools[i].hint = start;
+		start += iommu->poolsize; /* start for next pool */
+		iommu->pools[i].end = start - 1;
+	}
+	if (!large_pool)
+		return;
+	/* initialize large_pool */
+	spin_lock_init(&(p->lock));
+	p->start = start;
+	p->hint = p->start;
+	p->end = num_entries;
+}
+
+unsigned long iommu_tbl_range_alloc(struct device *dev,
+				struct iommu_map_table *iommu,
+				unsigned long npages,
+				unsigned long *handle,
+				unsigned long mask,
+				unsigned int align_order)
+{
+	unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
+	unsigned long n, end, start, limit, boundary_size;
+	struct iommu_pool *pool;
+	int pass = 0;
+	unsigned int pool_nr;
+	unsigned int npools = iommu->nr_pools;
+	unsigned long flags;
+	bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
+	bool largealloc = (large_pool && npages > iommu_large_alloc);
+	unsigned long shift;
+	unsigned long align_mask = 0;
+
+	if (align_order > 0)
+		align_mask = ~0ul >> (BITS_PER_LONG - align_order);
+
+	/* Sanity check */
+	if (unlikely(npages == 0)) {
+		WARN_ON_ONCE(1);
+		return IOMMU_ERROR_CODE;
+	}
+
+	if (largealloc) {
+		pool = &(iommu->large_pool);
+		pool_nr = 0; /* to keep compiler happy */
+	} else {
+		/* pick out pool_nr */
+		pool_nr = pool_hash & (npools - 1);
+		pool = &(iommu->pools[pool_nr]);
+	}
+	spin_lock_irqsave(&pool->lock, flags);
+
+ again:
+	if (pass == 0 && handle && *handle &&
+	    (*handle >= pool->start) && (*handle < pool->end))
+		start = *handle;
+	else
+		start = pool->hint;
+
+	limit = pool->end;
+
+	/* The case below can happen if we have a small segment appended
+	 * to a large, or when the previous alloc was at the very end of
+	 * the available space. If so, go back to the beginning. If a
+	 * flush is needed, it will get done based on the return value
+	 * from iommu_area_alloc() below.
+	 */
+	if (start >= limit)
+		start = pool->start;
+	shift = iommu->table_map_base >> iommu->table_shift;
+	if (limit + shift > mask) {
+		limit = mask - shift + 1;
+		/* If we're constrained on address range, first try
+		 * at the masked hint to avoid O(n) search complexity,
+		 * but on second pass, start at 0 in pool 0.
+		 */
+		if ((start & mask) >= limit || pass > 0) {
+			spin_unlock(&(pool->lock));
+			pool = &(iommu->pools[0]);
+			spin_lock(&(pool->lock));
+			start = pool->start;
+		} else {
+			start &= mask;
+		}
+	}
+
+	if (dev)
+		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				      1 << iommu->table_shift);
+	else
+		boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);
+
+	boundary_size = boundary_size >> iommu->table_shift;
+	/*
+	 * if the skip_span_boundary_check had been set during init, we set
+	 * things up so that iommu_is_span_boundary() merely checks if the
+	 * (index + npages) < num_tsb_entries
+	 */
+	if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
+		shift = 0;
+		boundary_size = iommu->poolsize * iommu->nr_pools;
+	}
+	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
+			     boundary_size, align_mask);
+	if (n == -1) {
+		if (likely(pass == 0)) {
+			/* First failure, rescan from the beginning. */
+			pool->hint = pool->start;
+			set_flush(iommu);
+			pass++;
+			goto again;
+		} else if (!largealloc && pass <= iommu->nr_pools) {
+			spin_unlock(&(pool->lock));
+			pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
+			pool = &(iommu->pools[pool_nr]);
+			spin_lock(&(pool->lock));
+			pool->hint = pool->start;
+			set_flush(iommu);
+			pass++;
+			goto again;
+		} else {
+			/* give up */
+			n = IOMMU_ERROR_CODE;
+			goto bail;
+		}
+	}
+	if (iommu->lazy_flush &&
+	    (n < pool->hint || need_flush(iommu))) {
+		clear_flush(iommu);
+		iommu->lazy_flush(iommu);
+	}
+
+	end = n + npages;
+	pool->hint = end;
+
+	/* Update handle for SG allocations */
+	if (handle)
+		*handle = end;
+ bail:
+	spin_unlock_irqrestore(&(pool->lock), flags);
+
+	return n;
+}
+
+static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
+				   unsigned long entry)
+{
+	struct iommu_pool *p;
+	unsigned long largepool_start = tbl->large_pool.start;
+	bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);
+
+	/* The large pool is the last pool at the top of the table */
+	if (large_pool && entry >= largepool_start) {
+		p = &tbl->large_pool;
+	} else {
+		unsigned int pool_nr = entry / tbl->poolsize;
+
+		BUG_ON(pool_nr >= tbl->nr_pools);
+		p = &tbl->pools[pool_nr];
+	}
+	return p;
+}
+
+/* Caller supplies the index of the entry into the iommu map table
+ * itself when the mapping from dma_addr to the entry is not the
+ * default addr->entry mapping below.
+ */
+void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
+			  unsigned long npages, unsigned long entry)
+{
+	struct iommu_pool *pool;
+	unsigned long flags;
+	unsigned long shift = iommu->table_shift;
+
+	if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
+		entry = (dma_addr - iommu->table_map_base) >> shift;
+	pool = get_pool(iommu, entry);
+
+	spin_lock_irqsave(&(pool->lock), flags);
+	bitmap_clear(iommu->map, entry, npages);
+	spin_unlock_irqrestore(&(pool->lock), flags);
+}
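(Usage sketch, not part of this commit: how a caller might drive the
allocator above. Everything prefixed my_ is hypothetical; the
iommu_tbl_* signatures and the entry-to-address arithmetic come from
the file itself, while the base address and table sizes are made up.)

#include <linux/slab.h>
#include <asm/iommu-common.h>

#define MY_TBL_SHIFT	13		/* assumed: 8K IOMMU page size */
#define MY_NUM_ENTRIES	(1UL << 16)	/* assumed table size */

static struct iommu_map_table my_tbl;

static int my_iommu_setup(void)
{
	/* Backing bitmap, one bit per table entry. */
	my_tbl.map = kzalloc(BITS_TO_LONGS(MY_NUM_ENTRIES) * sizeof(long),
			     GFP_KERNEL);
	if (!my_tbl.map)
		return -ENOMEM;
	my_tbl.table_map_base = 0x80000000UL;	/* assumed DVMA base */

	/* Large pool on (top 1/4), default pool count, span checks on. */
	iommu_tbl_pool_init(&my_tbl, MY_NUM_ENTRIES, MY_TBL_SHIFT,
			    NULL, true, 0, false);
	return 0;
}

static u64 my_map(struct device *dev, unsigned long npages)
{
	unsigned long entry;

	/* No SG handle, no addressing constraint, no extra alignment. */
	entry = iommu_tbl_range_alloc(dev, &my_tbl, npages, NULL, ~0UL, 0);
	if (entry == IOMMU_ERROR_CODE)
		return 0;
	/* Entry index to bus address, the inverse of the *_free() math. */
	return my_tbl.table_map_base + (entry << my_tbl.table_shift);
}

static void my_unmap(u64 dma_addr, unsigned long npages)
{
	/* IOMMU_ERROR_CODE asks for the default addr->entry mapping. */
	iommu_tbl_range_free(&my_tbl, dma_addr, npages, IOMMU_ERROR_CODE);
}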
arch/sparc/kernel/iommu.c
@@ -14,7 +14,7 @@
 #include <linux/errno.h>
 #include <linux/iommu-helper.h>
 #include <linux/bitmap.h>
-#include <linux/iommu-common.h>
+#include <asm/iommu-common.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
arch/sparc/kernel/ldc.c
@@ -16,7 +16,7 @@
 #include <linux/list.h>
 #include <linux/init.h>
 #include <linux/bitmap.h>
-#include <linux/iommu-common.h>
+#include <asm/iommu-common.h>
 
 #include <asm/hypervisor.h>
 #include <asm/iommu.h>
arch/sparc/kernel/pci_sun4v.c
@@ -16,7 +16,7 @@
 #include <linux/export.h>
 #include <linux/log2.h>
 #include <linux/of_device.h>
-#include <linux/iommu-common.h>
+#include <asm/iommu-common.h>
 
 #include <asm/iommu.h>
 #include <asm/irq.h>