Merge branches 'core/debugobjects', 'core/iommu', 'core/locking', 'core/printk', 'core/rcu', 'core/resources', 'core/softirq' and 'core/stacktrace' into core/core

lib/Kconfig.debug

@@ -252,6 +252,14 @@ config DEBUG_OBJECTS_TIMERS
          timer routines to track the life time of timer objects and
          validate the timer operations.

config DEBUG_OBJECTS_ENABLE_DEFAULT
        int "debug_objects bootup default value (0-1)"
        range 0 1
        default "1"
        depends on DEBUG_OBJECTS
        help
          Debug objects boot parameter default value

config DEBUG_SLAB
        bool "Debug slab memory allocations"
        depends on DEBUG_KERNEL && SLAB

@@ -629,6 +637,19 @@ config RCU_CPU_STALL_DETECTOR

          Say N if you are unsure.

config RCU_CPU_STALL_DETECTOR
        bool "Check for stalled CPUs delaying RCU grace periods"
        depends on CLASSIC_RCU || TREE_RCU
        default n
        help
          This option causes RCU to printk information on which
          CPUs are delaying the current grace period, but only when
          the grace period extends for excessive time periods.

          Say Y if you want RCU to perform such checks.

          Say N if you are unsure.

config KPROBES_SANITY_TEST
        bool "Kprobes sanity tests"
        depends on DEBUG_KERNEL
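DEBUG_OBJECTS_ENABLE_DEFAULT is an int symbol, so the build emits it as an ordinary preprocessor macro that C code can use as an initializer; the lib/debugobjects.c hunk below relies on exactly that. A minimal sketch of the mechanism (the value 1 is only an example, not taken from any particular .config):

/*
 * Illustrative sketch: an "int" Kconfig symbol becomes a plain #define in
 * the generated autoconf header, so it can seed a C initializer directly.
 * The value below is an example, not taken from any particular .config.
 */
#define CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT 1

static int debug_objects_enabled = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;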
lib/debugobjects.c

@@ -45,7 +45,9 @@ static struct kmem_cache *obj_cache;
static int debug_objects_maxchain __read_mostly;
static int debug_objects_fixups __read_mostly;
static int debug_objects_warnings __read_mostly;
static int debug_objects_enabled __read_mostly;
static int debug_objects_enabled __read_mostly
                = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;

static struct debug_obj_descr *descr_test __read_mostly;

static int __init enable_object_debug(char *str)
lib/swiotlb.c | 255
@@ -21,9 +21,12 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>

#include <asm/io.h>
#include <asm/dma.h>

@@ -36,22 +39,6 @@
#define OFFSET(val,align) ((unsigned long)      \
        ( (val) & ( (align) - 1)))

#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2. What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE 128

/*
 * log of the size of each IO TLB slab. The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
@@ -102,7 +89,10 @@ static unsigned int io_tlb_index;
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static unsigned char **io_tlb_orig_addr;
static struct swiotlb_phys_addr {
        struct page *page;
        unsigned int offset;
} *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls

@@ -126,6 +116,72 @@ setup_io_tlb_npages(char *str)
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */

void * __weak swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
        return alloc_bootmem_low_pages(size);
}

void * __weak swiotlb_alloc(unsigned order, unsigned long nslabs)
{
        return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
}

dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr)
{
        return paddr;
}

phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
{
        return baddr;
}

static dma_addr_t swiotlb_virt_to_bus(volatile void *address)
{
        return swiotlb_phys_to_bus(virt_to_phys(address));
}

static void *swiotlb_bus_to_virt(dma_addr_t address)
{
        return phys_to_virt(swiotlb_bus_to_phys(address));
}

int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
{
        return 0;
}

static dma_addr_t swiotlb_sg_to_bus(struct scatterlist *sg)
{
        return swiotlb_phys_to_bus(page_to_phys(sg_page(sg)) + sg->offset);
}

static void swiotlb_print_info(unsigned long bytes)
{
        phys_addr_t pstart, pend;
        dma_addr_t bstart, bend;

        pstart = virt_to_phys(io_tlb_start);
        pend = virt_to_phys(io_tlb_end);

        bstart = swiotlb_phys_to_bus(pstart);
        bend = swiotlb_phys_to_bus(pend);

        printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
               bytes >> 20, io_tlb_start, io_tlb_end);
        if (pstart != bstart || pend != bend)
                printk(KERN_INFO "software IO TLB at phys %#llx - %#llx"
                       " bus %#llx - %#llx\n",
                       (unsigned long long)pstart,
                       (unsigned long long)pend,
                       (unsigned long long)bstart,
                       (unsigned long long)bend);
        else
                printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
                       (unsigned long long)pstart,
                       (unsigned long long)pend);
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
@@ -145,7 +201,7 @@ swiotlb_init_with_default_size(size_t default_size)
        /*
         * Get IO TLB memory from the low pages
         */
        io_tlb_start = alloc_bootmem_low_pages(bytes);
        io_tlb_start = swiotlb_alloc_boot(bytes, io_tlb_nslabs);
        if (!io_tlb_start)
                panic("Cannot allocate SWIOTLB buffer");
        io_tlb_end = io_tlb_start + bytes;

@@ -159,7 +215,7 @@ swiotlb_init_with_default_size(size_t default_size)
        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;
        io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
        io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));

        /*
         * Get the overflow emergency buffer

@@ -168,8 +224,7 @@ swiotlb_init_with_default_size(size_t default_size)
        if (!io_tlb_overflow_buffer)
                panic("Cannot allocate SWIOTLB overflow buffer!\n");

        printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
               virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
        swiotlb_print_info(bytes);
}

void __init

@@ -202,8 +257,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
                                                        order);
                io_tlb_start = swiotlb_alloc(order, io_tlb_nslabs);
                if (io_tlb_start)
                        break;
                order--;

@@ -235,12 +289,12 @@ swiotlb_late_init_with_default_size(size_t default_size)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;

        io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
                                get_order(io_tlb_nslabs * sizeof(char *)));
        io_tlb_orig_addr = (struct swiotlb_phys_addr *)__get_free_pages(GFP_KERNEL,
                                get_order(io_tlb_nslabs * sizeof(struct swiotlb_phys_addr)));
        if (!io_tlb_orig_addr)
                goto cleanup3;

        memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
        memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(struct swiotlb_phys_addr));

        /*
         * Get the overflow emergency buffer

@@ -250,9 +304,7 @@ swiotlb_late_init_with_default_size(size_t default_size)
        if (!io_tlb_overflow_buffer)
                goto cleanup4;

        printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
               "0x%lx\n", bytes >> 20,
               virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
        swiotlb_print_info(bytes);

        return 0;
@@ -279,16 +331,69 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size)
        return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}

static inline int range_needs_mapping(void *ptr, size_t size)
{
        return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size);
}

static int is_swiotlb_buffer(char *addr)
{
        return addr >= io_tlb_start && addr < io_tlb_end;
}

static struct swiotlb_phys_addr swiotlb_bus_to_phys_addr(char *dma_addr)
{
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        struct swiotlb_phys_addr buffer = io_tlb_orig_addr[index];
        buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
        buffer.page += buffer.offset >> PAGE_SHIFT;
        buffer.offset &= PAGE_SIZE - 1;
        return buffer;
}

static void
__sync_single(struct swiotlb_phys_addr buffer, char *dma_addr, size_t size, int dir)
{
        if (PageHighMem(buffer.page)) {
                size_t len, bytes;
                char *dev, *host, *kmp;

                len = size;
                while (len != 0) {
                        unsigned long flags;

                        bytes = len;
                        if ((bytes + buffer.offset) > PAGE_SIZE)
                                bytes = PAGE_SIZE - buffer.offset;
                        local_irq_save(flags); /* protects KM_BOUNCE_READ */
                        kmp = kmap_atomic(buffer.page, KM_BOUNCE_READ);
                        dev = dma_addr + size - len;
                        host = kmp + buffer.offset;
                        if (dir == DMA_FROM_DEVICE)
                                memcpy(host, dev, bytes);
                        else
                                memcpy(dev, host, bytes);
                        kunmap_atomic(kmp, KM_BOUNCE_READ);
                        local_irq_restore(flags);
                        len -= bytes;
                        buffer.page++;
                        buffer.offset = 0;
                }
        } else {
                void *v = page_address(buffer.page) + buffer.offset;

                if (dir == DMA_TO_DEVICE)
                        memcpy(dma_addr, v, size);
                else
                        memcpy(v, dma_addr, size);
        }
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, char *buffer, size_t size, int dir)
map_single(struct device *hwdev, struct swiotlb_phys_addr buffer, size_t size, int dir)
{
        unsigned long flags;
        char *dma_addr;

@@ -298,11 +403,16 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
        unsigned long mask;
        unsigned long offset_slots;
        unsigned long max_slots;
        struct swiotlb_phys_addr slot_buf;

        mask = dma_get_seg_boundary(hwdev);
        start_dma_addr = virt_to_bus(io_tlb_start) & mask;
        start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask;

        offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

        /*
         * Carefully handle integer overflow which can occur when mask == ~0UL.
         */
        max_slots = mask + 1
                    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
                    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
@@ -378,10 +488,15 @@ found:
         * This is needed when we sync the memory. Then we sync the buffer if
         * needed.
         */
        for (i = 0; i < nslots; i++)
                io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT);
        slot_buf = buffer;
        for (i = 0; i < nslots; i++) {
                slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
                slot_buf.offset &= PAGE_SIZE - 1;
                io_tlb_orig_addr[index+i] = slot_buf;
                slot_buf.offset += 1 << IO_TLB_SHIFT;
        }
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                memcpy(dma_addr, buffer, size);
                __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);

        return dma_addr;
}

@@ -395,17 +510,17 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
        unsigned long flags;
        int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        char *buffer = io_tlb_orig_addr[index];
        struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);

        /*
         * First, sync the memory before unmapping the entry
         */
        if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
        if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
                /*
                 * bounce... copy the data back into the original buffer * and
                 * delete the bounce buffer.
                 */
                memcpy(buffer, dma_addr, size);
                __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);

        /*
         * Return the buffer to the free list by setting the corresponding

@@ -437,21 +552,18 @@ static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
            int dir, int target)
{
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        char *buffer = io_tlb_orig_addr[index];

        buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));
        struct swiotlb_phys_addr buffer = swiotlb_bus_to_phys_addr(dma_addr);

        switch (target) {
        case SYNC_FOR_CPU:
                if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
                        memcpy(buffer, dma_addr, size);
                        __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
                else
                        BUG_ON(dir != DMA_TO_DEVICE);
                break;
        case SYNC_FOR_DEVICE:
                if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
                        memcpy(dma_addr, buffer, size);
                        __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
                else
                        BUG_ON(dir != DMA_FROM_DEVICE);
                break;
@@ -473,7 +585,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
        dma_mask = hwdev->coherent_dma_mask;

        ret = (void *)__get_free_pages(flags, order);
        if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
        if (ret && !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) {
                /*
                 * The allocated memory isn't reachable by the device.
                 * Fall back on swiotlb_map_single().

@@ -488,13 +600,16 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                 * swiotlb_map_single(), which will grab memory from
                 * the lowest available address range.
                 */
                ret = map_single(hwdev, NULL, size, DMA_FROM_DEVICE);
                struct swiotlb_phys_addr buffer;
                buffer.page = virt_to_page(NULL);
                buffer.offset = 0;
                ret = map_single(hwdev, buffer, size, DMA_FROM_DEVICE);
                if (!ret)
                        return NULL;
        }

        memset(ret, 0, size);
        dev_addr = virt_to_bus(ret);
        dev_addr = swiotlb_virt_to_bus(ret);

        /* Confirm address can be DMA'd by device */
        if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {

@@ -554,8 +669,9 @@ dma_addr_t
swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
                         int dir, struct dma_attrs *attrs)
{
        dma_addr_t dev_addr = virt_to_bus(ptr);
        dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr);
        void *map;
        struct swiotlb_phys_addr buffer;

        BUG_ON(dir == DMA_NONE);
        /*

@@ -563,19 +679,22 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force)
        if (!address_needs_mapping(hwdev, dev_addr, size) &&
            !range_needs_mapping(ptr, size))
                return dev_addr;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        map = map_single(hwdev, ptr, size, dir);
        buffer.page = virt_to_page(ptr);
        buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
        map = map_single(hwdev, buffer, size, dir);
        if (!map) {
                swiotlb_full(hwdev, size, dir, 1);
                map = io_tlb_overflow_buffer;
        }

        dev_addr = virt_to_bus(map);
        dev_addr = swiotlb_virt_to_bus(map);

        /*
         * Ensure that the address returned is DMA'ble
@@ -605,7 +724,7 @@ void
swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
                           size_t size, int dir, struct dma_attrs *attrs)
{
        char *dma_addr = bus_to_virt(dev_addr);
        char *dma_addr = swiotlb_bus_to_virt(dev_addr);

        BUG_ON(dir == DMA_NONE);
        if (is_swiotlb_buffer(dma_addr))

@@ -635,7 +754,7 @@ static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                    size_t size, int dir, int target)
{
        char *dma_addr = bus_to_virt(dev_addr);
        char *dma_addr = swiotlb_bus_to_virt(dev_addr);

        BUG_ON(dir == DMA_NONE);
        if (is_swiotlb_buffer(dma_addr))

@@ -666,7 +785,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
                          unsigned long offset, size_t size,
                          int dir, int target)
{
        char *dma_addr = bus_to_virt(dev_addr) + offset;
        char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;

        BUG_ON(dir == DMA_NONE);
        if (is_swiotlb_buffer(dma_addr))

@@ -714,18 +833,20 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                     int dir, struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        void *addr;
        struct swiotlb_phys_addr buffer;
        dma_addr_t dev_addr;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                addr = SG_ENT_VIRT_ADDRESS(sg);
                dev_addr = virt_to_bus(addr);
                if (swiotlb_force ||
                dev_addr = swiotlb_sg_to_bus(sg);
                if (range_needs_mapping(sg_virt(sg), sg->length) ||
                    address_needs_mapping(hwdev, dev_addr, sg->length)) {
                        void *map = map_single(hwdev, addr, sg->length, dir);
                        void *map;
                        buffer.page = sg_page(sg);
                        buffer.offset = sg->offset;
                        map = map_single(hwdev, buffer, sg->length, dir);
                        if (!map) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */

@@ -735,7 +856,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                                sgl[0].dma_length = 0;
                                return 0;
                        }
                        sg->dma_address = virt_to_bus(map);
                        sg->dma_address = swiotlb_virt_to_bus(map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;

@@ -765,11 +886,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        unmap_single(hwdev, bus_to_virt(sg->dma_address),
                if (sg->dma_address != swiotlb_sg_to_bus(sg))
                        unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
                                     sg->dma_length, dir);
                else if (dir == DMA_FROM_DEVICE)
                        dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
                        dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
        }
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

@@ -798,11 +919,11 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        sync_single(hwdev, bus_to_virt(sg->dma_address),
                if (sg->dma_address != swiotlb_sg_to_bus(sg))
                        sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
                                    sg->dma_length, dir, target);
                else if (dir == DMA_FROM_DEVICE)
                        dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
                        dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length);
        }
}

@@ -823,7 +944,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
        return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer));
}

/*

@@ -835,7 +956,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return virt_to_bus(io_tlb_end - 1) <= mask;
        return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask;
}

EXPORT_SYMBOL(swiotlb_map_single);
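The __weak swiotlb_phys_to_bus()/swiotlb_bus_to_phys() hooks added above exist so an architecture can supply its own physical-to-bus translation; a strong definition in arch code overrides the weak default at link time. A minimal sketch, assuming a hypothetical platform whose device bus addresses sit at a fixed offset from CPU physical addresses (the offset value and file placement are made up for illustration):

/* arch/<hypothetical>/kernel/pci-swiotlb.c -- illustrative only */
#include <linux/types.h>
#include <linux/swiotlb.h>

#define EXAMPLE_BUS_OFFSET 0x80000000UL /* made-up fixed bus offset */

dma_addr_t swiotlb_phys_to_bus(phys_addr_t paddr)
{
        return paddr + EXAMPLE_BUS_OFFSET;      /* overrides the __weak default */
}

phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
{
        return baddr - EXAMPLE_BUS_OFFSET;
}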