Merge tag 'dma-mapping-5.7' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - fix an integer overflow in the coherent pool (Kevin Grandemange)

 - provide support for in-place uncached remapping and use that for
   openrisc

 - fix the arm coherent allocator to take the bus limit into account

* tag 'dma-mapping-5.7' of git://git.infradead.org/users/hch/dma-mapping:
  ARM/dma-mapping: merge __dma_supported into arm_dma_supported
  ARM/dma-mapping: take the bus limit into account in __dma_alloc
  ARM/dma-mapping: remove get_coherent_dma_mask
  openrisc: use the generic in-place uncached DMA allocator
  dma-direct: provide a arch_dma_clear_uncached hook
  dma-direct: make uncached_kernel_address more general
  dma-direct: consolidate the error handling in dma_direct_alloc_pages
  dma-direct: remove the cached_kernel_address hook
  dma-coherent: fix integer overflow in the reserved-memory dma allocation
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -134,7 +134,7 @@ static void *__dma_alloc_from_coherent(struct device *dev,
 
 	spin_lock_irqsave(&mem->spinlock, flags);
 
-	if (unlikely(size > (mem->size << PAGE_SHIFT)))
+	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
 		goto err;
 
 	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
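The fix widens the arithmetic before the shift. mem->size is an int holding a
page count, so mem->size << PAGE_SHIFT is evaluated in 32-bit int arithmetic
and overflows once the reserved pool reaches 2 GiB; casting to dma_addr_t
first makes the byte count come out right. A standalone userland sketch of
the effect, not kernel code, using unsigned 32-bit arithmetic so the
wraparound is well-defined:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		/* 1048576 pages * 4 KiB = a 4 GiB pool; the kernel field is
		 * a signed int, which already misbehaves at 2 GiB. */
		uint32_t pages = 1048576;

		uint64_t bad  = pages << PAGE_SHIFT;           /* wraps to 0 in 32 bits */
		uint64_t good = (uint64_t)pages << PAGE_SHIFT; /* widen before shifting */

		printf("bad=%llu good=%llu\n",
		       (unsigned long long)bad, (unsigned long long)good);
		return 0;
	}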
@@ -144,8 +144,9 @@ static void *__dma_alloc_from_coherent(struct device *dev,
 	/*
 	 * Memory was found in the coherent area.
 	 */
-	*dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
-	ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	*dma_handle = dma_get_device_base(dev, mem) +
+			((dma_addr_t)pageno << PAGE_SHIFT);
+	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
 	spin_unlock_irqrestore(&mem->spinlock, flags);
 	memset(ret, 0, size);
 	return ret;
@@ -194,7 +195,7 @@ static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
 				       int order, void *vaddr)
 {
 	if (mem && vaddr >= mem->virt_base && vaddr <
-		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
 		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
 		unsigned long flags;
 
@@ -238,10 +239,10 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
 		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
 {
 	if (mem && vaddr >= mem->virt_base && vaddr + size <=
-		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		   (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
 		unsigned long off = vma->vm_pgoff;
 		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-		int user_count = vma_pages(vma);
+		unsigned long user_count = vma_pages(vma);
 		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
 		*ret = -ENXIO;
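All four hunks in this file apply the same pattern: promote the int page
count (or page index) to dma_addr_t before shifting, so the byte offset is
computed in 64 bits; the user_count change is the same idea for vma_pages(),
which returns unsigned long. A helper along these lines would avoid repeating
the cast; it is hypothetical, not part of the patch:

	/* Hypothetical helper, not in the kernel tree: pool size in bytes. */
	static inline dma_addr_t dma_mem_bytes(struct dma_coherent_mem *mem)
	{
		return (dma_addr_t)mem->size << PAGE_SHIFT;
	}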
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -157,11 +157,8 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
 				dma_pgprot(dev, PAGE_KERNEL, attrs),
 				__builtin_return_address(0));
-		if (!ret) {
-			dma_free_contiguous(dev, page, size);
-			return ret;
-		}
-
+		if (!ret)
+			goto out_free_pages;
 		memset(ret, 0, size);
 		goto done;
 	}
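This hunk and the next stop open-coding the cleanup: instead of freeing and
returning at each failure site, every error path now jumps to a single
out_free_pages label added at the end of the function (see the @@ -195 hunk
below). A minimal sketch of the shape, with get_pages/map_pages/put_pages as
made-up stand-ins for the real allocation steps:

	#include <stdlib.h>

	static void *get_pages(size_t size)           { return malloc(size); }
	static void *map_pages(void *p, size_t size)  { (void)size; return p; }
	static void  put_pages(void *p, size_t size)  { (void)size; free(p); }

	/* Every failure path jumps to one shared cleanup label instead of
	 * duplicating the free-and-return sequence. */
	static void *alloc_sketch(size_t size)
	{
		void *page = get_pages(size);
		void *ret;

		if (!page)
			return NULL;
		ret = map_pages(page, size);
		if (!ret)
			goto out_free_pages;
		return ret;

	out_free_pages:
		put_pages(page, size);
		return NULL;
	}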
@@ -174,8 +171,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		 * so log an error and fail.
 		 */
 		dev_info(dev, "Rejecting highmem page from CMA.\n");
-		dma_free_contiguous(dev, page, size);
-		return NULL;
+		goto out_free_pages;
 	}
 
 	ret = page_address(page);
@@ -184,10 +180,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	memset(ret, 0, size);
 
-	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    dma_alloc_need_uncached(dev, attrs)) {
 		arch_dma_prep_coherent(page, size);
-		ret = uncached_kernel_address(ret);
+		ret = arch_dma_set_uncached(ret, size);
+		if (IS_ERR(ret))
+			goto out_free_pages;
 	}
 done:
 	if (force_dma_unencrypted(dev))
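Here CONFIG_ARCH_HAS_UNCACHED_SEGMENT becomes CONFIG_ARCH_HAS_DMA_SET_UNCACHED
and uncached_kernel_address() becomes arch_dma_set_uncached(), which takes the
size and may fail with an ERR_PTR() value; that is why the caller now checks
IS_ERR(). The new hook pair, as declared in this series (in
include/linux/dma-noncoherent.h):

	void *arch_dma_set_uncached(void *addr, size_t size);
	void arch_dma_clear_uncached(void *addr, size_t size);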
@@ -195,6 +193,9 @@ done:
 	else
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	return ret;
+out_free_pages:
+	dma_free_contiguous(dev, page, size);
+	return NULL;
 }
 
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
@@ -218,6 +219,8 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
+	else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
+		arch_dma_clear_uncached(cpu_addr, size);
 
 	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
 }
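The free path mirrors the allocation: if the architecture remapped the buffer
uncached in place, arch_dma_clear_uncached() lets it restore the cacheable
mapping before the pages go back to the allocator (openrisc, converted in
this pull, does that by flipping page-table attributes). A sketch of the pair
for an imaginary fixed-alias architecture; UNCACHED_ALIAS is a made-up
constant, not a real kernel symbol:

	/* Sketch only: assumes an uncached alias of all memory at
	 * UNCACHED_ALIAS. An in-place remapping arch would instead change
	 * PTE attributes in set, and restore them in clear. */
	void *arch_dma_set_uncached(void *addr, size_t size)
	{
		return (void *)(__pa(addr) + UNCACHED_ALIAS);
	}

	void arch_dma_clear_uncached(void *addr, size_t size)
	{
		/* nothing to undo for a fixed alias */
	}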
@@ -225,7 +228,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    dma_alloc_need_uncached(dev, attrs))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
@@ -235,7 +238,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
-	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    dma_alloc_need_uncached(dev, attrs))
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
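The same rename lands in dma_direct_alloc() and dma_direct_free() above:
direct mapping handles an uncached request itself only when the architecture
can remap in place (ARCH_HAS_DMA_SET_UNCACHED) or via vmalloc
(DMA_DIRECT_REMAP); otherwise it punts to the arch-specific allocator.
Restated as a tiny predicate, purely illustrative:

	#include <stdbool.h>

	/* Illustrative restatement of the fallback condition above. */
	static bool use_arch_allocator(bool arch_has_set_uncached,
				       bool has_dma_direct_remap,
				       bool need_uncached)
	{
		return !arch_has_set_uncached && !has_dma_direct_remap &&
			need_uncached;
	}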