dma-mapping: use unsigned long for dma_attrs
The dma-mapping core and the implementations do not change the DMA
attributes passed by pointer.  Thus the pointer can point to const data.
However, the attributes do not have to be a bitfield.  Instead, unsigned
long will do fine:

1. This is just simpler, both in terms of reading the code and setting
   attributes.  Instead of initializing local attributes on the stack
   and passing a pointer to them to dma_set_attr(), just set the bits.

2. It brings safety and checking for const correctness because the
   attributes are passed by value.

Semantic patches for this change (at least most of them):

    virtual patch
    virtual context

    @r@
    identifier f, attrs;
    @@
    f(...,
    - struct dma_attrs *attrs
    + unsigned long attrs
    , ...)
    {
    ...
    }

    @@
    identifier r.f;
    @@
    f(...,
    - NULL
    + 0
    )

and

    // Options: --all-includes
    virtual patch
    virtual context

    @r@
    identifier f, attrs;
    type t;
    @@
    t f(..., struct dma_attrs *attrs);

    @@
    identifier r.f;
    @@
    f(...,
    - NULL
    + 0
    )

Link: http://lkml.kernel.org/r/1468399300-5399-2-git-send-email-k.kozlowski@samsung.com
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
Acked-by: Vineet Gupta <vgupta@synopsys.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no>
Acked-by: Mark Salter <msalter@redhat.com> [c6x]
Acked-by: Jesper Nilsson <jesper.nilsson@axis.com> [cris]
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch> [drm]
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Acked-by: Joerg Roedel <jroedel@suse.de> [iommu]
Acked-by: Fabien Dessenne <fabien.dessenne@st.com> [bdisp]
Reviewed-by: Marek Szyprowski <m.szyprowski@samsung.com> [vb2-core]
Acked-by: David Vrabel <david.vrabel@citrix.com> [xen]
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> [xen swiotlb]
Acked-by: Joerg Roedel <jroedel@suse.de> [iommu]
Acked-by: Richard Kuo <rkuo@codeaurora.org> [hexagon]
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k]
Acked-by: Gerald Schaefer <gerald.schaefer@de.ibm.com> [s390]
Acked-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no> [avr32]
Acked-by: Vineet Gupta <vgupta@synopsys.com> [arc]
Acked-by: Robin Murphy <robin.murphy@arm.com> [arm64 and dma-iommu]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
committed by Linus Torvalds
parent 1605d2715a
commit 00085f1efa
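As a rough before/after illustration of what the conversion means for a
caller (a minimal sketch only: example_map_old()/example_map_new() and the
DMA_TO_DEVICE direction are hypothetical, while DEFINE_DMA_ATTRS(),
dma_set_attr(), dma_map_sg_attrs() and DMA_ATTR_SKIP_CPU_SYNC are the
interfaces touched by the hunks below):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* Before: attributes were a struct dma_attrs built up on the stack. */
    static int example_map_old(struct device *dev, struct sg_table *sgt)
    {
            DEFINE_DMA_ATTRS(attrs);

            dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
            return dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
                                    DMA_TO_DEVICE, &attrs);
    }

    /* After: attributes are bits in an unsigned long passed by value. */
    static int example_map_new(struct device *dev, struct sg_table *sgt)
    {
            return dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
                                    DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
    }

Because the attributes are now plain bits, several flags can be combined
with a simple bitwise OR, e.g. DMA_ATTR_SKIP_CPU_SYNC |
DMA_ATTR_NO_KERNEL_MAPPING.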
@@ -27,7 +27,7 @@ struct vb2_dc_buf {
 	unsigned long		size;
 	void			*cookie;
 	dma_addr_t		dma_addr;
-	struct dma_attrs	attrs;
+	unsigned long		attrs;
 	enum dma_data_direction	dma_dir;
 	struct sg_table		*dma_sgt;
 	struct frame_vector	*vec;
@@ -130,12 +130,12 @@ static void vb2_dc_put(void *buf_priv)
 		kfree(buf->sgt_base);
 	}
 	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
-		       &buf->attrs);
+		       buf->attrs);
 	put_device(buf->dev);
 	kfree(buf);
 }
 
-static void *vb2_dc_alloc(struct device *dev, const struct dma_attrs *attrs,
+static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
 			  unsigned long size, enum dma_data_direction dma_dir,
 			  gfp_t gfp_flags)
 {
@@ -146,16 +146,16 @@ static void *vb2_dc_alloc(struct device *dev, const struct dma_attrs *attrs,
 		return ERR_PTR(-ENOMEM);
 
-	if (attrs)
-		buf->attrs = *attrs;
+	buf->attrs = attrs;
 	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
-					GFP_KERNEL | gfp_flags, &buf->attrs);
+					GFP_KERNEL | gfp_flags, buf->attrs);
 	if (!buf->cookie) {
 		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
 		kfree(buf);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs))
+	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
 		buf->vaddr = buf->cookie;
 
 	/* Prevent the device from being released while the buffer is used */
@@ -189,7 +189,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
 	vma->vm_pgoff = 0;
 
 	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
-		buf->dma_addr, buf->size, &buf->attrs);
+		buf->dma_addr, buf->size, buf->attrs);
 
 	if (ret) {
 		pr_err("Remapping memory failed, error: %d\n", ret);
@@ -372,7 +372,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
 	}
 
 	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
-		buf->size, &buf->attrs);
+		buf->size, buf->attrs);
 	if (ret < 0) {
 		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
 		kfree(sgt);
@@ -421,15 +421,12 @@ static void vb2_dc_put_userptr(void *buf_priv)
 	struct page **pages;
 
 	if (sgt) {
-		DEFINE_DMA_ATTRS(attrs);
-
-		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 		/*
 		 * No need to sync to CPU, it's already synced to the CPU
 		 * since the finish() memop will have been called before this.
 		 */
 		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				   buf->dma_dir, &attrs);
+				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 		pages = frame_vector_pages(buf->vec);
 		/* sgt should exist only if vector contains pages... */
 		BUG_ON(IS_ERR(pages));
@@ -484,9 +481,6 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
 	struct sg_table *sgt;
 	unsigned long contig_size;
 	unsigned long dma_align = dma_get_cache_alignment();
-	DEFINE_DMA_ATTRS(attrs);
-
-	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	/* Only cache aligned DMA transfers are reliable */
 	if (!IS_ALIGNED(vaddr | size, dma_align)) {
@@ -548,7 +542,7 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
 	 * prepare() memop is called.
 	 */
 	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, &attrs);
+				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 	if (sgt->nents <= 0) {
 		pr_err("failed to map scatterlist\n");
 		ret = -EIO;
@@ -572,7 +566,7 @@ out:
 
 fail_map_sg:
 	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-			   buf->dma_dir, &attrs);
+			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 
 fail_sgt_init:
 	sg_free_table(sgt);
@@ -95,7 +95,7 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
 	return 0;
 }
 
-static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_attrs,
+static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
 			      unsigned long size, enum dma_data_direction dma_dir,
 			      gfp_t gfp_flags)
 {
@@ -103,9 +103,6 @@ static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_at
 	struct sg_table *sgt;
 	int ret;
 	int num_pages;
-	DEFINE_DMA_ATTRS(attrs);
-
-	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	if (WARN_ON(dev == NULL))
 		return NULL;
@@ -144,7 +141,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_at
 	 * prepare() memop is called.
 	 */
 	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, &attrs);
+				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 	if (!sgt->nents)
 		goto fail_map;
 
@@ -179,13 +176,10 @@ static void vb2_dma_sg_put(void *buf_priv)
 	int i = buf->num_pages;
 
 	if (atomic_dec_and_test(&buf->refcount)) {
-		DEFINE_DMA_ATTRS(attrs);
-
-		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
 			buf->num_pages);
 		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				   buf->dma_dir, &attrs);
+				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 		if (buf->vaddr)
 			vm_unmap_ram(buf->vaddr, buf->num_pages);
 		sg_free_table(buf->dma_sgt);
@@ -228,10 +222,8 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
 {
 	struct vb2_dma_sg_buf *buf;
 	struct sg_table *sgt;
-	DEFINE_DMA_ATTRS(attrs);
 	struct frame_vector *vec;
 
-	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return NULL;
@@ -262,7 +254,7 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
 	 * prepare() memop is called.
 	 */
 	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, &attrs);
+				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 	if (!sgt->nents)
 		goto userptr_fail_map;
 
@@ -286,14 +278,11 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
 	struct vb2_dma_sg_buf *buf = buf_priv;
 	struct sg_table *sgt = &buf->sg_table;
 	int i = buf->num_pages;
-	DEFINE_DMA_ATTRS(attrs);
-
-	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
 		__func__, buf->num_pages);
 	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
-			   &attrs);
+			   DMA_ATTR_SKIP_CPU_SYNC);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
 	sg_free_table(buf->dma_sgt);
@@ -33,7 +33,7 @@ struct vb2_vmalloc_buf {
 
 static void vb2_vmalloc_put(void *buf_priv);
 
-static void *vb2_vmalloc_alloc(struct device *dev, const struct dma_attrs *attrs,
+static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
 			       unsigned long size, enum dma_data_direction dma_dir,
 			       gfp_t gfp_flags)
 {