ANDROID: staging: ion: Remove unnecessary ion heap ops

The per-heap map_kernel, unmap_kernel and map_user operations are
redundant now that the core provides default dma-buf operations, so
remove them.

Bug: 133508579
Bug: 140290587
Test: ion-unit-tests

Change-Id: I8f87c22896e128f48ed222421cabc23ab238ff69
Signed-off-by: Sandeep Patil <sspatil@google.com>
Author: Sandeep Patil <sspatil@google.com>
Date:   2019-09-01 23:02:37 -07:00
parent 296ba13074
commit 8eb89f277e

6 changed files with 13 additions and 36 deletions
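Background for the change: the core's shared helpers already do the generic
mapping work that every heap was wiring up by hand. As a reference, here is a
sketch of the kernel-mapping helper modeled on the upstream
ion_heap_map_kernel(); the body in this tree may differ in detail.

/* Sketch of the shared kernel-mapping helper, modeled on upstream
 * ion_heap_map_kernel(); details in this tree may differ. It vmaps
 * every page of the buffer's sg_table with a cache-correct pgprot. */
void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(array_size(npages, sizeof(*pages)));
	struct page **tmp = pages;
	pgprot_t pgprot = (buffer->flags & ION_FLAG_CACHED) ?
			PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL);
	struct scatterlist *sg;
	void *vaddr;
	int i, j;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);

		for (j = 0; j < PAGE_ALIGN(sg->length) / PAGE_SIZE; j++)
			*tmp++ = page++;
	}

	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	return vaddr ? vaddr : ERR_PTR(-ENOMEM);
}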


@@ -95,9 +95,6 @@ static void ion_cma_free(struct ion_buffer *buffer)
 static struct ion_heap_ops ion_cma_ops = {
 	.allocate = ion_cma_allocate,
 	.free = ion_cma_free,
-	.map_user = ion_heap_map_user,
-	.map_kernel = ion_heap_map_kernel,
-	.unmap_kernel = ion_heap_unmap_kernel,
 };
 
 static struct ion_heap *__ion_cma_heap_create(struct cma *cma)


@@ -77,9 +77,6 @@ static void ion_system_contig_heap_free(struct ion_buffer *buffer)
 static struct ion_heap_ops kmalloc_ops = {
 	.allocate = ion_system_contig_heap_allocate,
 	.free = ion_system_contig_heap_free,
-	.map_kernel = ion_heap_map_kernel,
-	.unmap_kernel = ion_heap_unmap_kernel,
-	.map_user = ion_heap_map_user,
 };
 
 static struct ion_heap contig_heap = {


@@ -241,9 +241,6 @@ err_create_pool:
 static struct ion_heap_ops system_heap_ops = {
 	.allocate = ion_system_heap_allocate,
 	.free = ion_system_heap_free,
-	.map_kernel = ion_heap_map_kernel,
-	.unmap_kernel = ion_heap_unmap_kernel,
-	.map_user = ion_heap_map_user,
 	.shrink = ion_system_heap_shrink,
 };
 
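All three heaps above end up with the same shape: only allocation and freeing
(plus an optional shrinker) are heap-specific. A hypothetical new heap would
now declare no mapping callbacks at all, for example:

/* Hypothetical heap after this change; the example_* names are
 * illustrative, not part of the tree. Mapping is left entirely to
 * the shared ion_heap_map_* helpers. */
static struct ion_heap_ops example_heap_ops = {
	.allocate = example_heap_allocate,
	.free = example_heap_free,
};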


@@ -201,7 +201,7 @@ void ion_buffer_release(struct ion_buffer *buffer)
 	if (buffer->kmap_cnt > 0) {
 		pr_warn_once("%s: buffer still mapped in the kernel\n",
 			     __func__);
-		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+		ion_heap_unmap_kernel(buffer->heap, buffer);
 	}
 	buffer->heap->ops->free(buffer);
 	spin_lock(&buffer->heap->stat_lock);
@@ -245,7 +245,7 @@ void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 		buffer->kmap_cnt++;
 		return buffer->vaddr;
 	}
-	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+	vaddr = ion_heap_map_kernel(buffer->heap, buffer);
 	if (WARN_ONCE(!vaddr,
 		      "heap->ops->map_kernel should return ERR_PTR on error"))
 		return ERR_PTR(-EINVAL);
@@ -260,7 +260,7 @@ void ion_buffer_kmap_put(struct ion_buffer *buffer)
 {
 	buffer->kmap_cnt--;
 	if (!buffer->kmap_cnt) {
-		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+		ion_heap_unmap_kernel(buffer->heap, buffer);
 		buffer->vaddr = NULL;
 	}
 }
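The call sites above all go through the same refcounted mapping:
ion_buffer_kmap_get() maps on the first call and bumps kmap_cnt,
ion_buffer_kmap_put() drops the count and unmaps at zero, with buffer->lock
serializing both. A sketch of a caller, using only names visible in this diff:

/* Sketch of a hypothetical in-kernel caller; example_zero_first_page
 * is illustrative. Locking follows the ion_dma_buf.c call sites. */
static int example_zero_first_page(struct ion_buffer *buffer)
{
	void *vaddr;

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);	/* maps on first use */
	if (IS_ERR(vaddr)) {
		mutex_unlock(&buffer->lock);
		return PTR_ERR(vaddr);
	}
	memset(vaddr, 0, PAGE_SIZE);
	ion_buffer_kmap_put(buffer);		/* unmaps when count hits 0 */
	mutex_unlock(&buffer->lock);
	return 0;
}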


@@ -154,19 +154,16 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	/*
 	 * TODO: Move this elsewhere because we don't always need a vaddr
+	 * FIXME: Why do we need a vaddr here?
 	 */
 	ret = 0;
-	if (heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		vaddr = ion_buffer_kmap_get(buffer);
-		if (IS_ERR(vaddr)) {
-			ret = PTR_ERR(vaddr);
-			goto unlock;
-		}
-		mutex_unlock(&buffer->lock);
+	mutex_lock(&buffer->lock);
+	vaddr = ion_buffer_kmap_get(buffer);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
+		goto unlock;
 	}
 
-	mutex_lock(&buffer->lock);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
 				    direction);
 
@@ -206,13 +203,9 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 	if (heap->buf_ops.end_cpu_access)
 		return heap->buf_ops.end_cpu_access(dmabuf, direction);
 
-	if (heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		ion_buffer_kmap_put(buffer);
-		mutex_unlock(&buffer->lock);
-	}
 	mutex_lock(&buffer->lock);
+	ion_buffer_kmap_put(buffer);
 	list_for_each_entry(a, &buffer->attachments, list) {
 		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
 				       direction);
 
@@ -249,7 +242,7 @@ static void *ion_dma_buf_map(struct dma_buf *dmabuf, unsigned long offset)
 	if (heap->buf_ops.map)
 		return heap->buf_ops.map(dmabuf, offset);
 
-	return buffer->vaddr + offset * PAGE_SIZE;
+	return ion_buffer_kmap_get(buffer) + offset * PAGE_SIZE;
 }
 
 static int ion_dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
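From userspace, the begin/end paths changed above are reached through the
standard dma-buf sync ioctl, which is how tests like ion-unit-tests exercise
them. A minimal sketch, assuming fd is a dma-buf fd exported by an ION
allocation and len is the buffer size:

/* Userspace sketch: drive begin/end_cpu_access via DMA_BUF_IOCTL_SYNC.
 * "fd" and "len" are assumptions (an ION-exported dma-buf and its size);
 * error handling is abbreviated. */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

static int cpu_fill(int fd, size_t len)
{
	struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START |
					      DMA_BUF_SYNC_WRITE };
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	int ret;

	if (p == MAP_FAILED)
		return -1;
	ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);	/* begin_cpu_access */
	if (!ret) {
		memset(p, 0xa5, len);
		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
		ret = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync); /* end_cpu_access */
	}
	munmap(p, len);
	return ret;
}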


@@ -57,11 +57,8 @@ struct ion_buffer {
  * struct ion_heap_ops - ops to operate on a given heap
  * @allocate:		allocate memory
  * @free:		free memory
- * @map_kernel		map memory to the kernel
- * @unmap_kernel	unmap memory to the kernel
- * @map_user		map memory to userspace
  *
- * allocate, phys, and map_user return 0 on success, -errno on error.
+ * allocate returns 0 on success, -errno on error.
  * map_dma and map_kernel return pointer on success, ERR_PTR on
  * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
  * the buffer's private_flags when called from a shrinker. In that
@@ -73,10 +70,6 @@ struct ion_heap_ops {
 			struct ion_buffer *buffer, unsigned long len,
 			unsigned long flags);
 	void (*free)(struct ion_buffer *buffer);
-	void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
-	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
-	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
-			struct vm_area_struct *vma);
 	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
 };
 
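Reassembled from the context lines of the hunk above (the first line of the
allocate signature sits just before the hunk and is reconstructed here), the
surviving ops vector is just:

/* The ops vector as it stands after this change: only allocation,
 * free and an optional shrinker hook remain heap-specific. */
struct ion_heap_ops {
	int (*allocate)(struct ion_heap *heap,
			struct ion_buffer *buffer, unsigned long len,
			unsigned long flags);
	void (*free)(struct ion_buffer *buffer);
	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
};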