@@ -75,7 +75,7 @@ static int cam_mem_util_map_cpu_va(struct dma_buf *dmabuf,
 	uintptr_t *vaddr,
 	size_t *len)
 {
-	int i, j, rc;
+	int rc = 0;
 	void *addr;
 
 	/*
@@ -88,30 +88,16 @@ static int cam_mem_util_map_cpu_va(struct dma_buf *dmabuf,
 		return rc;
 	}
 
-	/*
-	 * Code could be simplified if ION support of dma_buf_vmap is
-	 * available. This workaround takes the avandaage that ion_alloc
-	 * returns a virtually contiguous memory region, so we just need
-	 * to _kmap each individual page and then only use the virtual
-	 * address returned from the first call to _kmap.
-	 */
-	for (i = 0; i < PAGE_ALIGN(dmabuf->size) / PAGE_SIZE; i++) {
-		addr = dma_buf_kmap(dmabuf, i);
-		if (IS_ERR_OR_NULL(addr)) {
-			CAM_ERR(CAM_MEM, "kernel map fail");
-			for (j = 0; j < i; j++)
-				dma_buf_kunmap(dmabuf,
-					j,
-					(void *)(*vaddr + (j * PAGE_SIZE)));
-			*vaddr = 0;
-			*len = 0;
-			rc = -ENOSPC;
-			goto fail;
-		}
-		if (i == 0)
-			*vaddr = (uint64_t)addr;
+	addr = dma_buf_vmap(dmabuf);
+	if (!addr) {
+		CAM_ERR(CAM_MEM, "kernel map fail");
+		*vaddr = 0;
+		*len = 0;
+		rc = -ENOSPC;
+		goto fail;
 	}
 
+	*vaddr = (uint64_t)addr;
 	*len = dmabuf->size;
 
 	return 0;
@@ -123,19 +109,14 @@ fail:
 static int cam_mem_util_unmap_cpu_va(struct dma_buf *dmabuf,
 	uint64_t vaddr)
 {
-	int i, rc = 0, page_num;
+	int rc = 0;
 
 	if (!dmabuf || !vaddr) {
 		CAM_ERR(CAM_MEM, "Invalid input args %pK %llX", dmabuf, vaddr);
 		return -EINVAL;
 	}
 
-	page_num = PAGE_ALIGN(dmabuf->size) / PAGE_SIZE;
-
-	for (i = 0; i < page_num; i++) {
-		dma_buf_kunmap(dmabuf, i,
-			(void *)(vaddr + (i * PAGE_SIZE)));
-	}
+	dma_buf_vunmap(dmabuf, (void *)vaddr);
 
 	/*
 	 * dma_buf_begin_cpu_access() and
@@ -982,6 +963,10 @@ static void cam_mem_mgr_unmap_active_buf(int idx)
 		region = CAM_SMMU_REGION_IO;
 
 	cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER);
+
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
+		cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf,
+			tbl.bufq[idx].kmdvaddr);
 }
 
 static int cam_mem_mgr_cleanup_table(void)
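
Note, not part of the patch itself: the change swaps the old per-page dma_buf_kmap()
workaround for a single dma_buf_vmap() call, and makes cam_mem_mgr_unmap_active_buf()
also drop the kernel mapping for buffers mapped with CAM_MEM_FLAG_KMD_ACCESS. Below is
a minimal sketch of the resulting map/unmap pattern, assuming the pre-5.11 dma-buf API
used in this diff (dma_buf_vmap() returning a void * and dma_buf_vunmap() taking that
pointer back); the sketch_* names are illustrative, not from the driver:

	#include <linux/dma-buf.h>

	/* Map the whole dma-buf into the kernel address space in one call. */
	static int sketch_map(struct dma_buf *dmabuf, uintptr_t *vaddr, size_t *len)
	{
		void *addr;

		addr = dma_buf_vmap(dmabuf);	/* one contiguous mapping */
		if (!addr)
			return -ENOSPC;

		*vaddr = (uintptr_t)addr;
		*len = dmabuf->size;
		return 0;
	}

	/* Release the mapping created by sketch_map(). */
	static void sketch_unmap(struct dma_buf *dmabuf, uintptr_t vaddr)
	{
		dma_buf_vunmap(dmabuf, (void *)vaddr);
	}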