@@ -340,7 +340,6 @@ static int32_t cam_mem_get_slot(void)
 	tbl.bufq[idx].release_deferred = false;
 	CAM_GET_TIMESTAMP((tbl.bufq[idx].timestamp));
 	mutex_init(&tbl.bufq[idx].q_lock);
-	mutex_init(&tbl.bufq[idx].ref_lock);
 	mutex_unlock(&tbl.m_lock);

 	return idx;
@@ -354,13 +353,10 @@ static void cam_mem_put_slot(int32_t idx)
 	tbl.bufq[idx].release_deferred = false;
 	tbl.bufq[idx].is_internal = false;
 	memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
+	kref_init(&tbl.bufq[idx].krefcount);
+	kref_init(&tbl.bufq[idx].urefcount);
 	mutex_unlock(&tbl.bufq[idx].q_lock);
-	mutex_lock(&tbl.bufq[idx].ref_lock);
-	memset(&tbl.bufq[idx].krefcount, 0, sizeof(struct kref));
-	memset(&tbl.bufq[idx].urefcount, 0, sizeof(struct kref));
-	mutex_unlock(&tbl.bufq[idx].ref_lock);
 	mutex_destroy(&tbl.bufq[idx].q_lock);
-	mutex_destroy(&tbl.bufq[idx].ref_lock);
 	clear_bit(idx, tbl.bitmap);
 	mutex_unlock(&tbl.m_lock);
 }
@@ -539,17 +535,14 @@ int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
 		return -EINVAL;
 	}

-	mutex_lock(&tbl.bufq[idx].ref_lock);
 	if (tbl.bufq[idx].kmdvaddr && kref_get_unless_zero(&tbl.bufq[idx].krefcount)) {
 		*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
 		*len = tbl.bufq[idx].len;
 	} else {
-		mutex_unlock(&tbl.bufq[idx].ref_lock);
 		CAM_ERR(CAM_MEM, "No KMD access requested, kmdvddr= %p, idx= %d, buf_handle= %d",
 			tbl.bufq[idx].kmdvaddr, idx, buf_handle);
 		return -EINVAL;
 	}
-	mutex_unlock(&tbl.bufq[idx].ref_lock);

 	return 0;
 }
@@ -1713,13 +1706,10 @@ static int cam_mem_mgr_cleanup_table(void)
 		tbl.bufq[i].is_internal = false;
 		memset(tbl.bufq[i].hdls_info, 0x0, tbl.max_hdls_info_size);
 		cam_mem_mgr_reset_presil_params(i);
+		kref_init(&tbl.bufq[i].krefcount);
+		kref_init(&tbl.bufq[i].urefcount);
 		mutex_unlock(&tbl.bufq[i].q_lock);
-		mutex_lock(&tbl.bufq[i].ref_lock);
-		memset(&tbl.bufq[i].krefcount, 0, sizeof(struct kref));
-		memset(&tbl.bufq[i].urefcount, 0, sizeof(struct kref));
-		mutex_unlock(&tbl.bufq[i].ref_lock);
 		mutex_destroy(&tbl.bufq[i].q_lock);
-		mutex_destroy(&tbl.bufq[i].ref_lock);
 	}

 	bitmap_zero(tbl.bitmap, tbl.bits);
@@ -1863,8 +1853,6 @@ static void cam_mem_util_unmap_wrapper(struct kref *kref)
 	}

 	cam_mem_util_unmap(idx);
-
-	mutex_destroy(&tbl.bufq[idx].ref_lock);
 }

 void cam_mem_put_cpu_buf(int32_t buf_handle)
@@ -1897,7 +1885,6 @@ void cam_mem_put_cpu_buf(int32_t buf_handle)
 		return;
 	}

-	mutex_lock(&tbl.bufq[idx].ref_lock);
 	kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_dummy);

 	krefcount = kref_read(&tbl.bufq[idx].krefcount);
@@ -1930,11 +1917,6 @@ void cam_mem_put_cpu_buf(int32_t buf_handle)
 			"Unbalanced release Called buf_handle: %u, idx: %d",
 			tbl.bufq[idx].buf_handle, idx);
 	}
-	mutex_unlock(&tbl.bufq[idx].ref_lock);
-
-	if (unmap)
-		mutex_destroy(&tbl.bufq[idx].ref_lock);
-
 }
 EXPORT_SYMBOL(cam_mem_put_cpu_buf);

@@ -1979,7 +1961,6 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)

 	CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx);

-	mutex_lock(&tbl.bufq[idx].ref_lock);
 	kref_put(&tbl.bufq[idx].urefcount, cam_mem_util_unmap_dummy);

 	urefcount = kref_read(&tbl.bufq[idx].urefcount);
@@ -2013,11 +1994,6 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
 		tbl.bufq[idx].release_deferred = true;
 	}

-	mutex_unlock(&tbl.bufq[idx].ref_lock);
-
-	if (unmap)
-		mutex_destroy(&tbl.bufq[idx].ref_lock);
-
 	return rc;
 }
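
For context, below is a minimal sketch of the lockless kref pattern this patch moves to. It is an illustration only: struct demo_buf, demo_buf_init, demo_buf_release and demo_buf_access are hypothetical names, not part of the driver, which keeps its krefcount/urefcount in tbl.bufq[idx] and uses cam_mem_util_unmap_wrapper/_dummy as release callbacks.

/*
 * Sketch (not part of the patch): kref_init() starts the count at 1,
 * kref_get_unless_zero() atomically takes a reference only while the
 * object is still live, and kref_put() runs the release callback exactly
 * once when the count reaches zero.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical stand-in for one bufq slot. */
struct demo_buf {
	struct kref krefcount;	/* kernel-side references */
	void *payload;
};

static void demo_buf_init(struct demo_buf *buf)
{
	/* Replaces the memset()-under-ref_lock style of resetting the kref. */
	kref_init(&buf->krefcount);
}

static void demo_buf_release(struct kref *kref)
{
	struct demo_buf *buf = container_of(kref, struct demo_buf, krefcount);

	/* Final teardown runs exactly once, when the count drops to zero. */
	kfree(buf->payload);
}

static int demo_buf_access(struct demo_buf *buf)
{
	/* Fails if the last reference is already gone; no external lock
	 * is needed around the get/put pair.
	 */
	if (!kref_get_unless_zero(&buf->krefcount))
		return -EINVAL;

	/* ... use buf->payload ... */

	kref_put(&buf->krefcount, demo_buf_release);
	return 0;
}

The point of the sketch: the kref primitives are atomic, which is what allows the patch to drop the per-slot ref_lock mutex around init, get and put.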