msm: camera: mem_mgr: Add refcount to track in use buffers

The function cam_mem_mgr_release can unmap buffers that are still in use.

This change adds a kref-based reference count to each buffer so that a
buffer is unmapped only when the last user releases it, preventing an
unmap while the buffer is in use.

CRs-Fixed: 3489559
Change-Id: I2e72e795d39ac15abfa56c19043c419a03686966
Signed-off-by: Shivakumar Malke <quic_smalke@quicinc.com>
This commit is contained in:
Shivakumar Malke
2023-05-19 11:52:42 +05:30
committed by Camera Software Integration
parent 87913e552d
commit 6d99262523
24 changed files with 361 additions and 58 deletions

View File

@@ -491,6 +491,7 @@ int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
return -EINVAL;
idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
return -EINVAL;
@@ -500,18 +501,24 @@ int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
return -EPERM;
}
if (buf_handle != tbl.bufq[idx].buf_handle)
if (buf_handle != tbl.bufq[idx].buf_handle) {
CAM_ERR(CAM_MEM, "idx: %d Invalid buf handle %d",
idx, buf_handle);
return -EINVAL;
}
if (!(tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS))
if (!(tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)) {
CAM_ERR(CAM_MEM, "idx: %d Invalid flag 0x%x",
idx, tbl.bufq[idx].flags);
return -EINVAL;
}
if (tbl.bufq[idx].kmdvaddr) {
if (tbl.bufq[idx].kmdvaddr && kref_get_unless_zero(&tbl.bufq[idx].krefcount)) {
*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
*len = tbl.bufq[idx].len;
} else {
CAM_ERR(CAM_MEM, "No KMD access was requested for 0x%x handle",
buf_handle);
CAM_ERR(CAM_MEM, "No KMD access requested, kmdvddr= %p, idx= %d, buf_handle= %d",
tbl.bufq[idx].kmdvaddr, idx, buf_handle);
return -EINVAL;
}
@@ -1357,6 +1364,8 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd_v2 *cmd)
tbl.bufq[idx].num_hdls = cmd->num_hdl;
cam_mem_mgr_reset_presil_params(idx);
tbl.bufq[idx].is_imported = false;
kref_init(&tbl.bufq[idx].krefcount);
tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_USER;
mutex_unlock(&tbl.bufq[idx].q_lock);
cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
@@ -1488,6 +1497,8 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd_v2 *cmd)
tbl.bufq[idx].num_hdls = cmd->num_hdl;
tbl.bufq[idx].is_imported = true;
tbl.bufq[idx].is_internal = is_internal;
kref_init(&tbl.bufq[idx].krefcount);
tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_USER;
mutex_unlock(&tbl.bufq[idx].q_lock);
cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
@@ -1667,24 +1678,30 @@ void cam_mem_mgr_deinit(void)
mutex_destroy(&tbl.m_lock);
}
static int cam_mem_util_unmap(int32_t idx,
enum cam_smmu_mapping_client client)
static void cam_mem_util_unmap(struct kref *kref)
{
int rc = 0;
int32_t idx;
enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
enum cam_smmu_mapping_client client;
struct cam_mem_buf_queue *bufq =
container_of(kref, typeof(*bufq), krefcount);
idx = CAM_MEM_MGR_GET_HDL_IDX(bufq->buf_handle);
if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
CAM_ERR(CAM_MEM, "Incorrect index");
return -EINVAL;
return;
}
client = tbl.bufq[idx].smmu_mapping_client;
CAM_DBG(CAM_MEM, "Flags = %X idx %d", tbl.bufq[idx].flags, idx);
mutex_lock(&tbl.m_lock);
if (!tbl.bufq[idx].active) {
CAM_WARN(CAM_MEM, "Buffer at idx=%d is already unmapped", idx);
mutex_unlock(&tbl.m_lock);
return 0;
return;
}
/* Deactivate the buffer queue to prevent multiple unmap */
@@ -1750,13 +1767,50 @@ static int cam_mem_util_unmap(int32_t idx,
clear_bit(idx, tbl.bitmap);
mutex_unlock(&tbl.m_lock);
return rc;
}
/*
 * cam_mem_put_cpu_buf() - drop the KMD reference taken by
 * cam_mem_get_cpu_buf() on the buffer identified by @buf_handle.
 *
 * Decrements the buffer's krefcount; when the count reaches zero the
 * release callback cam_mem_util_unmap() runs and unmaps the buffer.
 * Invalid handles (zero, out-of-range index, inactive slot, or a handle
 * that does not match the slot) are logged and ignored.
 *
 * NOTE(review): tbl.bufq[idx] is read here without taking a lock, so the
 * active/handle checks can race with a concurrent release — confirm the
 * caller-side guarantees.
 */
void cam_mem_put_cpu_buf(int32_t buf_handle)
{
	int idx;

	if (!buf_handle) {
		CAM_ERR(CAM_MEM, "Invalid buf_handle");
		return;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_MEM, "idx: %d not valid", idx);
		return;
	}

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_MEM, "idx: %d not active", idx);
		return;
	}

	/* Stale handle: the slot was reused for a different buffer. */
	if (buf_handle != tbl.bufq[idx].buf_handle) {
		CAM_ERR(CAM_MEM, "idx: %d Invalid buf handle %d",
			idx, buf_handle);
		return;
	}

	/* kref_put() returns nonzero when this put released the last ref. */
	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
		CAM_DBG(CAM_MEM,
			"Called unmap from here, buf_handle: %u, idx: %d",
			buf_handle, idx);
}
EXPORT_SYMBOL(cam_mem_put_cpu_buf);
int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
{
int idx;
int rc;
int rc = 0;
if (!atomic_read(&cam_mem_mgr_state)) {
CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
@@ -1788,8 +1842,18 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
}
CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx);
rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);
if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap)) {
CAM_DBG(CAM_MEM,
"Called unmap from here, buf_handle: %u, idx: %d",
cmd->buf_handle, idx);
} else {
rc = -EINVAL;
CAM_ERR(CAM_MEM,
"Unbalanced release Called buf_handle: %u, idx: %d",
cmd->buf_handle, idx);
}
return rc;
}
@@ -1898,6 +1962,8 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
tbl.bufq[idx].len = inp->size;
tbl.bufq[idx].num_hdls = 1;
tbl.bufq[idx].is_imported = false;
kref_init(&tbl.bufq[idx].krefcount);
tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_KERNEL;
mutex_unlock(&tbl.bufq[idx].q_lock);
out->kva = kvaddr;
@@ -1926,7 +1992,7 @@ EXPORT_SYMBOL(cam_mem_mgr_request_mem);
int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
{
int32_t idx;
int rc;
int rc = 0;
if (!atomic_read(&cam_mem_mgr_state)) {
CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
@@ -1956,7 +2022,16 @@ int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
}
CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
CAM_DBG(CAM_MEM,
"Called unmap from here, buf_handle: %u, idx: %d",
tbl.bufq[idx].buf_handle, idx);
else {
CAM_ERR(CAM_MEM,
"Unbalanced release Called buf_handle: %u, idx: %d",
tbl.bufq[idx].buf_handle, idx);
rc = -EINVAL;
}
return rc;
}
@@ -2049,6 +2124,8 @@ int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
tbl.bufq[idx].len = request_len;
tbl.bufq[idx].num_hdls = 1;
tbl.bufq[idx].is_imported = false;
kref_init(&tbl.bufq[idx].krefcount);
tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_KERNEL;
mutex_unlock(&tbl.bufq[idx].q_lock);
out->kva = kvaddr;
@@ -2174,7 +2251,7 @@ int cam_mem_mgr_dump_user(struct cam_dump_req_cmd *dump_req)
int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
{
int32_t rc, idx, entry_idx;
int32_t rc = 0, idx, entry_idx;
if (!atomic_read(&cam_mem_mgr_state)) {
CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
@@ -2235,9 +2312,16 @@ int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
}
CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
if (rc)
CAM_ERR(CAM_MEM, "unmapping secondary heap failed");
if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
CAM_DBG(CAM_MEM,
"Called unmap from here, buf_handle: %u, idx: %d",
inp->mem_handle, idx);
else {
CAM_ERR(CAM_MEM,
"Unbalanced release Called buf_handle: %u, idx: %d",
inp->mem_handle, idx);
rc = -EINVAL;
}
return rc;
}