Merge "msm: camera: memmgr: Add refcount to track umd in use buffers" into camera-kernel.lnx.7.0

Camera Software Integration authored 2024-04-15 07:20:51 -07:00; committed by Gerrit - the friendly Code Review server.
2 changed files with 116 additions and 23 deletions
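
For orientation before the diff: the change splits the single per-buffer kref in two. krefcount now counts only kernel-mode (KMD) users and is initialized only for buffers created with CAM_MEM_FLAG_KMD_ACCESS, while the new urefcount counts user-mode (UMD) users; the buffer is unmapped only once both counters show no remaining users, and kref_put() is given a no-op release callback so that it only decrements. Below is a minimal sketch of that release decision; it is illustrative only, not the driver code, and the demo_* names are hypothetical stand-ins for struct cam_mem_buf_queue and its helpers.

/* Illustrative sketch only - mirrors the release decision implemented in
 * cam_mem_mgr_release() in the diff below; demo_* identifiers are hypothetical.
 */
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_buf {
	struct kref krefcount;  /* KMD users; kref_init()'d only with KMD_ACCESS */
	struct kref urefcount;  /* UMD users; always kref_init()'d */
	struct mutex ref_lock;  /* serializes refcount updates and reads */
	bool kmd_access;        /* stand-in for CAM_MEM_FLAG_KMD_ACCESS */
};

/* No-op release: the kref_put() below should only decrement, never tear down. */
static void demo_unmap_dummy(struct kref *kref)
{
}

/* UMD drops its reference; returns true when the buffer can be unmapped. */
static bool demo_umd_release(struct demo_buf *buf)
{
	bool unmap;

	mutex_lock(&buf->ref_lock);
	kref_put(&buf->urefcount, demo_unmap_dummy);

	if (buf->kmd_access)
		/* krefcount parked at its initial value means no KMD user left */
		unmap = (kref_read(&buf->krefcount) == 1) &&
			(kref_read(&buf->urefcount) == 0);
	else
		unmap = (kref_read(&buf->urefcount) == 0);
	mutex_unlock(&buf->ref_lock);

	return unmap;
}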

File: cam_mem_mgr.c

@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -100,10 +100,11 @@ static void cam_mem_mgr_print_tbl(void)
 	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
 		CAM_CONVERT_TIMESTAMP_FORMAT((tbl.bufq[i].timestamp), hrs, min, sec, ms);
 		CAM_INFO(CAM_MEM,
-			"%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d refCount %d buf_name %s",
+			"%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d krefCount %d urefCount %d buf_name %s",
 			hrs, min, sec, ms, i, tbl.bufq[i].fd, tbl.bufq[i].i_ino,
 			tbl.bufq[i].len, tbl.bufq[i].active, tbl.bufq[i].buf_handle,
-			kref_read(&tbl.bufq[i].krefcount), tbl.bufq[i].buf_name);
+			kref_read(&tbl.bufq[i].krefcount), kref_read(&tbl.bufq[i].urefcount),
+			tbl.bufq[i].buf_name);
 	}
 }
@@ -339,6 +340,7 @@ static int32_t cam_mem_get_slot(void)
 	tbl.bufq[idx].release_deferred = false;
 	CAM_GET_TIMESTAMP((tbl.bufq[idx].timestamp));
 	mutex_init(&tbl.bufq[idx].q_lock);
+	mutex_init(&tbl.bufq[idx].ref_lock);
 	mutex_unlock(&tbl.m_lock);
 
 	return idx;
@@ -353,7 +355,12 @@ static void cam_mem_put_slot(int32_t idx)
 	tbl.bufq[idx].is_internal = false;
 	memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
 	mutex_unlock(&tbl.bufq[idx].q_lock);
+	mutex_lock(&tbl.bufq[idx].ref_lock);
+	memset(&tbl.bufq[idx].krefcount, 0, sizeof(struct kref));
+	memset(&tbl.bufq[idx].urefcount, 0, sizeof(struct kref));
+	mutex_unlock(&tbl.bufq[idx].ref_lock);
 	mutex_destroy(&tbl.bufq[idx].q_lock);
+	mutex_destroy(&tbl.bufq[idx].ref_lock);
 	clear_bit(idx, tbl.bitmap);
 	mutex_unlock(&tbl.m_lock);
 }
@@ -532,14 +539,17 @@ int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
 		return -EINVAL;
 	}
 
+	mutex_lock(&tbl.bufq[idx].ref_lock);
 	if (tbl.bufq[idx].kmdvaddr && kref_get_unless_zero(&tbl.bufq[idx].krefcount)) {
 		*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
 		*len = tbl.bufq[idx].len;
 	} else {
+		mutex_unlock(&tbl.bufq[idx].ref_lock);
 		CAM_ERR(CAM_MEM, "No KMD access requested, kmdvddr= %p, idx= %d, buf_handle= %d",
 			tbl.bufq[idx].kmdvaddr, idx, buf_handle);
 		return -EINVAL;
 	}
+	mutex_unlock(&tbl.bufq[idx].ref_lock);
 
 	return 0;
 }
@@ -1416,7 +1426,12 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd_v2 *cmd)
 	tbl.bufq[idx].num_hdls = cmd->num_hdl;
 	cam_mem_mgr_reset_presil_params(idx);
 	tbl.bufq[idx].is_imported = false;
-	kref_init(&tbl.bufq[idx].krefcount);
+
+	if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS)
+		kref_init(&tbl.bufq[idx].krefcount);
+
+	kref_init(&tbl.bufq[idx].urefcount);
+
 	tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_USER;
 	strscpy(tbl.bufq[idx].buf_name, cmd->buf_name, sizeof(tbl.bufq[idx].buf_name));
 	mutex_unlock(&tbl.bufq[idx].q_lock);
@@ -1551,7 +1566,9 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd_v2 *cmd)
 	tbl.bufq[idx].num_hdls = cmd->num_hdl;
 	tbl.bufq[idx].is_imported = true;
 	tbl.bufq[idx].is_internal = is_internal;
-	kref_init(&tbl.bufq[idx].krefcount);
+	if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS)
+		kref_init(&tbl.bufq[idx].krefcount);
+	kref_init(&tbl.bufq[idx].urefcount);
 	tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_USER;
 	strscpy(tbl.bufq[idx].buf_name, cmd->buf_name, sizeof(tbl.bufq[idx].buf_name));
 	mutex_unlock(&tbl.bufq[idx].q_lock);
@@ -1697,7 +1714,12 @@ static int cam_mem_mgr_cleanup_table(void)
 		memset(tbl.bufq[i].hdls_info, 0x0, tbl.max_hdls_info_size);
 		cam_mem_mgr_reset_presil_params(i);
 		mutex_unlock(&tbl.bufq[i].q_lock);
+		mutex_lock(&tbl.bufq[i].ref_lock);
+		memset(&tbl.bufq[i].krefcount, 0, sizeof(struct kref));
+		memset(&tbl.bufq[i].urefcount, 0, sizeof(struct kref));
+		mutex_unlock(&tbl.bufq[i].ref_lock);
 		mutex_destroy(&tbl.bufq[i].q_lock);
+		mutex_destroy(&tbl.bufq[i].ref_lock);
 	}
 
 	bitmap_zero(tbl.bitmap, tbl.bits);
@@ -1734,16 +1756,17 @@ void cam_mem_mgr_deinit(void)
 	mutex_destroy(&tbl.m_lock);
 }
 
-static void cam_mem_util_unmap(struct kref *kref)
+static void cam_mem_util_unmap_dummy(struct kref *kref)
+{
+	CAM_DBG(CAM_MEM, "Cam mem util unmap dummy");
+}
+
+static void cam_mem_util_unmap(int32_t idx)
 {
 	int rc = 0;
-	int32_t idx;
 	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
 	enum cam_smmu_mapping_client client;
-	struct cam_mem_buf_queue *bufq =
-		container_of(kref, typeof(*bufq), krefcount);
 
-	idx = CAM_MEM_MGR_GET_HDL_IDX(bufq->buf_handle);
 	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
 		CAM_ERR(CAM_MEM, "Incorrect index");
 		return;
@@ -1819,6 +1842,8 @@ static void cam_mem_util_unmap(struct kref *kref)
 	memset(tbl.bufq[idx].hdls_info, 0x0, tbl.max_hdls_info_size);
 	cam_mem_mgr_reset_presil_params(idx);
 	memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
+	memset(&tbl.bufq[idx].krefcount, 0, sizeof(struct kref));
+	memset(&tbl.bufq[idx].urefcount, 0, sizeof(struct kref));
 	mutex_unlock(&tbl.bufq[idx].q_lock);
 	mutex_destroy(&tbl.bufq[idx].q_lock);
 	clear_bit(idx, tbl.bitmap);
@@ -1826,11 +1851,29 @@ static void cam_mem_util_unmap(struct kref *kref)
 }
 
+static void cam_mem_util_unmap_wrapper(struct kref *kref)
+{
+	int32_t idx;
+	struct cam_mem_buf_queue *bufq = container_of(kref, typeof(*bufq), krefcount);
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(bufq->buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		CAM_ERR(CAM_MEM, "idx: %d not valid", idx);
+		return;
+	}
+
+	cam_mem_util_unmap(idx);
+	mutex_destroy(&tbl.bufq[idx].ref_lock);
+}
+
 void cam_mem_put_cpu_buf(int32_t buf_handle)
 {
 	int idx;
 	uint64_t ms, hrs, min, sec;
 	struct timespec64 current_ts;
+	uint32_t krefcount = 0, urefcount = 0;
+	bool unmap = false;
 
 	if (!buf_handle) {
 		CAM_ERR(CAM_MEM, "Invalid buf_handle");
@@ -1854,7 +1897,17 @@ void cam_mem_put_cpu_buf(int32_t buf_handle)
 		return;
 	}
 
-	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap)) {
+	mutex_lock(&tbl.bufq[idx].ref_lock);
+	kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_dummy);
+
+	krefcount = kref_read(&tbl.bufq[idx].krefcount);
+	urefcount = kref_read(&tbl.bufq[idx].urefcount);
+
+	if ((krefcount == 1) && (urefcount == 0))
+		unmap = true;
+
+	if (unmap) {
+		cam_mem_util_unmap(idx);
 		CAM_GET_TIMESTAMP(current_ts);
 		CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms);
 		CAM_DBG(CAM_MEM,
@@ -1863,16 +1916,25 @@ void cam_mem_put_cpu_buf(int32_t buf_handle)
 	} else if (tbl.bufq[idx].release_deferred) {
 		CAM_CONVERT_TIMESTAMP_FORMAT((tbl.bufq[idx].timestamp), hrs, min, sec, ms);
 		CAM_ERR(CAM_MEM,
-			"%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d refCount %d buf_name %s",
+			"%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d krefCount %d urefCount %d buf_name %s",
 			hrs, min, sec, ms, idx, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino,
 			tbl.bufq[idx].len, tbl.bufq[idx].active, tbl.bufq[idx].buf_handle,
-			kref_read(&tbl.bufq[idx].krefcount), tbl.bufq[idx].buf_name);
+			krefcount, urefcount, tbl.bufq[idx].buf_name);
 		CAM_GET_TIMESTAMP(current_ts);
 		CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms);
 		CAM_ERR(CAM_MEM,
 			"%llu:%llu:%llu:%llu Not unmapping even after defer, buf_handle: %u, idx: %d",
 			hrs, min, sec, ms, buf_handle, idx);
+	} else if (krefcount == 0) {
+		CAM_ERR(CAM_MEM,
+			"Unbalanced release Called buf_handle: %u, idx: %d",
+			tbl.bufq[idx].buf_handle, idx);
 	}
+
+	mutex_unlock(&tbl.bufq[idx].ref_lock);
+	if (unmap)
+		mutex_destroy(&tbl.bufq[idx].ref_lock);
 }
 EXPORT_SYMBOL(cam_mem_put_cpu_buf);
@@ -1883,6 +1945,8 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
 	int rc = 0;
 	uint64_t ms, hrs, min, sec;
 	struct timespec64 current_ts;
+	uint32_t krefcount = 0, urefcount = 0;
+	bool unmap = false;
 
 	if (!atomic_read(&cam_mem_mgr_state)) {
 		CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");
@@ -1914,22 +1978,46 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
 	}
 
 	CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx);
-	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap)) {
-		CAM_DBG(CAM_MEM,
-			"Called unmap from here, buf_handle: %u, idx: %d",
-			cmd->buf_handle, idx);
+
+	mutex_lock(&tbl.bufq[idx].ref_lock);
+	kref_put(&tbl.bufq[idx].urefcount, cam_mem_util_unmap_dummy);
+	urefcount = kref_read(&tbl.bufq[idx].urefcount);
+
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
+		krefcount = kref_read(&tbl.bufq[idx].krefcount);
+
+		if ((krefcount == 1) && (urefcount == 0))
+			unmap = true;
+	} else {
+		if (urefcount == 0)
+			unmap = true;
+	}
+
+	if (unmap) {
+		cam_mem_util_unmap(idx);
+		CAM_DBG(CAM_MEM,
+			"Called unmap from here, buf_handle: %u, idx: %d", cmd->buf_handle, idx);
 	} else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
 		rc = -EINVAL;
 		CAM_GET_TIMESTAMP(current_ts);
 		CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms);
 		CAM_CONVERT_TIMESTAMP_FORMAT((tbl.bufq[idx].timestamp), hrs, min, sec, ms);
 		CAM_ERR(CAM_MEM,
-			"%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d refCount %d buf_name %s",
+			"%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d krefCount %d urefCount %d buf_name %s",
 			hrs, min, sec, ms, idx, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino,
 			tbl.bufq[idx].len, tbl.bufq[idx].active, tbl.bufq[idx].buf_handle,
-			kref_read(&tbl.bufq[idx].krefcount), tbl.bufq[idx].buf_name);
+			krefcount, urefcount, tbl.bufq[idx].buf_name);
 		if (tbl.bufq[idx].release_deferred)
 			CAM_ERR(CAM_MEM, "Unbalanced release Called buf_handle: %u, idx: %d",
 				tbl.bufq[idx].buf_handle, idx);
 		tbl.bufq[idx].release_deferred = true;
 	}
+	mutex_unlock(&tbl.bufq[idx].ref_lock);
+	if (unmap)
+		mutex_destroy(&tbl.bufq[idx].ref_lock);
 
 	return rc;
 }
@@ -2099,7 +2187,7 @@ int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
 	}
 
 	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
-	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
+	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_wrapper))
 		CAM_DBG(CAM_MEM,
 			"Called unmap from here, buf_handle: %u, idx: %d",
 			tbl.bufq[idx].buf_handle, idx);
@@ -2393,7 +2481,7 @@ int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
 	}
 
 	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
-	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
+	if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_wrapper))
		CAM_DBG(CAM_MEM,
 			"Called unmap from here, buf_handle: %u, idx: %d",
 			inp->mem_handle, idx);
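
For the kernel-owned paths above (cam_mem_mgr_release_mem and cam_mem_mgr_free_memory_region), teardown is still driven by the kref release callback: cam_mem_util_unmap_wrapper recovers the table entry with container_of() and then calls the index-based cam_mem_util_unmap(). A generic sketch of that callback pattern follows; the demo_* names are hypothetical and this is not the driver code itself.

/* Illustrative container_of()-based kref release callback, in the spirit of
 * cam_mem_util_unmap_wrapper(); demo_* identifiers are hypothetical.
 */
#include <linux/kref.h>
#include <linux/kernel.h>	/* container_of() */
#include <linux/printk.h>

struct demo_entry {
	int handle;
	struct kref krefcount;
};

static void demo_teardown(int handle)
{
	/* a real implementation would unmap/free the resources for this handle */
}

/* Runs exactly once, when the last reference is dropped. */
static void demo_release(struct kref *kref)
{
	struct demo_entry *e = container_of(kref, struct demo_entry, krefcount);

	demo_teardown(e->handle);
}

static void demo_put(struct demo_entry *e)
{
	/* kref_put() returns nonzero only when demo_release() was invoked */
	if (kref_put(&e->krefcount, demo_release))
		pr_debug("demo entry released\n");
}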

File: cam_mem_mgr.h

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _CAM_MEM_MGR_H_
@@ -82,10 +82,13 @@ struct cam_mem_buf_hw_hdl_info {
  * @is_internal:   Flag indicating kernel allocated buffer
  * @timestamp:     Timestamp at which this entry in tbl was made
  * @krefcount:     Reference counter to track whether the buffer is
- *                 mapped and in use
+ *                 mapped and in use by kmd
  * @smmu_mapping_client: Client buffer (User or kernel)
  * @buf_name:      Name associated with buffer.
  * @presil_params: Parameters specific to presil environment
+ * @urefcount:     Reference counter to track whether the buffer is
+ *                 mapped and in use by umd
+ * @ref_lock:      Mutex lock for refcount
  */
 struct cam_mem_buf_queue {
 	struct dma_buf *dma_buf;
@@ -111,6 +114,8 @@ struct cam_mem_buf_queue {
 #ifdef CONFIG_CAM_PRESIL
 	struct cam_presil_dmabuf_params presil_params;
 #endif
+	struct kref urefcount;
+	struct mutex ref_lock;
 };
 
 /**
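
As the updated kernel-doc above describes, krefcount tracks KMD CPU mappings while urefcount tracks UMD ownership. On the KMD side the counters are exercised through the existing accessors whose signatures appear in the diff (cam_mem_get_cpu_buf / cam_mem_put_cpu_buf). The usage sketch below is hypothetical: the caller name is invented, the include is assumed to be the header that declares those accessors, and the buffer is assumed to have been created with CAM_MEM_FLAG_KMD_ACCESS so that krefcount was kref_init()'d.

#include <linux/types.h>
#include "cam_mem_mgr_api.h"	/* assumed header declaring cam_mem_get/put_cpu_buf */

static int demo_touch_kmd_buffer(int32_t buf_handle)
{
	uintptr_t cpu_addr;
	size_t len;
	int rc;

	/* takes a krefcount reference via kref_get_unless_zero() */
	rc = cam_mem_get_cpu_buf(buf_handle, &cpu_addr, &len);
	if (rc)
		return rc;

	/* ... access up to len bytes at cpu_addr ... */

	/* drops the krefcount reference; the buffer is actually unmapped only
	 * when neither KMD (krefcount) nor UMD (urefcount) still holds it
	 */
	cam_mem_put_cpu_buf(buf_handle);

	return 0;
}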