msm: camera: smmu: Enhance debug capability for camera memmgr

Improve debugging infrastructure and logging for memory-related
issues. There are scenarios where the fd returned by ion_alloc
already exists in the camera smmu table as being mapped [stale entry].
In such scenarios, this change returns a specific error code to
userspace. The change also propagates the mapped size back to
userspace.

CRs-Fixed: 2663114
Change-Id: Ia797b65d1e8ded58dec5b01df07d73262c4cfa95
Signed-off-by: Karthik Anantha Ram <kartanan@codeaurora.org>
This commit is contained in:
Karthik Anantha Ram
2020-03-10 15:37:33 -07:00
committed by Gerrit - the friendly Code Review server
parent a68684d77c
commit ec23ff584a
4 changed files with 107 additions and 21 deletions

View File

@@ -22,6 +22,39 @@
static struct cam_mem_table tbl;
static atomic_t cam_mem_mgr_state = ATOMIC_INIT(CAM_MEM_MGR_UNINITIALIZED);
static void cam_mem_mgr_print_tbl(void)
{
int i;
uint64_t ms, tmp, hrs, min, sec;
struct timespec64 *ts = NULL;
struct timespec64 current_ts;
ktime_get_real_ts64(&(current_ts));
tmp = current_ts.tv_sec;
ms = (current_ts.tv_nsec) / 1000000;
sec = do_div(tmp, 60);
min = do_div(tmp, 60);
hrs = do_div(tmp, 24);
CAM_INFO(CAM_MEM, "***%llu:%llu:%llu:%llu Mem mgr table dump***",
hrs, min, sec, ms);
for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
if (tbl.bufq[i].active) {
ts = &tbl.bufq[i].timestamp;
tmp = ts->tv_sec;
ms = (ts->tv_nsec) / 1000000;
sec = do_div(tmp, 60);
min = do_div(tmp, 60);
hrs = do_div(tmp, 24);
CAM_INFO(CAM_MEM,
"%llu:%llu:%llu:%llu idx %d fd %d size %llu",
hrs, min, sec, ms, i, tbl.bufq[i].fd,
tbl.bufq[i].len);
}
}
}
static int cam_mem_util_get_dma_dir(uint32_t flags)
{
int rc = -EINVAL;
@@ -185,6 +218,7 @@ static int32_t cam_mem_get_slot(void)
set_bit(idx, tbl.bitmap);
tbl.bufq[idx].active = true;
ktime_get_real_ts64(&(tbl.bufq[idx].timestamp));
mutex_init(&tbl.bufq[idx].q_lock);
mutex_unlock(&tbl.m_lock);
@@ -197,6 +231,7 @@ static void cam_mem_put_slot(int32_t idx)
mutex_lock(&tbl.bufq[idx].q_lock);
tbl.bufq[idx].active = false;
tbl.bufq[idx].is_internal = false;
memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
mutex_unlock(&tbl.bufq[idx].q_lock);
mutex_destroy(&tbl.bufq[idx].q_lock);
clear_bit(idx, tbl.bitmap);
@@ -646,6 +681,7 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
CAM_ERR(CAM_MEM,
"Ion Alloc failed, len=%llu, align=%llu, flags=0x%x, num_hdl=%d",
cmd->len, cmd->align, cmd->flags, cmd->num_hdl);
cam_mem_mgr_print_tbl();
return rc;
}
@@ -683,9 +719,14 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
if (rc) {
CAM_ERR(CAM_MEM,
"Failed in map_hw_va, len=%llu, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d",
cmd->len, cmd->flags, fd, region,
cmd->num_hdl, rc);
"Failed in map_hw_va len=%llu, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d",
len, cmd->flags,
fd, region, cmd->num_hdl, rc);
if (rc == -EALREADY) {
if ((size_t)dmabuf->size != len)
rc = -EBADR;
cam_mem_mgr_print_tbl();
}
goto map_hw_fail;
}
}
@@ -806,9 +847,15 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
is_internal);
if (rc) {
CAM_ERR(CAM_MEM,
"Failed in map_hw_va, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d",
cmd->flags, cmd->fd, CAM_SMMU_REGION_IO,
cmd->num_hdl, rc);
"Failed in map_hw_va, flags=0x%x, fd=%d, len=%llu, region=%d, num_hdl=%d, rc=%d",
cmd->flags, cmd->fd, len,
CAM_SMMU_REGION_IO, cmd->num_hdl, rc);
if (rc == -EALREADY) {
if ((size_t)dmabuf->size != len) {
rc = -EBADR;
cam_mem_mgr_print_tbl();
}
}
goto map_fail;
}
}
@@ -844,7 +891,7 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
cmd->out.vaddr = 0;
cmd->out.size = (uint32_t)len;
CAM_DBG(CAM_MEM,
"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu",
cmd->fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
@@ -1066,6 +1113,7 @@ static int cam_mem_util_unmap(int32_t idx,
tbl.bufq[idx].len = 0;
tbl.bufq[idx].num_hdl = 0;
tbl.bufq[idx].active = false;
memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
mutex_unlock(&tbl.bufq[idx].q_lock);
mutex_destroy(&tbl.bufq[idx].q_lock);
clear_bit(idx, tbl.bitmap);

View File

@@ -42,6 +42,7 @@ enum cam_smmu_mapping_client {
* @active: state of the buffer
* @is_imported: Flag indicating if buffer is imported from an FD in user space
* @is_internal: Flag indicating kernel allocated buffer
* @timestamp: Timestamp at which this entry in tbl was made
*/
struct cam_mem_buf_queue {
struct dma_buf *dma_buf;
@@ -58,6 +59,7 @@ struct cam_mem_buf_queue {
bool active;
bool is_imported;
bool is_internal;
struct timespec64 timestamp;
};
/**

View File

@@ -198,6 +198,7 @@ struct cam_dma_buff_info {
size_t len;
size_t phys_len;
bool is_internal;
struct timespec64 ts;
};
struct cam_sec_buff_info {
@@ -403,6 +404,9 @@ static void cam_smmu_dump_cb_info(int idx)
size_t shared_reg_len = 0, io_reg_len = 0;
size_t shared_free_len = 0, io_free_len = 0;
uint32_t i = 0;
uint64_t ms, tmp, hrs, min, sec;
struct timespec64 *ts = NULL;
struct timespec64 current_ts;
struct cam_context_bank_info *cb_info =
&iommu_cb_set.cb_info[idx];
@@ -416,9 +420,15 @@ static void cam_smmu_dump_cb_info(int idx)
io_free_len = io_reg_len - cb_info->io_mapping_size;
}
ktime_get_real_ts64(&(current_ts));
tmp = current_ts.tv_sec;
ms = (current_ts.tv_nsec) / 1000000;
sec = do_div(tmp, 60);
min = do_div(tmp, 60);
hrs = do_div(tmp, 24);
CAM_ERR(CAM_SMMU,
"********** Context bank dump for %s **********",
cb_info->name[0]);
"********** %llu:%llu:%llu:%llu Context bank dump for %s **********",
hrs, min, sec, ms, cb_info->name[0]);
CAM_ERR(CAM_SMMU,
"Usage: shared_usage=%u io_usage=%u shared_free=%u io_free=%u",
(unsigned int)cb_info->shared_mapping_size,
@@ -430,9 +440,16 @@ static void cam_smmu_dump_cb_info(int idx)
list_for_each_entry_safe(mapping, mapping_temp,
&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
i++;
ts = &mapping->ts;
tmp = ts->tv_sec;
ms = (ts->tv_nsec) / 1000000;
sec = do_div(tmp, 60);
min = do_div(tmp, 60);
hrs = do_div(tmp, 24);
CAM_ERR(CAM_SMMU,
"%u. ion_fd=%d start=0x%x end=0x%x len=%u region=%d",
i, mapping->ion_fd, (void *)mapping->paddr,
"%llu:%llu:%llu:%llu: %u ion_fd=%d start=0x%x end=0x%x len=%u region=%d",
hrs, min, sec, ms, i, mapping->ion_fd,
(void *)mapping->paddr,
((uint64_t)mapping->paddr +
(uint64_t)mapping->len),
(unsigned int)mapping->len,
@@ -2017,6 +2034,7 @@ static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
mapping_info->ion_fd = ion_fd;
mapping_info->is_internal = is_internal;
ktime_get_real_ts64(&mapping_info->ts);
/* add to the list */
list_add(&mapping_info->list,
&iommu_cb_set.cb_info[idx].smmu_buf_list);
@@ -2044,7 +2062,7 @@ static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
}
mapping_info->ion_fd = -1;
ktime_get_real_ts64(&mapping_info->ts);
/* add to the list */
list_add(&mapping_info->list,
&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list);
@@ -2145,7 +2163,8 @@ static int cam_smmu_unmap_buf_and_remove_from_list(
}
static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
int ion_fd, dma_addr_t *paddr_ptr, size_t *len_ptr)
int ion_fd, dma_addr_t *paddr_ptr, size_t *len_ptr,
struct timespec64 **ts_mapping)
{
struct cam_dma_buff_info *mapping;
@@ -2154,6 +2173,7 @@ static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
if (mapping->ion_fd == ion_fd) {
*paddr_ptr = mapping->paddr;
*len_ptr = mapping->len;
*ts_mapping = &mapping->ts;
return CAM_SMMU_BUFF_EXIST;
}
}
@@ -2886,6 +2906,7 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
bool is_internal)
{
int idx, rc = 0;
struct timespec64 *ts = NULL;
enum cam_smmu_buf_state buf_state;
enum dma_data_direction dma_dir;
@@ -2923,11 +2944,23 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
goto get_addr_end;
}
buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr,
len_ptr, &ts);
if (buf_state == CAM_SMMU_BUFF_EXIST) {
uint64_t ms = 0, tmp = 0, hrs = 0, min = 0, sec = 0;
if (ts) {
tmp = ts->tv_sec;
ms = (ts->tv_nsec) / 1000000;
sec = do_div(tmp, 60);
min = do_div(tmp, 60);
hrs = do_div(tmp, 24);
}
CAM_ERR(CAM_SMMU,
"fd:%d already in list idx:%d, handle=%d, give same addr back",
ion_fd, idx, handle);
"fd=%d already in list [%llu:%llu:%lu:%llu] cb=%s idx=%d handle=%d len=%llu,give same addr back",
ion_fd, hrs, min, sec, ms,
iommu_cb_set.cb_info[idx].name[0],
idx, handle, *len_ptr);
rc = -EALREADY;
goto get_addr_end;
}
@@ -2937,8 +2970,9 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, bool dis_delayed_unmap,
region_id, is_internal);
if (rc < 0) {
CAM_ERR(CAM_SMMU,
"mapping or add list fail, idx=%d, fd=%d, region=%d, rc=%d",
idx, ion_fd, region_id, rc);
"mapping or add list fail cb:%s idx=%d, fd=%d, region=%d, rc=%d",
iommu_cb_set.cb_info[idx].name[0], idx,
ion_fd, region_id, rc);
cam_smmu_dump_cb_info(idx);
}
@@ -3012,6 +3046,7 @@ int cam_smmu_get_iova(int handle, int ion_fd,
dma_addr_t *paddr_ptr, size_t *len_ptr)
{
int idx, rc = 0;
struct timespec64 *ts = NULL;
enum cam_smmu_buf_state buf_state;
if (!paddr_ptr || !len_ptr) {
@@ -3051,7 +3086,8 @@ int cam_smmu_get_iova(int handle, int ion_fd,
goto get_addr_end;
}
buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr,
len_ptr, &ts);
if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
rc = -EINVAL;

View File

@@ -343,12 +343,12 @@ struct cam_mem_alloc_out_params {
/**
* struct cam_mem_map_out_params
* @buf_handle: buffer handle
* @reserved: reserved for future
* @size: size of the buffer being mapped
* @vaddr: virtual address pointer
*/
/* Output parameters returned to userspace for a map request. */
struct cam_mem_map_out_params {
	__u32 buf_handle; /* buffer handle */
	__u32 reserved;   /* reserved for future */
	__u32 size;       /* size of the buffer being mapped */
	__u64 vaddr;      /* virtual address pointer */
};