Merge 1aa641e0f3
on remote branch
Change-Id: I09ad89110f508e568a9298cabcba8b7ad8fe6016
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/types.h>

@@ -489,115 +489,253 @@ int cam_cdm_get_ioremap_from_base(uint32_t hw_base,
    return ret;
}

static int cam_cdm_util_reg_cont_write(void __iomem *base_addr,
    uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
static int cam_cdm_util_cmd_buf_validation(void __iomem *base_addr,
    uint32_t base_array_size,
    struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
    uint32_t cmd_buf_size, uint32_t *cmd_buf, void *buf,
    resource_size_t *size,
    enum cam_cdm_command cmd_type)
{
    int ret = 0;
    uint32_t *data;
    struct cdm_regcontinuous_cmd *reg_cont;

    if ((cmd_buf_size < cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) ||
        (!base_addr)) {
        CAM_ERR(CAM_CDM, "invalid base addr and data length %d %pK",
            cmd_buf_size, base_addr);
        return -EINVAL;
    }

    reg_cont = (struct cdm_regcontinuous_cmd *)cmd_buf;
    if ((!reg_cont->count) || (((reg_cont->count * sizeof(uint32_t)) +
        cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) >
        cmd_buf_size)) {
        CAM_ERR(CAM_CDM, "buffer size %d is not sufficient for count%d",
            cmd_buf_size, reg_cont->count);
        return -EINVAL;
    }
    data = cmd_buf + cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
    cam_io_memcpy(base_addr + reg_cont->offset, data,
        reg_cont->count * sizeof(uint32_t));

    *used_bytes = (reg_cont->count * sizeof(uint32_t)) +
        (4 * cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT));

    return ret;
}

static int cam_cdm_util_reg_random_write(void __iomem *base_addr,
    uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
{
    uint32_t i;
    struct cdm_regrandom_cmd *reg_random;
    uint32_t *data;
    int i, ret = 0;

    if (!base_addr) {
        CAM_ERR(CAM_CDM, "invalid base address");
        return -EINVAL;
    }

    reg_random = (struct cdm_regrandom_cmd *) cmd_buf;
    if ((!reg_random->count) || (((reg_random->count * (sizeof(uint32_t) * 2)) +
        cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)) >
        cmd_buf_size)) {
        CAM_ERR(CAM_CDM, "invalid reg_count %d cmd_buf_size %d",
            reg_random->count, cmd_buf_size);
    for (i = 0; i < base_array_size; i++) {
        if ((base_table[i]) &&
            ((base_table[i])->mem_base == base_addr)) {
            *size = (base_table[i])->size;
            break;
        }
    }

    if (*size == 0) {
        CAM_ERR(CAM_CDM, "Could not retrieve ioremap size, address not mapped!");
        return -EINVAL;
    }

    switch (cmd_type) {
    case CAM_CDM_CMD_REG_RANDOM: {
        struct cdm_regrandom_cmd *reg_random = (struct cdm_regrandom_cmd *)buf;
        uint32_t *data, offset;

        if ((!reg_random->count) || (((reg_random->count * (sizeof(uint32_t) * 2)) +
            cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)) >
            cmd_buf_size)) {
            CAM_ERR(CAM_CDM, "invalid reg_count %d cmd_buf_size %d",
                reg_random->count, cmd_buf_size);
            ret = -EINVAL;
        }

        data = cmd_buf + cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);

        for (i = 0; i < reg_random->count; i++) {
            offset = data[0];
            if (offset > *size) {
                CAM_ERR(CAM_CDM, "Offset out of mapped range! size:%llu offset:%u",
                    *size, offset);
                return -EINVAL;
            }
            data += 2;
        }
    }
    break;
    case CAM_CDM_CMD_REG_CONT: {
        struct cdm_regcontinuous_cmd *reg_cont = (struct cdm_regcontinuous_cmd *) buf;

        if ((!reg_cont->count) || (((reg_cont->count * sizeof(uint32_t)) +
            cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) >
            cmd_buf_size)) {
            CAM_ERR(CAM_CDM, "buffer size %d is not sufficient for count%d",
                cmd_buf_size, reg_cont->count);
            ret = -EINVAL;
        }

        if ((reg_cont->offset > *size) && ((reg_cont->offset +
            (reg_cont->count * sizeof(uint32_t))) > *size)) {
            CAM_ERR(CAM_CDM, "Offset out of mapped range! size: %lu, offset: %u",
                *size, reg_cont->offset);
            return -EINVAL;
        }
    }
    break;
    case CAM_CDM_CMD_SWD_DMI_64: {
        struct cdm_dmi_cmd *swd_dmi = (struct cdm_dmi_cmd *) buf;

        if (cmd_buf_size < (cam_cdm_required_size_dmi() + swd_dmi->length + 1)) {
            CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d",
                swd_dmi->length + 1);
            ret = -EINVAL;
        }

        if ((swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET > *size) ||
            (swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_HI_OFFSET > *size)) {
            CAM_ERR(CAM_CDM,
                "Offset out of mapped range! size:%llu lo_offset:%u hi_offset:%u",
                *size, swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET,
                swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
            return -EINVAL;
        }
    }
    break;
    case CAM_CDM_CMD_SWD_DMI_32: {
        struct cdm_dmi_cmd *swd_dmi = (struct cdm_dmi_cmd *) buf;

        if (cmd_buf_size < (cam_cdm_required_size_dmi() + swd_dmi->length + 1)) {
            CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d",
                swd_dmi->length + 1);
            ret = -EINVAL;
        }

        if (swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET > *size) {
            CAM_ERR(CAM_CDM,
                "Offset out of mapped range! size:%llu lo_offset:%u",
                *size, swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
            return -EINVAL;
        }
    }
    break;
    case CAM_CDM_CMD_DMI: {
        struct cdm_dmi_cmd *swd_dmi = (struct cdm_dmi_cmd *) buf;

        if (cmd_buf_size < (cam_cdm_required_size_dmi() + swd_dmi->length + 1)) {
            CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d",
                swd_dmi->length + 1);
            ret = -EINVAL;
        }

        if (swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_OFFSET > *size) {
            CAM_ERR(CAM_CDM,
                "Offset out of mapped range! size:%llu offset:%u",
                *size, swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_OFFSET);
            return -EINVAL;
        }
    }
    break;
    default:
        CAM_ERR(CAM_CDM, "unsupported cdm_cmd_type type 0%x",
            cmd_type);
        ret = -EINVAL;
        break;
    }

    return ret;
}

static int cam_cdm_util_reg_cont_write(void __iomem *base_addr,
    uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes,
    uint32_t base_array_size,
    struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK])
{
    int rc;
    uint32_t *data;
    struct cdm_regcontinuous_cmd reg_cont;
    resource_size_t size = 0;

    memcpy(&reg_cont, cmd_buf, sizeof(struct cdm_regcontinuous_cmd));
    rc = cam_cdm_util_cmd_buf_validation(base_addr, base_array_size, base_table,
        cmd_buf_size, cmd_buf, (void *)&reg_cont,
        &size, CAM_CDM_CMD_REG_CONT);
    if (rc) {
        CAM_ERR(CAM_CDM, "Validation failed! rc=%d", rc);
        return rc;
    }

    data = cmd_buf + cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);

    cam_io_memcpy(base_addr + reg_cont.offset, data,
        reg_cont.count * sizeof(uint32_t));
    *used_bytes = (reg_cont.count * sizeof(uint32_t)) +
        (4 * cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT));

    return rc;
}

static int cam_cdm_util_reg_random_write(void __iomem *base_addr,
    uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes,
    uint32_t base_array_size,
    struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK])
{
    int i, rc;
    struct cdm_regrandom_cmd reg_random;
    uint32_t *data, offset;
    resource_size_t size = 0;

    memcpy(&reg_random, cmd_buf, sizeof(struct cdm_regrandom_cmd));

    rc = cam_cdm_util_cmd_buf_validation(base_addr, base_array_size, base_table,
        cmd_buf_size, cmd_buf, (void *)&reg_random,
        &size, CAM_CDM_CMD_REG_RANDOM);
    if (rc) {
        CAM_ERR(CAM_CDM, "Validation failed! rc=%d", rc);
        return rc;
    }

    data = cmd_buf + cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);

    for (i = 0; i < reg_random->count; i++) {
    for (i = 0; i < reg_random.count; i++) {
        offset = data[0];
        CAM_DBG(CAM_CDM, "reg random: offset %pK, value 0x%x",
            ((void __iomem *)(base_addr + data[0])),
            ((void __iomem *)(base_addr + offset)),
            data[1]);
        cam_io_w(data[1], base_addr + data[0]);
        cam_io_w(data[1], base_addr + offset);
        data += 2;
    }

    *used_bytes = ((reg_random->count * (sizeof(uint32_t) * 2)) +
    *used_bytes = ((reg_random.count * (sizeof(uint32_t) * 2)) +
        (4 * cam_cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)));

    return 0;
    return rc;
}

static int cam_cdm_util_swd_dmi_write(uint32_t cdm_cmd_type,
    void __iomem *base_addr, uint32_t *cmd_buf, uint32_t cmd_buf_size,
    uint32_t *used_bytes)
    uint32_t *used_bytes, uint32_t base_array_size,
    struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK])
{
    uint32_t i;
    struct cdm_dmi_cmd *swd_dmi;
    int i, rc;
    struct cdm_dmi_cmd swd_dmi;
    uint32_t *data;
    resource_size_t size = 0;

    swd_dmi = (struct cdm_dmi_cmd *)cmd_buf;

    if (cmd_buf_size < (cam_cdm_required_size_dmi() + swd_dmi->length + 1)) {
        CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d",
            swd_dmi->length + 1);
        return -EINVAL;
    memcpy(&swd_dmi, cmd_buf, sizeof(struct cdm_dmi_cmd));
    rc = cam_cdm_util_cmd_buf_validation(base_addr, base_array_size, base_table,
        cmd_buf_size, cmd_buf, (void *)&swd_dmi,
        &size, cdm_cmd_type);
    if (rc) {
        CAM_ERR(CAM_CDM, "Validation failed! rc=%d", rc);
        return rc;
    }

    data = cmd_buf + cam_cdm_required_size_dmi();

    if (cdm_cmd_type == CAM_CDM_CMD_SWD_DMI_64) {
        for (i = 0; i < (swd_dmi->length + 1)/8; i++) {
        for (i = 0; i < (swd_dmi.length + 1)/8; i++) {
            cam_io_w_mb(data[0], base_addr +
                swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
                swd_dmi.DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
            cam_io_w_mb(data[1], base_addr +
                swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_HI_OFFSET);
                swd_dmi.DMIAddr + CAM_CDM_DMI_DATA_HI_OFFSET);
            data += 2;
        }
    } else if (cdm_cmd_type == CAM_CDM_CMD_DMI) {
        for (i = 0; i < (swd_dmi->length + 1)/4; i++) {
        for (i = 0; i < (swd_dmi.length + 1)/4; i++) {
            cam_io_w_mb(data[0], base_addr +
                swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_OFFSET);
                swd_dmi.DMIAddr + CAM_CDM_DMI_DATA_OFFSET);
            data += 1;
        }
    } else {
        for (i = 0; i < (swd_dmi->length + 1)/4; i++) {
        for (i = 0; i < (swd_dmi.length + 1)/4; i++) {
            cam_io_w_mb(data[0], base_addr +
                swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
                swd_dmi.DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
            data += 1;
        }
    }
    *used_bytes = (4 * cam_cdm_required_size_dmi()) + swd_dmi->length + 1;
    *used_bytes = (4 * cam_cdm_required_size_dmi()) + swd_dmi.length + 1;

    return 0;
    return rc;
}

int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,

@@ -615,7 +753,8 @@ int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
    switch (cdm_cmd_type) {
    case CAM_CDM_CMD_REG_CONT: {
        ret = cam_cdm_util_reg_cont_write(*current_device_base,
            cmd_buf, cmd_buf_size, &used_bytes);
            cmd_buf, cmd_buf_size, &used_bytes,
            base_array_size, base_table);
        if (ret)
            break;

@@ -628,7 +767,7 @@ int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
    case CAM_CDM_CMD_REG_RANDOM: {
        ret = cam_cdm_util_reg_random_write(
            *current_device_base, cmd_buf, cmd_buf_size,
            &used_bytes);
            &used_bytes, base_array_size, base_table);
        if (ret)
            break;

@@ -650,7 +789,7 @@ int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
    }
    ret = cam_cdm_util_swd_dmi_write(cdm_cmd_type,
        *current_device_base, cmd_buf, cmd_buf_size,
        &used_bytes);
        &used_bytes, base_array_size, base_table);
    if (ret)
        break;

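The validation introduced above follows one pattern: resolve the size of the ioremapped block that base_addr belongs to, then reject any offset taken from the command buffer that would land outside that block before touching the hardware. A minimal standalone sketch of the same idea, in plain user-space C with illustrative names (reg_block, reg_write_checked), not the driver's own code:

/* Simplified model of the bounds check added above: a register write taken
 * from a command buffer is only applied if the target offset fits inside the
 * mapped block. All names here are illustrative. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct reg_block {
    uint8_t *base;   /* start of the "mapped" register block */
    size_t   size;   /* size of the block in bytes */
};

/* Returns 0 on success, -1 if the offset would fall outside the block. */
static int reg_write_checked(struct reg_block *blk, uint32_t offset, uint32_t value)
{
    if (offset > blk->size - sizeof(uint32_t)) {
        fprintf(stderr, "offset %u out of mapped range (size %zu)\n",
            offset, blk->size);
        return -1;
    }
    memcpy(blk->base + offset, &value, sizeof(value));
    return 0;
}

int main(void)
{
    uint8_t regs[64] = {0};
    struct reg_block blk = { .base = regs, .size = sizeof(regs) };

    /* In-range write succeeds, out-of-range write is rejected. */
    printf("write @0x10 -> %d\n", reg_write_checked(&blk, 0x10, 0xabcd));
    printf("write @0x80 -> %d\n", reg_write_checked(&blk, 0x80, 0xabcd));
    return 0;
}

The driver does the equivalent with cam_io_memcpy()/cam_io_w() against the ioremapped base once cam_cdm_util_cmd_buf_validation() has resolved the block size from base_table.
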
@@ -1041,7 +1041,6 @@ static int cam_tfe_bus_start_wm(struct cam_isp_resource_node *wm_res)
    cam_io_w(rsrc_data->common_data->counter_limit_mask,
        common_data->mem_base + rsrc_data->hw_regs->bw_limit);

    cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->bw_limit);
    cam_io_w((rsrc_data->height << height_shift) | rsrc_data->width,
        common_data->mem_base + rsrc_data->hw_regs->image_cfg_0);
    cam_io_w(rsrc_data->pack_fmt,

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>

@@ -100,10 +100,11 @@ static void cam_mem_mgr_print_tbl(void)
    for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
        CAM_CONVERT_TIMESTAMP_FORMAT((tbl.bufq[i].timestamp), hrs, min, sec, ms);
        CAM_INFO(CAM_MEM,
            "%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d refCount %d buf_name %s",
            "%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d krefCount %d urefCount %d buf_name %s",
            hrs, min, sec, ms, i, tbl.bufq[i].fd, tbl.bufq[i].i_ino,
            tbl.bufq[i].len, tbl.bufq[i].active, tbl.bufq[i].buf_handle,
            kref_read(&tbl.bufq[i].krefcount), tbl.bufq[i].buf_name);
            kref_read(&tbl.bufq[i].krefcount), kref_read(&tbl.bufq[i].urefcount),
            tbl.bufq[i].buf_name);
    }

}

@@ -352,6 +353,8 @@ static void cam_mem_put_slot(int32_t idx)
    tbl.bufq[idx].release_deferred = false;
    tbl.bufq[idx].is_internal = false;
    memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
    kref_init(&tbl.bufq[idx].krefcount);
    kref_init(&tbl.bufq[idx].urefcount);
    mutex_unlock(&tbl.bufq[idx].q_lock);
    mutex_destroy(&tbl.bufq[idx].q_lock);
    clear_bit(idx, tbl.bitmap);

@@ -1416,7 +1419,12 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd_v2 *cmd)
    tbl.bufq[idx].num_hdls = cmd->num_hdl;
    cam_mem_mgr_reset_presil_params(idx);
    tbl.bufq[idx].is_imported = false;
    kref_init(&tbl.bufq[idx].krefcount);

    if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS)
        kref_init(&tbl.bufq[idx].krefcount);

    kref_init(&tbl.bufq[idx].urefcount);

    tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_USER;
    strscpy(tbl.bufq[idx].buf_name, cmd->buf_name, sizeof(tbl.bufq[idx].buf_name));
    mutex_unlock(&tbl.bufq[idx].q_lock);

@@ -1551,7 +1559,9 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd_v2 *cmd)
    tbl.bufq[idx].num_hdls = cmd->num_hdl;
    tbl.bufq[idx].is_imported = true;
    tbl.bufq[idx].is_internal = is_internal;
    kref_init(&tbl.bufq[idx].krefcount);
    if (cmd->flags & CAM_MEM_FLAG_KMD_ACCESS)
        kref_init(&tbl.bufq[idx].krefcount);
    kref_init(&tbl.bufq[idx].urefcount);
    tbl.bufq[idx].smmu_mapping_client = CAM_SMMU_MAPPING_USER;
    strscpy(tbl.bufq[idx].buf_name, cmd->buf_name, sizeof(tbl.bufq[idx].buf_name));
    mutex_unlock(&tbl.bufq[idx].q_lock);

@@ -1696,6 +1706,8 @@ static int cam_mem_mgr_cleanup_table(void)
    tbl.bufq[i].is_internal = false;
    memset(tbl.bufq[i].hdls_info, 0x0, tbl.max_hdls_info_size);
    cam_mem_mgr_reset_presil_params(i);
    kref_init(&tbl.bufq[i].krefcount);
    kref_init(&tbl.bufq[i].urefcount);
    mutex_unlock(&tbl.bufq[i].q_lock);
    mutex_destroy(&tbl.bufq[i].q_lock);
}

@@ -1734,16 +1746,17 @@ void cam_mem_mgr_deinit(void)
    mutex_destroy(&tbl.m_lock);
}

static void cam_mem_util_unmap(struct kref *kref)
static void cam_mem_util_unmap_dummy(struct kref *kref)
{
    CAM_DBG(CAM_MEM, "Cam mem util unmap dummy");
}

static void cam_mem_util_unmap(int32_t idx)
{
    int rc = 0;
    int32_t idx;
    enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
    enum cam_smmu_mapping_client client;
    struct cam_mem_buf_queue *bufq =
        container_of(kref, typeof(*bufq), krefcount);

    idx = CAM_MEM_MGR_GET_HDL_IDX(bufq->buf_handle);
    if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
        CAM_ERR(CAM_MEM, "Incorrect index");
        return;

@@ -1819,6 +1832,8 @@ static void cam_mem_util_unmap(struct kref *kref)
    memset(tbl.bufq[idx].hdls_info, 0x0, tbl.max_hdls_info_size);
    cam_mem_mgr_reset_presil_params(idx);
    memset(&tbl.bufq[idx].timestamp, 0, sizeof(struct timespec64));
    memset(&tbl.bufq[idx].krefcount, 0, sizeof(struct kref));
    memset(&tbl.bufq[idx].urefcount, 0, sizeof(struct kref));
    mutex_unlock(&tbl.bufq[idx].q_lock);
    mutex_destroy(&tbl.bufq[idx].q_lock);
    clear_bit(idx, tbl.bitmap);

@@ -1826,11 +1841,27 @@ static void cam_mem_util_unmap(struct kref *kref)

}

static void cam_mem_util_unmap_wrapper(struct kref *kref)
{
    int32_t idx;
    struct cam_mem_buf_queue *bufq = container_of(kref, typeof(*bufq), krefcount);

    idx = CAM_MEM_MGR_GET_HDL_IDX(bufq->buf_handle);
    if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
        CAM_ERR(CAM_MEM, "idx: %d not valid", idx);
        return;
    }

    cam_mem_util_unmap(idx);
}

void cam_mem_put_cpu_buf(int32_t buf_handle)
{
    int idx;
    uint64_t ms, hrs, min, sec;
    struct timespec64 current_ts;
    uint32_t krefcount = 0, urefcount = 0;
    bool unmap = false;

    if (!buf_handle) {
        CAM_ERR(CAM_MEM, "Invalid buf_handle");

@@ -1854,7 +1885,16 @@ void cam_mem_put_cpu_buf(int32_t buf_handle)
        return;
    }

    if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap)) {
    kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_dummy);

    krefcount = kref_read(&tbl.bufq[idx].krefcount);
    urefcount = kref_read(&tbl.bufq[idx].urefcount);

    if ((krefcount == 1) && (urefcount == 0))
        unmap = true;

    if (unmap) {
        cam_mem_util_unmap(idx);
        CAM_GET_TIMESTAMP(current_ts);
        CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms);
        CAM_DBG(CAM_MEM,

@@ -1863,15 +1903,19 @@ void cam_mem_put_cpu_buf(int32_t buf_handle)
    } else if (tbl.bufq[idx].release_deferred) {
        CAM_CONVERT_TIMESTAMP_FORMAT((tbl.bufq[idx].timestamp), hrs, min, sec, ms);
        CAM_ERR(CAM_MEM,
            "%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d refCount %d buf_name %s",
            "%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d krefCount %d urefCount %d buf_name %s",
            hrs, min, sec, ms, idx, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino,
            tbl.bufq[idx].len, tbl.bufq[idx].active, tbl.bufq[idx].buf_handle,
            kref_read(&tbl.bufq[idx].krefcount), tbl.bufq[idx].buf_name);
            krefcount, urefcount, tbl.bufq[idx].buf_name);
        CAM_GET_TIMESTAMP(current_ts);
        CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms);
        CAM_ERR(CAM_MEM,
            "%llu:%llu:%llu:%llu Not unmapping even after defer, buf_handle: %u, idx: %d",
            hrs, min, sec, ms, buf_handle, idx);
    } else if (krefcount == 0) {
        CAM_ERR(CAM_MEM,
            "Unbalanced release Called buf_handle: %u, idx: %d",
            tbl.bufq[idx].buf_handle, idx);
    }
}
EXPORT_SYMBOL(cam_mem_put_cpu_buf);

@@ -1883,6 +1927,8 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
    int rc = 0;
    uint64_t ms, hrs, min, sec;
    struct timespec64 current_ts;
    uint32_t krefcount = 0, urefcount = 0;
    bool unmap = false;

    if (!atomic_read(&cam_mem_mgr_state)) {
        CAM_ERR(CAM_MEM, "failed. mem_mgr not initialized");

@@ -1914,22 +1960,40 @@ int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
    }

    CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx);
    if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap)) {
        CAM_DBG(CAM_MEM,
            "Called unmap from here, buf_handle: %u, idx: %d",
            cmd->buf_handle, idx);

    kref_put(&tbl.bufq[idx].urefcount, cam_mem_util_unmap_dummy);

    urefcount = kref_read(&tbl.bufq[idx].urefcount);

    if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
        krefcount = kref_read(&tbl.bufq[idx].krefcount);
        if ((krefcount == 1) && (urefcount == 0))
            unmap = true;
    } else {
        if (urefcount == 0)
            unmap = true;
    }

    if (unmap) {
        cam_mem_util_unmap(idx);
        CAM_DBG(CAM_MEM,
            "Called unmap from here, buf_handle: %u, idx: %d", cmd->buf_handle, idx);
    } else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
        rc = -EINVAL;
        CAM_GET_TIMESTAMP(current_ts);
        CAM_CONVERT_TIMESTAMP_FORMAT(current_ts, hrs, min, sec, ms);
        CAM_CONVERT_TIMESTAMP_FORMAT((tbl.bufq[idx].timestamp), hrs, min, sec, ms);
        CAM_ERR(CAM_MEM,
            "%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d refCount %d buf_name %s",
            "%llu:%llu:%llu:%llu idx %d fd %d i_ino %lu size %llu active %d buf_handle %d krefCount %d urefCount %d buf_name %s",
            hrs, min, sec, ms, idx, tbl.bufq[idx].fd, tbl.bufq[idx].i_ino,
            tbl.bufq[idx].len, tbl.bufq[idx].active, tbl.bufq[idx].buf_handle,
            kref_read(&tbl.bufq[idx].krefcount), tbl.bufq[idx].buf_name);
            krefcount, urefcount, tbl.bufq[idx].buf_name);
        if (tbl.bufq[idx].release_deferred)
            CAM_ERR(CAM_MEM, "Unbalanced release Called buf_handle: %u, idx: %d",
                tbl.bufq[idx].buf_handle, idx);
        tbl.bufq[idx].release_deferred = true;
    }

    return rc;
}

@@ -2099,7 +2163,7 @@ int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
    }

    CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
    if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
    if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_wrapper))
        CAM_DBG(CAM_MEM,
            "Called unmap from here, buf_handle: %u, idx: %d",
            tbl.bufq[idx].buf_handle, idx);

@@ -2393,7 +2457,7 @@ int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
    }

    CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
    if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap))
    if (kref_put(&tbl.bufq[idx].krefcount, cam_mem_util_unmap_wrapper))
        CAM_DBG(CAM_MEM,
            "Called unmap from here, buf_handle: %u, idx: %d",
            inp->mem_handle, idx);

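The cam_mem_mgr changes above split the single reference count into krefcount (kernel-side users) and urefcount (userspace users): the kernel count is only initialised when CAM_MEM_FLAG_KMD_ACCESS is set, and the buffer is actually unmapped only once the user count reaches zero and any kernel count has dropped back to one. A simplified standalone model of that release decision, with illustrative names and plain integers instead of struct kref:

/* Simplified model of the krefcount/urefcount split; names are illustrative,
 * not the driver's. */
#include <stdbool.h>
#include <stdio.h>

struct buf_entry {
    bool kmd_access;  /* was the buffer mapped for kernel-side use? */
    int  krefcount;   /* kernel-side references */
    int  urefcount;   /* user-side references */
};

static void buf_unmap(struct buf_entry *b)
{
    printf("unmapping buffer (kref=%d, uref=%d)\n", b->krefcount, b->urefcount);
}

/* Userspace release: drop one user reference and unmap only when nothing
 * else holds the buffer, mirroring the unmap decision shown in the
 * cam_mem_mgr_release hunk above. */
static void buf_user_release(struct buf_entry *b)
{
    if (b->urefcount > 0)
        b->urefcount--;

    bool unmap = b->kmd_access ? (b->krefcount == 1 && b->urefcount == 0)
                               : (b->urefcount == 0);
    if (unmap)
        buf_unmap(b);
    else
        printf("release deferred (kref=%d, uref=%d)\n", b->krefcount, b->urefcount);
}

int main(void)
{
    struct buf_entry b = { .kmd_access = true, .krefcount = 2, .urefcount = 1 };
    buf_user_release(&b);   /* deferred: a kernel reference is still held */
    b.krefcount = 1;
    b.urefcount = 1;
    buf_user_release(&b);   /* now unmapped */
    return 0;
}

In the diff itself, cam_mem_mgr_release() applies this decision after dropping urefcount with the dummy release callback, while cam_mem_mgr_release_mem() and cam_mem_mgr_free_memory_region() stay on the plain kref_put() path via cam_mem_util_unmap_wrapper().
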
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _CAM_MEM_MGR_H_

@@ -82,10 +82,12 @@ struct cam_mem_buf_hw_hdl_info {
 * @is_internal: Flag indicating kernel allocated buffer
 * @timestamp: Timestamp at which this entry in tbl was made
 * @krefcount: Reference counter to track whether the buffer is
 *             mapped and in use
 *             mapped and in use by kmd
 * @smmu_mapping_client: Client buffer (User or kernel)
 * @buf_name: Name associated with buffer.
 * @presil_params: Parameters specific to presil environment
 * @urefcount: Reference counter to track whether the buffer is
 *             mapped and in use by umd
 */
struct cam_mem_buf_queue {
    struct dma_buf *dma_buf;

@@ -111,6 +113,7 @@ struct cam_mem_buf_queue {
#ifdef CONFIG_CAM_PRESIL
    struct cam_presil_dmabuf_params presil_params;
#endif
    struct kref urefcount;
};

/**

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "tpg_hw.h"

@@ -1086,6 +1086,12 @@ int tpg_hw_copy_settings_config(
        return -EINVAL;
    }

    if (settings->settings_array_offset >
        sizeof(struct tpg_settings_config_t)) {
        CAM_ERR(CAM_TPG, "Invalid Array Offset");
        return -EINVAL;
    }

    reg_settings = (struct tpg_reg_settings *)
        ((uint8_t *)settings + settings->settings_array_offset);