@@ -1529,7 +1529,6 @@ static int cam_context_user_dump(struct cam_context *ctx,
    struct cam_context_dump_header *hdr;
    uint8_t *dst;
    uint64_t *addr, *start;
    uint32_t min_len;
    size_t buf_len, remain_len;
    uintptr_t cpu_addr;
    uint32_t local_len;
@@ -1556,38 +1555,16 @@ static int cam_context_user_dump(struct cam_context *ctx,
        return -ENOSPC;
    }

    spin_lock_bh(&ctx->lock);
    if (!list_empty(&ctx->active_req_list)) {
        req = list_first_entry(&ctx->active_req_list,
            struct cam_ctx_request, list);
    } else if (!list_empty(&ctx->wait_req_list)) {
        req = list_first_entry(&ctx->wait_req_list,
            struct cam_ctx_request, list);
    } else if (!list_empty(&ctx->pending_req_list)) {
        req = list_first_entry(&ctx->pending_req_list,
            struct cam_ctx_request, list);
    } else {
        CAM_ERR(CAM_CTXT, "[%s][%d] no request to dump",
            ctx->dev_name, ctx->ctx_id);
    }
    spin_unlock_bh(&ctx->lock);

    /* Check for min len in case of available request to dump */
    if (req != NULL) {
        remain_len = buf_len - dump_args->offset;
        min_len = sizeof(struct cam_context_dump_header) +
            (CAM_CTXT_DUMP_NUM_WORDS + req->num_in_map_entries +
            (req->num_out_map_entries * 2)) * sizeof(uint64_t);

        if (remain_len < min_len) {
            CAM_WARN(CAM_CTXT, "dump buffer exhaust remain %zu min %u",
                remain_len, min_len);
            cam_mem_put_cpu_buf(dump_args->buf_handle);
            return -ENOSPC;
        }
    }

    /* Dump context info */
    remain_len = buf_len - dump_args->offset;
    if (remain_len < sizeof(struct cam_context_dump_header)) {
        CAM_WARN(CAM_CTXT,
            "No sufficient space in dump buffer for headers, remain buf size: %d, header size: %d",
            remain_len, sizeof(struct cam_context_dump_header));
        cam_mem_put_cpu_buf(dump_args->buf_handle);
        return -ENOSPC;
    }

    dst = (uint8_t *)cpu_addr + dump_args->offset;
    hdr = (struct cam_context_dump_header *)dst;
    local_len =
@@ -1611,6 +1588,15 @@ static int cam_context_user_dump(struct cam_context *ctx,
    if (!list_empty(&ctx->wait_req_list)) {
        list_for_each_entry_safe(req, req_temp, &ctx->wait_req_list, list) {
            for (i = 0; i < req->num_out_map_entries; i++) {
                remain_len = buf_len - dump_args->offset;
                if (remain_len < sizeof(struct cam_context_dump_header)) {
                    CAM_WARN(CAM_CTXT,
                        "No sufficient space in dump buffer for headers, remain buf size: %d, header size: %d",
                        remain_len, sizeof(struct cam_context_dump_header));
                    cam_mem_put_cpu_buf(dump_args->buf_handle);
                    return -ENOSPC;
                }

                dst = (uint8_t *)cpu_addr + dump_args->offset;
                hdr = (struct cam_context_dump_header *)dst;
                local_len = dump_args->offset +
@@ -1643,6 +1629,15 @@ static int cam_context_user_dump(struct cam_context *ctx,
    if (!list_empty(&ctx->pending_req_list)) {
        list_for_each_entry_safe(req, req_temp, &ctx->pending_req_list, list) {
            for (i = 0; i < req->num_out_map_entries; i++) {
                remain_len = buf_len - dump_args->offset;
                if (remain_len < sizeof(struct cam_context_dump_header)) {
                    CAM_WARN(CAM_CTXT,
                        "No sufficient space in dump buffer for headers, remain buf size: %d, header size: %d",
                        remain_len, sizeof(struct cam_context_dump_header));
                    cam_mem_put_cpu_buf(dump_args->buf_handle);
                    return -ENOSPC;
                }

                dst = (uint8_t *)cpu_addr + dump_args->offset;
                hdr = (struct cam_context_dump_header *)dst;
                local_len = dump_args->offset +
@@ -1675,6 +1670,15 @@ static int cam_context_user_dump(struct cam_context *ctx,
    if (!list_empty(&ctx->active_req_list)) {
        list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
            for (i = 0; i < req->num_out_map_entries; i++) {
                remain_len = buf_len - dump_args->offset;
                if (remain_len < sizeof(struct cam_context_dump_header)) {
                    CAM_WARN(CAM_CTXT,
                        "No sufficient space in dump buffer for headers, remain buf size: %d, header size: %d",
                        remain_len, sizeof(struct cam_context_dump_header));
                    cam_mem_put_cpu_buf(dump_args->buf_handle);
                    return -ENOSPC;
                }

                dst = (uint8_t *)cpu_addr + dump_args->offset;
                hdr = (struct cam_context_dump_header *)dst;
                local_len = dump_args->offset +

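Every hunk in this dump routine applies the same guard: before a cam_context_dump_header is written at the current offset, the remaining room in the destination buffer is re-computed, and on failure the CPU mapping is released with cam_mem_put_cpu_buf() before returning -ENOSPC. A minimal stand-alone sketch of that guard follows; the struct names and fields are hypothetical stand-ins, not the driver's own.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-ins for the driver's dump header and dump args. */
struct dump_header { uint64_t size; char tag[32]; };
struct dump_args   { uint8_t *cpu_addr; size_t buf_len; size_t offset; };

/* Write one header at the current offset, or fail without touching the
 * buffer when the remaining space cannot hold it. */
int emit_header(struct dump_args *args, const char *tag)
{
    size_t remain;

    if (args->offset > args->buf_len)
        return -1;
    remain = args->buf_len - args->offset;
    if (remain < sizeof(struct dump_header))
        return -1;                       /* caller unmaps the buffer */

    struct dump_header *hdr =
        (struct dump_header *)(args->cpu_addr + args->offset);
    memset(hdr, 0, sizeof(*hdr));
    strncpy(hdr->tag, tag, sizeof(hdr->tag) - 1);
    args->offset += sizeof(*hdr);
    return 0;
}
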
@@ -5393,7 +5393,8 @@ static bool cam_icp_mgr_is_valid_outconfig(struct cam_packet *packet)
        packet->io_configs_offset/4);

    for (i = 0 ; i < packet->num_io_configs; i++)
        if (io_cfg_ptr[i].direction == CAM_BUF_OUTPUT)
        if ((io_cfg_ptr[i].direction == CAM_BUF_OUTPUT) ||
            (io_cfg_ptr[i].direction == CAM_BUF_IN_OUT))
            num_out_map_entries++;

    if (num_out_map_entries <= CAM_MAX_OUT_RES) {
@@ -5550,13 +5551,20 @@ static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
        if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
            sync_in_obj[j++] = io_cfg_ptr[i].fence;
            prepare_args->num_in_map_entries++;
        } else {
        } else if ((io_cfg_ptr[i].direction == CAM_BUF_OUTPUT) ||
            (io_cfg_ptr[i].direction == CAM_BUF_IN_OUT)) {
            prepare_args->out_map_entries[k].sync_id =
                io_cfg_ptr[i].fence;
            prepare_args->out_map_entries[k].resource_handle =
                io_cfg_ptr[i].resource_type;
            k++;
            prepare_args->num_out_map_entries++;
        } else {
            CAM_ERR(CAM_ICP, "dir: %d, max_out:%u, out %u",
                io_cfg_ptr[i].direction,
                prepare_args->max_out_map_entries,
                prepare_args->num_out_map_entries);
            return -EINVAL;
        }

        CAM_DBG(CAM_REQ,
@@ -5784,6 +5792,11 @@ static int cam_icp_packet_generic_blob_handler(void *user_data,

    switch (blob_type) {
    case CAM_ICP_CMD_GENERIC_BLOB_CLK:
        if (index < 0) {
            CAM_ERR(CAM_ICP, "Invalid index %d", index);
            return -EINVAL;
        }

        CAM_WARN_RATE_LIMIT_CUSTOM(CAM_PERF, 300, 1,
            "Using deprecated blob type GENERIC_BLOB_CLK");
        if (blob_size != sizeof(struct cam_icp_clk_bw_request)) {
@@ -5815,6 +5828,11 @@ static int cam_icp_packet_generic_blob_handler(void *user_data,
        break;

    case CAM_ICP_CMD_GENERIC_BLOB_CLK_V2:
        if (index < 0) {
            CAM_ERR(CAM_ICP, "Invalid index %d", index);
            return -EINVAL;
        }

        if (blob_size < sizeof(struct cam_icp_clk_bw_request_v2)) {
            CAM_ERR(CAM_ICP, "%s: Mismatch blob size %d expected %lu",
                ctx_data->ctx_id_string,
@@ -5962,6 +5980,11 @@ static int cam_icp_packet_generic_blob_handler(void *user_data,
        break;

    case CAM_ICP_CMD_GENERIC_BLOB_PRESIL_HANGDUMP:
        if (index < 0) {
            CAM_ERR(CAM_ICP, "Invalid index %d", index);
            return -EINVAL;
        }

        if (cam_presil_mode_enabled()) {
            cmd_mem_regions = (struct cam_cmd_mem_regions *)blob_data;
            if (cmd_mem_regions->num_regions <= 0) {

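The ICP hunks share two ideas: the blob handler rejects a negative index before each blob type is processed, and the io-config paths count CAM_BUF_IN_OUT buffers as outputs while refusing any unrecognized direction instead of falling through to the output branch. A rough sketch of that direction classification, using hypothetical enum values rather than the UAPI ones:

#include <stdio.h>

/* Hypothetical direction values mirroring the buffer-direction enum. */
enum buf_dir { BUF_INPUT, BUF_OUTPUT, BUF_IN_OUT };

/* Count entries that need an output fence; unknown directions are errors. */
static int count_out_entries(const enum buf_dir *dir, int n, int *out_count)
{
    int i, count = 0;

    for (i = 0; i < n; i++) {
        if (dir[i] == BUF_INPUT)
            continue;
        else if (dir[i] == BUF_OUTPUT || dir[i] == BUF_IN_OUT)
            count++;
        else
            return -1;          /* reject unexpected direction */
    }
    *out_count = count;
    return 0;
}

int main(void)
{
    enum buf_dir dirs[] = { BUF_INPUT, BUF_OUTPUT, BUF_IN_OUT };
    int out = 0;

    if (!count_out_entries(dirs, 3, &out))
        printf("out entries: %d\n", out);   /* prints 2 */
    return 0;
}
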
@@ -286,9 +286,8 @@ static int __cam_isp_ctx_dump_event_record(
        return -ENOSPC;
    }

    min_len = sizeof(struct cam_isp_context_dump_header) +
        ((num_entries * CAM_ISP_CTX_DUMP_EVENT_NUM_WORDS) *
        sizeof(uint64_t));
    min_len = (sizeof(struct cam_isp_context_dump_header) +
        (CAM_ISP_CTX_DUMP_EVENT_NUM_WORDS * sizeof(uint64_t))) * num_entries;
    remain_len = dump_args->buf_len - dump_args->offset;

    if (remain_len < min_len) {
@@ -5598,10 +5597,10 @@ hw_dump:
        (CAM_ISP_CTX_DUMP_NUM_WORDS * sizeof(uint64_t));

    if (remain_len < min_len) {
        spin_unlock_bh(&ctx->lock);
        CAM_WARN(CAM_ISP,
            "Dump buffer exhaust remain %zu min %u, ctx_idx: %u, link: 0x%x",
            remain_len, min_len, ctx->ctx_id, ctx->link_hdl);
        spin_unlock_bh(&ctx->lock);
        cam_mem_put_cpu_buf(dump_info->buf_handle);
        return -ENOSPC;
    }
@@ -5635,6 +5634,18 @@ hw_dump:
    }
    dump_info->offset = dump_args.offset;

    min_len = sizeof(struct cam_isp_context_dump_header) +
        (CAM_ISP_CTX_DUMP_NUM_WORDS * sizeof(int32_t));
    remain_len = buf_len - dump_info->offset;
    if (remain_len < min_len) {
        CAM_WARN(CAM_ISP,
            "Dump buffer exhaust remain %zu min %u, ctx_idx: %u, link: 0x%x",
            remain_len, min_len, ctx->ctx_id, ctx->link_hdl);
        spin_unlock_bh(&ctx->lock);
        cam_mem_put_cpu_buf(dump_info->buf_handle);
        return -ENOSPC;
    }

    /* Dump stream info */
    ctx->ctxt_to_hw_map = ctx_isp->hw_ctx;
    if (ctx->hw_mgr_intf->hw_dump) {
@@ -5649,6 +5660,17 @@ hw_dump:
        goto end;
    }

    dump_info->offset = dump_args.offset;
    remain_len = buf_len - dump_info->offset;
    if (remain_len < min_len) {
        CAM_WARN(CAM_ISP,
            "Dump buffer exhaust remain %zu min %u, ctx_idx: %u, link: 0x%x",
            remain_len, min_len, ctx->ctx_id, ctx->link_hdl);
        spin_unlock_bh(&ctx->lock);
        cam_mem_put_cpu_buf(dump_info->buf_handle);
        return -ENOSPC;
    }

    /* Dump second part of stream info from ife hw manager */
    hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;

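The ISP hunks re-validate the remaining dump space after each stage that advances the offset (event record, stream info, hw-manager dump), and they release the context lock and the CPU buffer on the error path. A compact sketch of re-checking between stages, with hypothetical sizes and stage bodies:

#include <stddef.h>
#include <stdint.h>

struct dump_state { size_t buf_len; size_t offset; };

/* Hypothetical stage cost: one header plus a few 64-bit words. */
#define HEADER_SZ 64u
#define NUM_WORDS 5u

static int check_space(const struct dump_state *st, size_t min_len)
{
    size_t remain = st->buf_len - st->offset;

    return (remain < min_len) ? -1 : 0;
}

int dump_all(struct dump_state *st)
{
    size_t min_len = HEADER_SZ + NUM_WORDS * sizeof(uint64_t);

    if (check_space(st, min_len))
        return -1;              /* -ENOSPC in the driver */
    st->offset += min_len;      /* stage 1: event record / context info */

    if (check_space(st, min_len))
        return -1;              /* re-check: stage 1 moved the offset */
    st->offset += min_len;      /* stage 2: stream info */

    return 0;
}
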
@@ -157,6 +157,20 @@ static void *cam_ife_hw_mgr_get_hw_intf(
    return NULL;
}


static int cam_ife_mgr_get_first_valid_csid_id(void)
{
    int i = 0;

    for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
        if (g_ife_hw_mgr.csid_devices[i]) {
            CAM_DBG(CAM_ISP, "valid csid_id %d", i);
            return i;
        }
    }
    return 0;
}

static int cam_ife_mgr_update_core_info_to_cpas(struct cam_ife_hw_mgr_ctx *ctx,
    bool set_port)
{
@@ -802,8 +816,7 @@ static inline bool cam_ife_hw_mgr_is_ife_out_port(uint32_t res_id)
    bool is_ife_out = false;

    if ((res_id >= CAM_ISP_IFE_OUT_RES_BASE) &&
        (res_id <= (CAM_ISP_IFE_OUT_RES_BASE +
        max_ife_out_res)))
        (res_id < (CAM_ISP_IFE_OUT_RES_BASE + max_ife_out_res)))
        is_ife_out = true;

    return is_ife_out;
@@ -2946,6 +2959,7 @@ static int cam_ife_hw_mgr_acquire_res_sfe_src(
    uint32_t sfe_res_id = 0;
    struct cam_ife_hw_mgr *hw_mgr;
    struct cam_isp_hw_mgr_res *csid_res_map[CAM_ISP_HW_SFE_IN_MAX];
    int valid_id = 0;

    hw_mgr = ife_ctx->hw_mgr;
    list_for_each_entry(csid_res, &ife_ctx->res_list_ife_csid, list) {
@@ -2992,9 +3006,11 @@ static int cam_ife_hw_mgr_acquire_res_sfe_src(
     * 1. No read count
     * 2. Dynamic switch from SHDR-->HDR and HDR-->SHDR is possible
     */
    valid_id = cam_ife_mgr_get_first_valid_csid_id();
    if ((!(sfe_required_res & BIT(CAM_ISP_HW_SFE_IN_PIX))) &&
        (!in_port->ife_rd_count || in_port->dynamic_hdr_switch_en) &&
        (BIT(csid_res->res_id) == hw_mgr->csid_hw_caps[0].sfe_ipp_input_rdi_res)) {
        (BIT(csid_res->res_id) ==
        hw_mgr->csid_hw_caps[valid_id].sfe_ipp_input_rdi_res)) {
        sfe_required_res |= BIT(CAM_ISP_HW_SFE_IN_PIX);
        csid_res_map[CAM_ISP_HW_SFE_IN_PIX] = csid_res;
    }
@@ -3740,13 +3756,14 @@ static bool cam_ife_hw_mgr_is_need_csid_ipp(
{
    struct cam_ife_hw_mgr *hw_mgr;
    bool need = true;

    int valid_id = 0;
    hw_mgr = ife_ctx->hw_mgr;

    valid_id = cam_ife_mgr_get_first_valid_csid_id();
    if (!(in_port->ipp_count || in_port->lcr_count))
        need = false;
    else if (ife_ctx->ctx_type == CAM_IFE_CTX_TYPE_SFE &&
        ((hw_mgr->csid_hw_caps[0].sfe_ipp_input_rdi_res && !in_port->usage_type) ||
        ((hw_mgr->csid_hw_caps[valid_id].sfe_ipp_input_rdi_res && !in_port->usage_type) ||
        in_port->ife_rd_count))
        need = false;

@@ -4097,11 +4114,13 @@ static int cam_ife_hw_mgr_get_csid_rdi_for_sfe_ipp_input(
    struct cam_ife_hw_mgr *hw_mgr;
    uint32_t res_id = CAM_IFE_PIX_PATH_RES_MAX;
    int rc = 0;
    int valid_id;

    hw_mgr = ife_ctx->hw_mgr;

    if (hw_mgr->csid_hw_caps[0].sfe_ipp_input_rdi_res && !in_port->usage_type)
        res_id = ffs(hw_mgr->csid_hw_caps[0].sfe_ipp_input_rdi_res) - 1;
    valid_id = cam_ife_mgr_get_first_valid_csid_id();
    if (hw_mgr->csid_hw_caps[valid_id].sfe_ipp_input_rdi_res && !in_port->usage_type)
        res_id = ffs(hw_mgr->csid_hw_caps[valid_id].sfe_ipp_input_rdi_res) - 1;

    if ((res_id != CAM_IFE_PIX_PATH_RES_MAX) && (!(BIT(res_id) & (*acquired_rdi_res)))) {
        rc = cam_ife_hw_mgr_acquire_csid_rdi_util(ife_ctx,

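The IFE hunks replace hard-coded csid_hw_caps[0] lookups with the index of the first CSID device that actually probed, so a platform without CSID0 does not consult an unpopulated capability entry; the out-port range check also becomes exclusive of the upper bound. A small sketch of the first-valid-index lookup, with a hypothetical device table:

#include <stdio.h>

#define CSID_HW_NUM_MAX 4

static void *csid_devices[CSID_HW_NUM_MAX];   /* NULL when not probed */

/* Return the first populated slot; fall back to 0 as the patch does. */
static int first_valid_csid_id(void)
{
    int i;

    for (i = 0; i < CSID_HW_NUM_MAX; i++)
        if (csid_devices[i])
            return i;
    return 0;
}

int main(void)
{
    csid_devices[2] = (void *)0x1;            /* only CSID2 present */
    printf("valid csid: %d\n", first_valid_csid_id());   /* prints 2 */
    return 0;
}
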
@@ -158,6 +158,12 @@ static int cam_tfe_mgr_handle_reg_dump(struct cam_tfe_hw_mgr_ctx *ctx,
        return rc;
    }

    if (!ctx->init_done) {
        CAM_WARN(CAM_ISP, "regdump can't possible as HW not initialized, ctx_idx: %u",
            ctx->ctx_index);
        return 0;
    }

    if (!atomic_read(&ctx->cdm_done))
        CAM_WARN_RATE_LIMIT(CAM_ISP,
            "Reg dump values might be from more than one request");
@@ -199,7 +205,7 @@ static int cam_tfe_mgr_get_hw_caps_internal(void *hw_mgr_priv,
    for (i = 0; i < CAM_TFE_CSID_HW_NUM_MAX; i++) {
        if (!hw_mgr->csid_devices[i])
            break;
        if (query_isp->num_dev < i)
        if (i >= query_isp->num_dev)
            return -EINVAL;

        query_isp->dev_caps[i].hw_type = CAM_ISP_TFE_HW_TFE;
@@ -295,7 +301,8 @@ static int cam_tfe_mgr_get_hw_caps_v2(void *hw_mgr_priv,
        return -EINVAL;
    }

    if (!tmp_query_isp_v2.num_dev) {
    if (!tmp_query_isp_v2.num_dev ||
        tmp_query_isp_v2.num_dev > CAM_TFE_CSID_HW_NUM_MAX) {
        CAM_ERR(CAM_ISP, "Invalid Num of dev is %d query cap version %d",
            tmp_query_isp_v2.num_dev, tmp_query_isp_v2.version);
        rc = -EINVAL;

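Both TFE query-caps hunks bound a userspace-supplied device count before it is used to index a fixed-size capability array. A minimal sketch of that clamping, with hypothetical structures in place of the driver's query types:

#include <stdint.h>

#define CSID_HW_NUM_MAX 3

struct dev_cap  { uint32_t hw_type; uint32_t hw_version; };
struct query_in { uint32_t num_dev; };      /* copied from userspace */

int fill_caps(const struct query_in *q, struct dev_cap *caps,
              unsigned int num_probed)
{
    unsigned int i;

    /* v2 path: reject a count of zero or one larger than the array. */
    if (!q->num_dev || q->num_dev > CSID_HW_NUM_MAX)
        return -1;                          /* -EINVAL in the driver */

    for (i = 0; i < num_probed && i < CSID_HW_NUM_MAX; i++) {
        if (i >= q->num_dev)                /* never fill past the    */
            return -1;                      /* caller-declared count  */
        caps[i].hw_type = 1;
        caps[i].hw_version = 0x100;
    }
    return 0;
}
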
@@ -2711,7 +2711,6 @@ static int cam_ife_csid_ver1_enable_hw(struct cam_ife_csid_ver1_hw *csid_hw)
    csid_hw->flags.fatal_err_detected = false;
    csid_hw->flags.device_enabled = true;
    spin_unlock_irqrestore(&csid_hw->lock_state, flags);
    cam_tasklet_start(csid_hw->tasklet);

    return rc;

@@ -2890,7 +2889,6 @@ static int cam_ife_csid_ver1_disable_hw(
    cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
        csid_reg->cmn_reg->top_irq_mask_addr);

    cam_tasklet_stop(csid_hw->tasklet);
    rc = cam_ife_csid_disable_soc_resources(soc_info);
    if (rc)
        CAM_ERR(CAM_ISP, "CSID:%d Disable CSID SOC failed",

@@ -1343,7 +1343,6 @@ static int cam_tfe_csid_enable_hw(struct cam_tfe_csid_hw *csid_hw)
    csid_hw->fatal_err_detected = false;
    csid_hw->device_enabled = 1;
    spin_unlock_irqrestore(&csid_hw->spin_lock, flags);
    cam_tasklet_start(csid_hw->tasklet);

    if (csid_hw->pxl_pipe_enable ) {
        path_data = (struct cam_tfe_csid_path_cfg *)
@@ -1415,8 +1414,6 @@ static int cam_tfe_csid_disable_hw(struct cam_tfe_csid_hw *csid_hw)
    cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
        csid_reg->cmn_reg->csid_top_irq_mask_addr);

    cam_tasklet_stop(csid_hw->tasklet);

    rc = cam_tfe_csid_disable_soc_resources(soc_info);
    if (rc)
        CAM_ERR(CAM_ISP, "CSID:%d Disable CSID SOC failed",

@@ -106,10 +106,6 @@ static int cam_jpeg_add_command_buffers(struct cam_packet *packet,
    struct cam_cmd_buf_desc *cmd_desc = NULL;
    struct cam_jpeg_request_data *jpeg_request_data;
    struct cam_kmd_buf_info kmd_buf;
    struct cam_jpeg_config_inout_param_info *inout_params;
    uint32_t *cmd_buf_kaddr;
    uintptr_t kaddr;
    size_t len;
    unsigned int num_entry = 0;
    unsigned int i;
    int rc;
@@ -157,9 +153,10 @@ static int cam_jpeg_add_command_buffers(struct cam_packet *packet,
    num_entry++;

    jpeg_request_data->dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
    jpeg_request_data->encode_size_buffer_ptr = NULL;
    jpeg_request_data->request_id = packet->header.request_id;
    jpeg_request_data->thumbnail_threshold_size = 0;
    jpeg_request_data->out_size_mem_handle = 0;
    jpeg_request_data->out_size_offset = 0;

    CAM_DBG(CAM_JPEG,
        "Change_Base HW_Entry. Offset: 0x%x Length: %u mem_handle: 0x%x num_entry: %d",
@@ -193,29 +190,8 @@ static int cam_jpeg_add_command_buffers(struct cam_packet *packet,
        num_entry++;
        break;
    case CAM_JPEG_PACKET_INOUT_PARAM:
        rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
            (uintptr_t *)&kaddr, &len);
        if (rc) {
            CAM_ERR(CAM_JPEG, "unable to get info for cmd buf: %x %d");
            return rc;
        }

        cmd_buf_kaddr = (uint32_t *)kaddr;

        if (cmd_desc[i].offset >= len) {
            CAM_ERR(CAM_JPEG, "Invalid offset: %u cmd buf len: %zu",
                cmd_desc[i].offset, len);
            cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
            return -EINVAL;
        }

        cmd_buf_kaddr += (cmd_desc[i].offset / sizeof(uint32_t));

        inout_params = (struct cam_jpeg_config_inout_param_info *)cmd_buf_kaddr;
        jpeg_request_data->encode_size_buffer_ptr = &inout_params->output_size;
        CAM_DBG(CAM_JPEG, "encode_size_buf_ptr: 0x%p",
            jpeg_request_data->encode_size_buffer_ptr);
        cam_mem_put_cpu_buf(cmd_desc[i].mem_handle);
        jpeg_request_data->out_size_mem_handle = cmd_desc[i].mem_handle;
        jpeg_request_data->out_size_offset = cmd_desc[i].offset;
        break;
    case CAM_JPEG_PACKET_GENERIC_BLOB:
        rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i],
@@ -391,6 +367,11 @@ static int cam_jpeg_mgr_bottom_half_irq(void *priv, void *data)
    struct cam_ctx_request *req;
    struct cam_jpeg_misr_dump_args misr_args;
    struct cam_jpeg_hw_buf_done_evt_data jpeg_done_evt;
    struct cam_jpeg_config_inout_param_info *inout_params;
    uint32_t *cmd_buf_kaddr;
    uintptr_t kaddr;
    size_t len;
    size_t inout_param_size;

    if (!data || !priv) {
        CAM_ERR(CAM_JPEG, "Invalid data");
@@ -467,10 +448,26 @@ static int cam_jpeg_mgr_bottom_half_irq(void *priv, void *data)
    }

    jpeg_req = irq_cb_data->jpeg_req;
    inout_param_size = sizeof(struct cam_jpeg_config_inout_param_info);

    if (jpeg_req->dev_type == CAM_JPEG_RES_TYPE_ENC) {
        if (jpeg_req->encode_size_buffer_ptr)
            *jpeg_req->encode_size_buffer_ptr = task_data->u.output_encode_size;
        rc = cam_mem_get_cpu_buf(jpeg_req->out_size_mem_handle,
            (uintptr_t *)&kaddr, &len);
        if (!rc) {
            if ((inout_param_size > len) ||
                (jpeg_req->out_size_offset >= (len - inout_param_size)))
                CAM_ERR(CAM_JPEG,
                    "Inval off = %u cmd buf len = %zu inout_param_size = %d",
                    jpeg_req->out_size_offset, len, inout_param_size);
            else {
                cmd_buf_kaddr = (uint32_t *)kaddr;
                cmd_buf_kaddr += (jpeg_req->out_size_offset / sizeof(uint32_t));
                inout_params =
                    (struct cam_jpeg_config_inout_param_info *)cmd_buf_kaddr;
                inout_params->output_size = task_data->u.output_encode_size;
            }
            cam_mem_put_cpu_buf(jpeg_req->out_size_mem_handle);
        }
        else
            CAM_ERR(CAM_JPEG, "Buffer pointer for inout param is null");

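The JPEG rework stops caching a kernel pointer (encode_size_buffer_ptr) that was computed while the command buffer was mapped at packet-parse time; it records only the mem handle and offset, then re-maps and re-validates the buffer in the IRQ bottom half before writing the encoded size. A user-space sketch of the write-time validation follows; the mapping helpers are hypothetical stand-ins for cam_mem_get_cpu_buf()/cam_mem_put_cpu_buf():

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical mapping helpers: here they expose a static buffer so the
 * sketch stays self-contained. */
static uint8_t fake_buf[64];

static int map_handle(int32_t handle, uint8_t **vaddr, size_t *len)
{
    (void)handle;
    *vaddr = fake_buf;
    *len = sizeof(fake_buf);
    return 0;
}

static void unmap_handle(int32_t handle) { (void)handle; }

struct out_size_ref { int32_t mem_handle; uint32_t offset; };

int write_encoded_size(const struct out_size_ref *ref, uint32_t size)
{
    uint8_t *vaddr;
    size_t len;

    if (map_handle(ref->mem_handle, &vaddr, &len))
        return -1;

    /* Validate the offset against the length seen now, not at parse time. */
    if (sizeof(uint32_t) > len || ref->offset > len - sizeof(uint32_t)) {
        unmap_handle(ref->mem_handle);
        return -1;
    }

    memcpy(vaddr + ref->offset, &size, sizeof(size));
    unmap_handle(ref->mem_handle);
    return 0;
}
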
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef CAM_JPEG_HW_MGR_INTF_H
@@ -23,15 +23,16 @@ enum cam_jpeg_hw_type {
 * struct cam_jpeg_request_data - Jpeg request data received from command buffers
 * @dev_type : Jpeg device type(ENC vs DMA)
 * @request_id : Request ID
 * @encode_size_buffer_ptr : Pointer to the buffer location for storing the encode
                             size of the result
 * @thumbnail_threshold_size : Threshold size for thumbnail image
 * @out_size_mem_handle : handle to the buffer to share encoded output size with userspace
 * @out_size_offset : offset to memory where out_size_mem_handle is stored
 */
struct cam_jpeg_request_data {
    uint32_t dev_type;
    uint64_t request_id;
    uint32_t *encode_size_buffer_ptr;
    uint32_t thumbnail_threshold_size;
    __s32 out_size_mem_handle;
    uint32_t out_size_offset;
};

typedef void (*cam_jpeg_mini_dump_cb)(void *priv, void *dst);

@@ -2212,6 +2212,14 @@ static int cam_ope_mgr_process_cmd_buf_req(struct cam_ope_hw_mgr *hw_mgr,
            hw_mgr->iommu_hdl);
        goto end;
    }
    if ((len <= frame_process->cmd_buf[i][j].offset) ||
        (frame_process->cmd_buf[i][j].size <
        frame_process->cmd_buf[i][j].length) ||
        ((len - frame_process->cmd_buf[i][j].offset) <
        frame_process->cmd_buf[i][j].length)) {
        CAM_ERR(CAM_OPE, "Invalid offset.");
        return -EINVAL;
    }
    cpu_addr = cpu_addr +
        frame_process->cmd_buf[i][j].offset;
    CAM_DBG(CAM_OPE, "Hdl %x size %d len %d off %d",
@@ -2260,6 +2268,10 @@ static int cam_ope_mgr_process_cmd_buf_req(struct cam_ope_hw_mgr *hw_mgr,
    uint32_t s_idx = 0;

    s_idx = cmd_buf->stripe_idx;
    if (s_idx < 0 || s_idx >= OPE_MAX_STRIPES) {
        CAM_ERR(CAM_OPE, "Invalid index.");
        return -EINVAL;
    }
    num_cmd_bufs =
        ope_request->num_stripe_cmd_bufs[i][s_idx];
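Both OPE hunks validate untrusted packet metadata: the command-buffer offset and length are checked against the mapped length before the offset is added to the CPU address, and the stripe index is bounds-checked before it indexes the per-stripe array. A small sketch of the offset/length check, with hypothetical field names:

#include <stddef.h>
#include <stdint.h>

struct cmd_buf_desc { uint32_t offset; uint32_t size; uint32_t length; };

/* Returns a pointer into the mapped buffer, or NULL when the descriptor
 * supplied by userspace does not fit inside the mapped length. */
uint8_t *cmd_buf_payload(uint8_t *cpu_addr, size_t mapped_len,
                         const struct cmd_buf_desc *d)
{
    if (mapped_len <= d->offset ||          /* offset past the mapping   */
        d->size < d->length ||              /* length exceeds the buffer */
        mapped_len - d->offset < d->length) /* payload overruns mapping  */
        return NULL;

    return cpu_addr + d->offset;
}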