Merge 494d493271 on remote branch
Change-Id: I57a902cea76c13dbc484a2402011d9561342be78
@@ -21,7 +21,7 @@ ifeq ($(TARGET_BOARD_PLATFORM), volcano)
 endif

 # List of board platforms for which MMRM driver API should be enabled
-MMRM_BOARDS := taro parrot kalama pineapple crow volcono
+MMRM_BOARDS := taro parrot kalama pineapple crow volcano

 # List of board platforms for which Synx V2 vendor driver API should be enabled
 SYNX_VENDOR_BOARDS := pineapple
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
  */

 #ifndef _CAM_HW_MGR_INTF_H_
@@ -175,6 +175,7 @@ struct cam_hw_acquire_stream_caps {
  *                       if input splits into multiple paths,
  *                       its updated per hardware
  * @valid_acquired_hw:   Valid num of acquired hardware
+ * @total_ports_acq      Total ports acquired ipp+ppp+rdi
  * @op_params:           OP Params from hw_mgr to ctx
  * @mini_dump_cb:        Mini dump callback function
  *
@@ -194,6 +195,7 @@ struct cam_hw_acquire_args {
     uint32_t acquired_hw_id[CAM_MAX_ACQ_RES];
     uint32_t acquired_hw_path[CAM_MAX_ACQ_RES][CAM_MAX_HW_SPLIT];
     uint32_t valid_acquired_hw;
+    uint32_t total_ports_acq;
     struct cam_hw_acquire_stream_caps op_params;
     cam_ctx_mini_dump_cb_func mini_dump_cb;
 };
@@ -6573,7 +6573,7 @@ static int cam_icp_mgr_hw_dump(void *hw_priv, void *hw_dump_args)
     *mgr_addr++ = hw_mgr->icp_booted;
     *mgr_addr++ = hw_mgr->icp_resumed;
     *mgr_addr++ = hw_mgr->disable_ubwc_comp;
-    memcpy(mgr_addr, &hw_mgr->dev_info, sizeof(hw_mgr->dev_info));
+    memcpy(mgr_addr, &hw_mgr->dev_info, sizeof(struct cam_icp_hw_device_info));
     mgr_addr += sizeof(hw_mgr->dev_info);
     *mgr_addr++ = hw_mgr->icp_pc_flag;
     *mgr_addr++ = hw_mgr->dev_pc_flag;
@@ -6667,6 +6667,8 @@ static int __cam_isp_ctx_flush_dev_in_top_state(struct cam_context *ctx,
 static void __cam_isp_ctx_free_mem_hw_entries(struct cam_context *ctx)
 {
     int i;
+    struct cam_isp_context *ctx_isp =
+        (struct cam_isp_context *) ctx->ctx_priv;

     if (ctx->out_map_entries) {
         for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
@@ -6698,6 +6700,13 @@ static void __cam_isp_ctx_free_mem_hw_entries(struct cam_context *ctx)
         ctx->hw_update_entry = NULL;
     }

+    if (ctx_isp) {
+        for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
+            kfree(ctx_isp->req_isp[i].deferred_fence_map_index);
+            ctx_isp->req_isp[i].deferred_fence_map_index = NULL;
+        }
+    }
+
     ctx->max_out_map_entries = 0;
     ctx->max_in_map_entries = 0;
     ctx->max_hw_update_entries = 0;
@@ -7090,6 +7099,8 @@ static int __cam_isp_ctx_allocate_mem_hw_entries(
     struct cam_ctx_request *req;
     struct cam_ctx_request *temp_req;
     struct cam_isp_ctx_req *req_isp;
+    struct cam_isp_context *ctx_isp =
+        (struct cam_isp_context *) ctx->ctx_priv;

     if (!param->op_params.param_list[0])
         max_res = CAM_ISP_CTX_RES_MAX;
@@ -7119,16 +7130,6 @@ static int __cam_isp_ctx_allocate_mem_hw_entries(
         return -ENOMEM;
     }

-    for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
-        ctx->hw_update_entry[i] = kcalloc(ctx->max_hw_update_entries,
-            sizeof(struct cam_hw_update_entry), GFP_KERNEL);
-        if (!ctx->hw_update_entry[i]) {
-            CAM_ERR(CAM_CTXT, "%s[%u] no memory for hw_update_entry: %u, link: 0x%x",
-                ctx->dev_name, ctx->ctx_id, i, ctx->link_hdl);
-            return -ENOMEM;
-        }
-    }
-
     ctx->in_map_entries = kcalloc(CAM_ISP_CTX_REQ_MAX, sizeof(struct cam_hw_fence_map_entry *),
         GFP_KERNEL);

@@ -7139,19 +7140,6 @@ static int __cam_isp_ctx_allocate_mem_hw_entries(
         goto end;
     }

-    for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
-        ctx->in_map_entries[i] = kcalloc(ctx->max_in_map_entries,
-            sizeof(struct cam_hw_fence_map_entry),
-            GFP_KERNEL);
-
-        if (!ctx->in_map_entries[i]) {
-            CAM_ERR(CAM_CTXT, "%s[%u] no memory for in_map_entries: %u, link: 0x%x",
-                ctx->dev_name, ctx->ctx_id, i, ctx->link_hdl);
-            rc = -ENOMEM;
-            goto end;
-        }
-    }
-
     ctx->out_map_entries = kcalloc(CAM_ISP_CTX_REQ_MAX, sizeof(struct cam_hw_fence_map_entry *),
         GFP_KERNEL);

@@ -7162,7 +7150,29 @@ static int __cam_isp_ctx_allocate_mem_hw_entries(
         goto end;
     }

+
     for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
+        ctx->hw_update_entry[i] = kcalloc(ctx->max_hw_update_entries,
+            sizeof(struct cam_hw_update_entry), GFP_KERNEL);
+
+        if (!ctx->hw_update_entry[i]) {
+            CAM_ERR(CAM_CTXT, "%s[%u] no memory for hw_update_entry: %u, link: 0x%x",
+                ctx->dev_name, ctx->ctx_id, i, ctx->link_hdl);
+            rc = -ENOMEM;
+            goto end;
+        }
+
+        ctx->in_map_entries[i] = kcalloc(ctx->max_in_map_entries,
+            sizeof(struct cam_hw_fence_map_entry),
+            GFP_KERNEL);
+
+        if (!ctx->in_map_entries[i]) {
+            CAM_ERR(CAM_CTXT, "%s[%u] no memory for in_map_entries: %u, link: 0x%x",
+                ctx->dev_name, ctx->ctx_id, i, ctx->link_hdl);
+            rc = -ENOMEM;
+            goto end;
+        }
+
         ctx->out_map_entries[i] = kcalloc(ctx->max_out_map_entries,
             sizeof(struct cam_hw_fence_map_entry),
             GFP_KERNEL);
@@ -7173,6 +7183,16 @@ static int __cam_isp_ctx_allocate_mem_hw_entries(
             rc = -ENOMEM;
             goto end;
         }
+
+        ctx_isp->req_isp[i].deferred_fence_map_index = kcalloc(param->total_ports_acq,
+            sizeof(uint32_t), GFP_KERNEL);
+
+        if (!ctx_isp->req_isp[i].deferred_fence_map_index) {
+            CAM_ERR(CAM_ISP, "%s[%d] no memory for defer fence map idx arr, ports:%u",
+                ctx->dev_name, ctx->ctx_id, param->total_ports_acq);
+            rc = -ENOMEM;
+            goto end;
+        }
     }

     list_for_each_entry_safe(req, temp_req,
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
  */

 #ifndef _CAM_ISP_CONTEXT_H_
@@ -196,7 +196,7 @@ struct cam_isp_ctx_req {
     uint32_t num_fence_map_in;
     uint32_t num_acked;
     uint32_t num_deferred_acks;
-    uint32_t deferred_fence_map_index[CAM_ISP_CTX_RES_MAX];
+    uint32_t *deferred_fence_map_index;
     int32_t bubble_report;
     struct cam_isp_prepare_hw_update_data hw_update_data;
     enum cam_hw_config_reapply_type reapply_type;
@@ -5700,6 +5700,7 @@ static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
     }

     total_ports = total_pix_port + total_rdi_port + total_pd_port;
+    acquire_args->total_ports_acq = total_ports;
     ife_ctx->res_list_ife_out = kcalloc(total_ports,
         sizeof(struct cam_isp_hw_mgr_res), GFP_KERNEL);
     if (!ife_ctx->res_list_ife_out) {
@@ -8859,6 +8860,15 @@ static int cam_isp_scratch_buf_update_util(
         return rc;
     }

+    if (buffer_info->offset >= size) {
+        CAM_ERR(CAM_ISP,
+            "Invalid scratch buffer offset:%u size:%u mmu_hdl:%u hdl:%d res_type:0x%x",
+            buffer_info->offset, size, mmu_hdl, buffer_info->mem_handle,
+            buffer_info->resource_type);
+        rc = -EINVAL;
+        return rc;
+    }
+
     port_info->res_id = buffer_info->resource_type;
     port_info->io_addr = io_addr + buffer_info->offset;
     port_info->width = buffer_info->width;
|
|||||||
uint32_t frame_header_iova, padded_bytes = 0;
|
uint32_t frame_header_iova, padded_bytes = 0;
|
||||||
size_t len;
|
size_t len;
|
||||||
struct cam_ife_hw_mgr *hw_mgr = &g_ife_hw_mgr;
|
struct cam_ife_hw_mgr *hw_mgr = &g_ife_hw_mgr;
|
||||||
|
struct cam_smmu_buffer_tracker *buf_track_entry;
|
||||||
|
|
||||||
mmu_hdl = cam_mem_is_secure_buf(
|
mmu_hdl = cam_mem_is_secure_buf(
|
||||||
kmd_buf->handle) ?
|
kmd_buf->handle) ?
|
||||||
@@ -11800,6 +11811,19 @@ static int cam_ife_mgr_util_insert_frame_header(
|
|||||||
return rc;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (kmd_buf->offset >= len) {
|
||||||
|
CAM_ERR(CAM_ISP,
|
||||||
|
"Invalid kmd buffer offset %u",
|
||||||
|
kmd_buf->offset);
|
||||||
|
if (buf_tracker) {
|
||||||
|
buf_track_entry = list_first_entry_or_null(buf_tracker,
|
||||||
|
struct cam_smmu_buffer_tracker, list);
|
||||||
|
cam_smmu_buffer_tracker_buffer_putref(buf_track_entry);
|
||||||
|
}
|
||||||
|
rc = -EINVAL;
|
||||||
|
return rc;
|
||||||
|
}
|
||||||
|
|
||||||
/* CDM buffer is within 32-bit address space */
|
/* CDM buffer is within 32-bit address space */
|
||||||
frame_header_iova = (uint32_t)iova_addr;
|
frame_header_iova = (uint32_t)iova_addr;
|
||||||
frame_header_iova += kmd_buf->offset;
|
frame_header_iova += kmd_buf->offset;
|
||||||
|
@@ -2455,6 +2455,7 @@ static int cam_tfe_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
     }

     total_ports = total_pix_port + total_rdi_port + total_pd_port;
+    acquire_args->total_ports_acq = total_ports;
     tfe_ctx->res_list_tfe_out = kcalloc(total_ports,
         sizeof(struct cam_isp_hw_mgr_res), GFP_KERNEL);
     if (!tfe_ctx->res_list_tfe_out) {
@@ -330,6 +330,7 @@ struct dma_fence *cam_dma_fence_get_fence_from_fd(
     int32_t fd, int32_t *dma_fence_row_idx)
 {
     struct dma_fence *dma_fence = NULL;
+    struct cam_dma_fence_row *row;

     dma_fence = __cam_dma_fence_find_fence_in_table(fd, dma_fence_row_idx);
     if (IS_ERR_OR_NULL(dma_fence)) {
|
|||||||
return cam_dma_fence_get_fence_from_sync_file(fd, dma_fence_row_idx);
|
return cam_dma_fence_get_fence_from_sync_file(fd, dma_fence_row_idx);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
spin_lock_bh(&g_cam_dma_fence_dev->row_spinlocks[*dma_fence_row_idx]);
|
||||||
|
row = &g_cam_dma_fence_dev->rows[*dma_fence_row_idx];
|
||||||
|
|
||||||
|
if (row->state == CAM_DMA_FENCE_STATE_INVALID) {
|
||||||
|
CAM_ERR(CAM_DMA_FENCE,
|
||||||
|
"dma fence at idx: %d is in invalid state: %d",
|
||||||
|
dma_fence_row_idx, row->state);
|
||||||
|
spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[*dma_fence_row_idx]);
|
||||||
|
return ERR_PTR(-EINVAL);
|
||||||
|
}
|
||||||
|
|
||||||
dma_fence_get(dma_fence);
|
dma_fence_get(dma_fence);
|
||||||
|
spin_unlock_bh(&g_cam_dma_fence_dev->row_spinlocks[*dma_fence_row_idx]);
|
||||||
|
|
||||||
CAM_DBG(CAM_DMA_FENCE, "dma fence found for fd: %d with seqno: %llu ref_cnt: %u",
|
CAM_DBG(CAM_DMA_FENCE, "dma fence found for fd: %d with seqno: %llu ref_cnt: %u",
|
||||||
fd, dma_fence->seqno, kref_read(&dma_fence->refcount));
|
fd, dma_fence->seqno, kref_read(&dma_fence->refcount));
|
||||||
|
@@ -1480,7 +1480,7 @@ static int cam_soc_util_set_clk_rate(struct cam_hw_soc_info *soc_info,
         }
     }

-    if (applied_clk_rate)
+    if (applied_clk_rate && set_rate)
         *applied_clk_rate = clk_rate_round;

     return rc;