Browse Source

msm: camera: smmu: Early PF detection

This change adds support to detect early
unmapping of buffers from UMD, and signal
when the error occurs by sending an error
code to UMD.

At present, buffers are allocated and/or mapped
by UMD, and the following sequence occurs.

KMD prepares packets sent during a config ioctl,
and these packets reside in the mapped buffers;
HW blocks then access these buffers, and may
write output to other mapped buffers (io buffers);
Once HW is done (upon a buf done), KMD signals
this event to UMD;
UMD may free/unmap these relevant buffers.

This change adds support to detect cases where a
free/unmap happens before/while HW is accessing
these buffers.

This feature is enabled by default, but a debugfs
variable disable_buf_tracking is added under smmu
which will enable the user to disable the feature.
Camera server needs to be restarted whenever this
variable is set/unset for changes to take place.

CRs-Fixed: 3382609
Change-Id: I39c3f0c373743c10bc2e6304ffbdc820e3c95970
Signed-off-by: Li Sha Lim <[email protected]>
Li Sha Lim, 3 years ago
parent
commit
b3f493269c

+ 3 - 3
drivers/cam_cdm/cam_cdm_hw_core.c

@@ -865,7 +865,7 @@ int cam_hw_cdm_submit_gen_irq(
 		core->bl_fifo[fifo_idx].bl_tag, cdm_cmd->cmd_arrary_count, cdm_cmd->cookie);
 
 	rc = cam_mem_get_io_buf(cdm_cmd->genirq_buff->handle, core->iommu_hdl.non_secure,
-		&hw_vaddr_ptr, &len, NULL);
+		&hw_vaddr_ptr, &len, NULL, NULL);
 	if (rc) {
 		CAM_ERR(CAM_CDM, "Getting a hwva from mem_hdl failed. rc: %d", rc);
 		return -EINVAL;
@@ -972,7 +972,7 @@ int cam_hw_cdm_submit_debug_gen_irq(
 		core->bl_fifo[fifo_idx].bl_tag, cdm_cmd->cmd_arrary_count, cdm_cmd->cookie);
 
 	rc = cam_mem_get_io_buf(cdm_cmd->genirq_buff->handle, core->iommu_hdl.non_secure,
-		&hw_vaddr_ptr, &len, NULL);
+		&hw_vaddr_ptr, &len, NULL, NULL);
 	if (rc) {
 		CAM_ERR(CAM_CDM, "Getting a hwva from mem_hdl failed. rc: %d", rc);
 		return -EINVAL;
@@ -1101,7 +1101,7 @@ int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
 		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
 			rc = cam_mem_get_io_buf(cdm_cmd->cmd[i].bl_addr.mem_handle,
 				core->iommu_hdl.non_secure, &hw_vaddr_ptr,
-				&len, NULL);
+				&len, NULL, NULL);
 			if (rc) {
 				CAM_ERR(CAM_CDM,
 					"Getting a hwva from mem_hdl failed. rc: %d, cmd_ent: %u",

+ 19 - 16
drivers/cam_core/cam_context.h

@@ -66,26 +66,29 @@ enum cam_context_state {
  * @index:                 Index of request in the list
  * @flushed:               Request is flushed
  * @ctx:                   The context to which this request belongs
+ * @buf_tracker:           List of buffers we want to keep ref counts on
+ *                         used by the HW block for a particular req
  * @pf_data                page fault debug data
  *
  */
 struct cam_ctx_request {
-	struct list_head                  list;
-	uint32_t                          status;
-	uint64_t                          request_id;
-	void                             *req_priv;
-	struct cam_hw_update_entry       *hw_update_entries;
-	uint32_t                          num_hw_update_entries;
-	struct cam_hw_fence_map_entry    *in_map_entries;
-	uint32_t                          num_in_map_entries;
-	struct cam_hw_fence_map_entry    *out_map_entries;
-	uint32_t                          num_out_map_entries;
-	atomic_t                          num_in_acked;
-	uint32_t                          num_out_acked;
-	uint32_t                          index;
-	int                               flushed;
-	struct cam_context               *ctx;
-	struct cam_hw_mgr_pf_request_info pf_data;
+	struct list_head               list;
+	uint32_t                       status;
+	uint64_t                       request_id;
+	void                          *req_priv;
+	struct cam_hw_update_entry    *hw_update_entries;
+	uint32_t                       num_hw_update_entries;
+	struct cam_hw_fence_map_entry *in_map_entries;
+	uint32_t                       num_in_map_entries;
+	struct cam_hw_fence_map_entry *out_map_entries;
+	uint32_t                       num_out_map_entries;
+	atomic_t                       num_in_acked;
+	uint32_t                       num_out_acked;
+	uint32_t                       index;
+	int                            flushed;
+	struct cam_context            *ctx;
+	struct list_head               buf_tracker;
+	struct cam_hw_mgr_pf_request_info  pf_data;
 };
 
 /**

+ 17 - 0
drivers/cam_core/cam_context_utils.c

@@ -207,6 +207,7 @@ int cam_context_buf_done_from_hw(struct cam_context *ctx,
 		CAM_DBG(CAM_CTXT, "[%s][%d] no output fence to signal",
 			ctx->dev_name, ctx->ctx_id);
 		list_del_init(&req->list);
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		list_add_tail(&req->list, &ctx->free_req_list);
 		req->ctx = NULL;
 		spin_unlock(&ctx->lock);
@@ -219,6 +220,9 @@ int cam_context_buf_done_from_hw(struct cam_context *ctx,
 	 */
 	list_del_init(&req->list);
 	spin_unlock(&ctx->lock);
+
+	cam_smmu_buffer_tracker_putref(&req->buf_tracker);
+
 	if (evt_id == CAM_CTX_EVT_ID_SUCCESS)
 		result = CAM_SYNC_STATE_SIGNALED_SUCCESS;
 	else  if (evt_id == CAM_CTX_EVT_ID_CANCEL)
@@ -313,6 +317,7 @@ static int cam_context_apply_req_to_hw(struct cam_ctx_request *req,
 
 	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
 	if (rc) {
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		spin_lock(&ctx->lock);
 		list_del_init(&req->list);
 		list_add_tail(&req->list, &ctx->free_req_list);
@@ -379,6 +384,7 @@ static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
 			req->flushed = 0;
 			req->ctx = NULL;
 			mutex_unlock(&ctx->sync_mutex);
+			cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 			spin_lock(&ctx->lock);
 			list_del_init(&req->list);
 			list_add_tail(&req->list, &ctx->free_req_list);
@@ -560,6 +566,11 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
 	cfg.priv = req->req_priv;
 	cfg.num_in_map_entries = 0;
 	cfg.num_out_map_entries = 0;
+	cfg.buf_tracker = &req->buf_tracker;
+	memset(req->out_map_entries, 0, sizeof(struct cam_hw_fence_map_entry)
+		* ctx->max_out_map_entries);
+
+	INIT_LIST_HEAD(cfg.buf_tracker);
 
 	rc = ctx->hw_mgr_intf->hw_prepare_update(
 		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
@@ -570,6 +581,7 @@ int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
 		rc = -EFAULT;
 		goto free_req;
 	}
+
 	req->num_hw_update_entries = cfg.num_hw_update_entries;
 	req->num_out_map_entries = cfg.num_out_map_entries;
 	req->num_in_map_entries = cfg.num_in_map_entries;
@@ -662,6 +674,7 @@ put_ref:
 				req->out_map_entries[i].sync_id);
 	}
 free_req:
+	cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 	spin_lock(&ctx->lock);
 	list_add_tail(&req->list, &ctx->free_req_list);
 	req->ctx = NULL;
@@ -835,6 +848,7 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
 		list_del_init(&req->list);
 		req->flushed = 1;
 
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		flush_args.flush_req_pending[flush_args.num_req_pending++] =
 			req->req_priv;
 
@@ -934,6 +948,7 @@ int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
 			struct cam_ctx_request, list);
 		list_del_init(&req->list);
 
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		for (i = 0; i < req->num_out_map_entries; i++) {
 			if (req->out_map_entries[i].sync_id != -1) {
 				rc = cam_sync_signal(
@@ -1072,6 +1087,8 @@ int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
 		}
 
 		if (flush_args.num_req_pending || flush_args.num_req_active) {
+			cam_smmu_buffer_tracker_putref(&req->buf_tracker);
+
 			for (i = 0; i < req->num_out_map_entries; i++) {
 				sync_id =
 					req->out_map_entries[i].sync_id;

+ 28 - 20
drivers/cam_core/cam_hw_mgr_intf.h

@@ -104,12 +104,18 @@ struct cam_hw_update_entry {
  * @resrouce_handle:       Resource port id for the buffer
  * @sync_id:               Sync id
  * @image_buf_addr:        Image buffer address array
+ * @buffer_tracker:        Some buffers with fences have buf dones come
+ *                         separately from each out port, and signalled
+ *                         independently. Ref counting needs to be handled
+ *                         independently as well corresponding to individual
+ *                         buf dones.
  *
  */
 struct cam_hw_fence_map_entry {
-	uint32_t           resource_handle;
-	int32_t            sync_id;
-	dma_addr_t         image_buf_addr[CAM_PACKET_MAX_PLANES];
+	uint32_t                        resource_handle;
+	int32_t                         sync_id;
+	dma_addr_t                      image_buf_addr[CAM_PACKET_MAX_PLANES];
+	struct cam_smmu_buffer_tracker *buffer_tracker;
 };
 
 /**
@@ -257,27 +263,29 @@ struct cam_hw_mgr_pf_request_info {
  * @reg_dump_buf_desc:     cmd buffer descriptors for reg dump
  * @num_reg_dump_buf:      Count of descriptors in reg_dump_buf_desc
  * @priv:                  Private pointer of hw update
+ * @buf_tracker:           Ptr to list of buffers we want to keep ref counts on
  * @pf_data:               Debug data for page fault
  *
  */
 struct cam_hw_prepare_update_args {
-	struct cam_packet                      *packet;
-	size_t                                  remain_len;
-	void                                   *ctxt_to_hw_map;
-	uint32_t                                max_hw_update_entries;
-	struct cam_hw_update_entry             *hw_update_entries;
-	uint32_t                                num_hw_update_entries;
-	uint32_t                                max_out_map_entries;
-	struct cam_hw_fence_map_entry          *out_map_entries;
-	uint32_t                                num_out_map_entries;
-	uint32_t                                max_in_map_entries;
-	struct cam_hw_fence_map_entry          *in_map_entries;
-	uint32_t                                num_in_map_entries;
-	struct cam_cmd_buf_desc                 reg_dump_buf_desc[
-						CAM_REG_DUMP_MAX_BUF_ENTRIES];
-	uint32_t                                num_reg_dump_buf;
-	void                                   *priv;
-	struct cam_hw_mgr_pf_request_info      *pf_data;
+	struct cam_packet              *packet;
+	size_t                          remain_len;
+	void                           *ctxt_to_hw_map;
+	uint32_t                        max_hw_update_entries;
+	struct cam_hw_update_entry     *hw_update_entries;
+	uint32_t                        num_hw_update_entries;
+	uint32_t                        max_out_map_entries;
+	struct cam_hw_fence_map_entry  *out_map_entries;
+	uint32_t                        num_out_map_entries;
+	uint32_t                        max_in_map_entries;
+	struct cam_hw_fence_map_entry  *in_map_entries;
+	uint32_t                        num_in_map_entries;
+	struct cam_cmd_buf_desc         reg_dump_buf_desc[
+					CAM_REG_DUMP_MAX_BUF_ENTRIES];
+	uint32_t                        num_reg_dump_buf;
+	void                           *priv;
+	struct list_head                   *buf_tracker;
+	struct cam_hw_mgr_pf_request_info  *pf_data;
 };
 
 /**

+ 7 - 6
drivers/cam_cre/cam_cre_hw_mgr/cam_cre_hw_mgr.c

@@ -121,7 +121,7 @@ static void cam_cre_free_io_config(struct cam_cre_request *req)
 
 static int cam_cre_mgr_process_cmd_io_buf_req(struct cam_cre_hw_mgr *hw_mgr,
 	struct cam_packet *packet, struct cam_cre_ctx *ctx_data,
-	uint32_t req_idx)
+	uint32_t req_idx, struct list_head *buf_tracker)
 {
 	int rc = 0;
 	int i, j, k;
@@ -179,16 +179,17 @@ static int cam_cre_mgr_process_cmd_io_buf_req(struct cam_cre_hw_mgr *hw_mgr,
 			for (k = 0; k < io_buf->num_planes; k++) {
 				is_secure = cam_mem_is_secure_buf(
 					io_cfg_ptr[j].mem_handle[k]);
+
 				if (is_secure)
 					rc = cam_mem_get_io_buf(
 						io_cfg_ptr[j].mem_handle[k],
 						hw_mgr->iommu_sec_hdl,
-						&iova_addr, &len, NULL);
+						&iova_addr, &len, NULL, buf_tracker);
 				else
 					rc = cam_mem_get_io_buf(
 						io_cfg_ptr[j].mem_handle[k],
 						hw_mgr->iommu_hdl,
-						&iova_addr, &len, NULL);
+						&iova_addr, &len, NULL, buf_tracker);
 
 				if (rc) {
 					CAM_ERR(CAM_CRE, "get buf failed: %d",
@@ -1337,7 +1338,7 @@ static int cam_cre_mgr_process_io_cfg(struct cam_cre_hw_mgr *hw_mgr,
 	struct cam_cre_request *cre_request;
 
 	rc = cam_cre_mgr_process_cmd_io_buf_req(hw_mgr, packet, ctx_data,
-		req_idx);
+		req_idx, prep_arg->buf_tracker);
 	if (rc) {
 		CAM_ERR(CAM_CRE, "Process CRE cmd io request is failed: %d",
 			rc);
@@ -2223,8 +2224,8 @@ static int cam_cre_mgr_prepare_hw_update(void *hw_priv,
 		return -EINVAL;
 	}
 
-	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl,
-			hw_mgr->iommu_sec_hdl, true);
+	rc = cam_packet_util_process_patches(packet, prepare_args->buf_tracker,
+			hw_mgr->iommu_hdl, hw_mgr->iommu_sec_hdl, true);
 	if (rc) {
 		mutex_unlock(&ctx_data->ctx_mutex);
 		CAM_ERR(CAM_CRE, "Patch processing failed %d", rc);

+ 1 - 1
drivers/cam_cust/cam_custom_hw_mgr/cam_custom_hw_mgr.c

@@ -1183,7 +1183,7 @@ static int cam_custom_add_io_buffers(
 					io_cfg[i].mem_handle[plane_id],
 					iommu_hdl,
 					&prepare_hw_data->io_addr[plane_id],
-					&size, NULL);
+					&size, NULL, NULL);
 				if (rc) {
 					CAM_ERR(CAM_CUSTOM,
 						"No io addr for plane: %d",

+ 3 - 2
drivers/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c

@@ -597,7 +597,8 @@ static int cam_fd_mgr_util_prepare_io_buf_info(int32_t iommu_hdl,
 			if (need_io_map) {
 				rc = cam_mem_get_io_buf(
 					io_cfg[i].mem_handle[plane],
-					iommu_hdl, &io_addr[plane], &size, NULL);
+					iommu_hdl, &io_addr[plane], &size, NULL,
+					prepare->buf_tracker);
 				if (rc) {
 					CAM_ERR(CAM_FD,
 						"Failed to get io buf %u %u %u %d",
@@ -1774,7 +1775,7 @@ static int cam_fd_mgr_hw_prepare_update(void *hw_mgr_priv,
 		kmd_buf.size, kmd_buf.used_bytes);
 
 	/* We do not expect any patching, but just do it anyway */
-	rc = cam_packet_util_process_patches(prepare->packet,
+	rc = cam_packet_util_process_patches(prepare->packet, prepare->buf_tracker,
 		hw_mgr->device_iommu.non_secure, -1, false);
 	if (rc) {
 		CAM_ERR(CAM_FD, "Patch FD packet failed, rc=%d", rc);

+ 11 - 10
drivers/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c

@@ -5452,7 +5452,7 @@ static int cam_icp_mgr_pkt_validation(struct cam_icp_hw_ctx_data *ctx_data,
 
 static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
 	struct cam_packet *packet, struct cam_icp_hw_ctx_data *ctx_data,
-	uint32_t *fw_cmd_buf_iova_addr)
+	uint32_t *fw_cmd_buf_iova_addr, struct list_head *buf_tracker)
 {
 	int rc = 0;
 	int i;
@@ -5471,12 +5471,13 @@ static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
 	for (i = 0; i < packet->num_cmd_buf; i++) {
 		if (cmd_desc[i].type == CAM_CMD_BUF_FW) {
 			rc = cam_mem_get_io_buf(cmd_desc[i].mem_handle,
-				hw_mgr->iommu_hdl, &addr, &len, NULL);
+				hw_mgr->iommu_hdl, &addr, &len, NULL, buf_tracker);
 			if (rc) {
 				CAM_ERR(CAM_ICP, "%s: get cmd buf failed %x",
 					ctx_data->ctx_id_string, hw_mgr->iommu_hdl);
 				return rc;
 			}
+
 			/* FW buffers are expected to be within 32-bit address range */
 			*fw_cmd_buf_iova_addr = addr;
 
@@ -5624,7 +5625,7 @@ static int cam_icp_process_stream_settings(
 	for (i = 0; i < cmd_mem_regions->num_regions; i++) {
 		rc = cam_mem_get_io_buf(
 			cmd_mem_regions->map_info_array[i].mem_handle,
-			hw_mgr->iommu_hdl, &iova, &len, NULL);
+			hw_mgr->iommu_hdl, &iova, &len, NULL, NULL);
 		if (rc) {
 			CAM_ERR(CAM_ICP,
 				"%s: Failed to get cmd region iova for handle %u",
@@ -5909,7 +5910,7 @@ static int cam_icp_packet_generic_blob_handler(void *user_data,
 		CAM_DBG(CAM_ICP, "%s: buf handle %d", ctx_data->ctx_id_string,
 			dev_io_info.io_config_cmd_handle);
 		rc = cam_mem_get_io_buf(dev_io_info.io_config_cmd_handle, hw_mgr->iommu_hdl,
-			blob->io_buf_addr, &io_buf_size, NULL);
+			blob->io_buf_addr, &io_buf_size, NULL, NULL);
 		if (rc)
 			CAM_ERR(CAM_ICP, "%s: Failed in blob update", ctx_data->ctx_id_string);
 		else
@@ -6173,7 +6174,7 @@ static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
 	}
 
 	rc = cam_icp_mgr_process_cmd_desc(hw_mgr, packet,
-		ctx_data, &fw_cmd_buf_iova_addr);
+		ctx_data, &fw_cmd_buf_iova_addr, prepare_args->buf_tracker);
 	if (rc) {
 		mutex_unlock(&ctx_data->ctx_mutex);
 		return rc;
@@ -6182,8 +6183,8 @@ static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
 	CAM_DBG(CAM_REQ, "%s: req id = %lld", ctx_data->ctx_id_string,
 		packet->header.request_id);
 	/* Update Buffer Address from handles and patch information */
-	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl,
-		hw_mgr->iommu_sec_hdl, true);
+	rc = cam_packet_util_process_patches(packet, prepare_args->buf_tracker,
+		hw_mgr->iommu_hdl, hw_mgr->iommu_sec_hdl, true);
 	if (rc) {
 		mutex_unlock(&ctx_data->ctx_mutex);
 		return rc;
@@ -6646,7 +6647,7 @@ static int cam_icp_mgr_synx_send_test_cmd(
 	synx_test_cmd.size = sizeof(synx_test_cmd);
 
 	rc = cam_mem_get_io_buf(synx_test_params->ip_mem_hdl, hw_mgr->iommu_hdl,
-		&iova, &size, NULL);
+		&iova, &size, NULL, NULL);
 	if (rc) {
 		CAM_ERR(CAM_ICP, "Failed to get buf for hdl: %d rc: %d",
 			synx_test_params->ip_mem_hdl, rc);
@@ -6657,7 +6658,7 @@ static int cam_icp_mgr_synx_send_test_cmd(
 	synx_test_cmd.input_size = (uint32_t)size;
 
 	rc = cam_mem_get_io_buf(synx_test_params->op_mem_hdl, hw_mgr->iommu_hdl,
-		&iova, &size, NULL);
+		&iova, &size, NULL, NULL);
 	if (rc) {
 		CAM_ERR(CAM_ICP, "Failed to get buf for hdl: %d rc: %d",
 			synx_test_params->ip_mem_hdl, rc);
@@ -7165,7 +7166,7 @@ static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
 	rc = cam_mem_get_io_buf(
 		icp_dev_acquire_info->io_config_cmd_handle,
 		hw_mgr->iommu_hdl,
-		&io_buf_addr, &io_buf_size, NULL);
+		&io_buf_addr, &io_buf_size, NULL, NULL);
 	if (rc) {
 		CAM_ERR(CAM_ICP, "%s: unable to get src buf info from io desc",
 			ctx_data->ctx_id_string);

+ 37 - 1
drivers/cam_isp/cam_isp_context.c

@@ -1143,6 +1143,7 @@ static int __cam_isp_ctx_enqueue_init_request(
 					req_isp_new->hw_update_data.num_exp;
 			}
 			req_old->request_id = req->request_id;
+			list_splice_init(&req->buf_tracker, &req_old->buf_tracker);
 
 			list_add_tail(&req->list, &ctx->free_req_list);
 		}
@@ -1713,6 +1714,7 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
 		ctx_isp->bubble_frame_cnt = 0;
 
 		if (buf_done_req_id <= ctx->last_flush_req) {
+			cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 			for (i = 0; i < req_isp->num_fence_map_out; i++)
 				rc = cam_sync_signal(
 					req_isp->fence_map_out[i].sync_id,
@@ -1742,6 +1744,7 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
 			}
 		}
 		list_del_init(&req->list);
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		list_add_tail(&req->list, &ctx->free_req_list);
 		req_isp->reapply_type = CAM_CONFIG_REAPPLY_NONE;
 		req_isp->cdm_reset_before_apply = false;
@@ -1757,7 +1760,6 @@ static int __cam_isp_ctx_handle_buf_done_for_req_list(
 			ctx_isp->bubble_frame_cnt = 0;
 			req_isp->bubble_detected = false;
 		}
-
 		CAM_DBG(CAM_REQ,
 			"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u link: 0x%x",
 			buf_done_req_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl);
@@ -1897,6 +1899,9 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
 				req_isp->fence_map_out[j].sync_id,
 				ctx->ctx_id, ctx->link_hdl);
 
+			cam_smmu_buffer_tracker_buffer_putref(
+				req_isp->fence_map_out[j].buffer_tracker);
+
 			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_SUCCESS,
 				CAM_SYNC_COMMON_EVENT_SUCCESS);
@@ -1911,6 +1916,9 @@ static int __cam_isp_ctx_handle_buf_done_for_request(
 				req_isp->fence_map_out[j].sync_id,
 				ctx->ctx_id, ctx->link_hdl);
 
+			cam_smmu_buffer_tracker_buffer_putref(
+				req_isp->fence_map_out[j].buffer_tracker);
+
 			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_ERROR,
 				CAM_SYNC_ISP_EVENT_BUBBLE);
@@ -2218,6 +2226,9 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
 				req_isp->fence_map_out[j].sync_id,
 				ctx->ctx_id, ctx->link_hdl);
 
+			cam_smmu_buffer_tracker_buffer_putref(
+				req_isp->fence_map_out[j].buffer_tracker);
+
 			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_SUCCESS,
 				CAM_SYNC_COMMON_EVENT_SUCCESS);
@@ -2236,6 +2247,7 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
 			/* Reset fence */
 			req_isp->fence_map_out[j].sync_id = -1;
 		} else if (!req_isp->bubble_report) {
+
 			CAM_DBG(CAM_ISP,
 				"Sync with failure: req %lld res 0x%x fd 0x%x, ctx:%u link[0x%x]",
 				req->request_id,
@@ -2243,6 +2255,9 @@ static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
 				req_isp->fence_map_out[j].sync_id,
 				ctx->ctx_id, ctx->link_hdl);
 
+			cam_smmu_buffer_tracker_buffer_putref(
+				req_isp->fence_map_out[j].buffer_tracker);
+
 			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
 				CAM_SYNC_STATE_SIGNALED_ERROR,
 				CAM_SYNC_ISP_EVENT_BUBBLE);
@@ -2878,6 +2893,7 @@ static int __cam_isp_ctx_reg_upd_in_applied_state(
 		__cam_isp_ctx_update_event_record(ctx_isp,
 			CAM_ISP_CTX_EVENT_RUP, req);
 	} else {
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		/* no io config, so the request is completed. */
 		list_add_tail(&req->list, &ctx->free_req_list);
 		CAM_DBG(CAM_ISP,
@@ -3139,6 +3155,7 @@ static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
 		req = list_first_entry(&ctx->wait_req_list,
 			struct cam_ctx_request, list);
 		list_del_init(&req->list);
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 		if (req_isp->num_fence_map_out == req_isp->num_acked)
 			list_add_tail(&req->list, &ctx->free_req_list);
@@ -3866,6 +3883,7 @@ static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
 
 	list_for_each_entry_safe(req, req_temp,
 		&ctx->active_req_list, list) {
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 		if (!req_isp->bubble_report) {
 			CAM_ERR(CAM_ISP, "signalled error for req %llu, ctx:%u on link 0x%x",
@@ -3900,6 +3918,7 @@ static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
 
 	list_for_each_entry_safe(req, req_temp,
 		&ctx->wait_req_list, list) {
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 		if (!req_isp->bubble_report) {
 			CAM_ERR(CAM_ISP, "signalled error for req %llu, ctx %u, link 0x%x",
@@ -3961,6 +3980,7 @@ end:
 		}
 		req = list_first_entry(&ctx->pending_req_list,
 			struct cam_ctx_request, list);
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 		error_request_id = ctx_isp->last_applied_req_id;
 
@@ -4132,6 +4152,7 @@ static int __cam_isp_ctx_fs2_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
 		req = list_first_entry(&ctx->wait_req_list,
 			struct cam_ctx_request, list);
 		list_del_init(&req->list);
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 		if (req_isp->num_fence_map_out == req_isp->num_acked)
 			list_add_tail(&req->list, &ctx->free_req_list);
@@ -4174,6 +4195,7 @@ static int __cam_isp_ctx_fs2_reg_upd_in_applied_state(
 		CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d), ctx:%u,link:0x%x",
 			 req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id, ctx->link_hdl);
 	} else {
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		/* no io config, so the request is completed. */
 		list_add_tail(&req->list, &ctx->free_req_list);
 	}
@@ -5463,6 +5485,8 @@ static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
 	}
 
 	list_for_each_entry_safe(req, req_temp, &flush_list, list) {
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
+
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 		for (i = 0; i < req_isp->num_fence_map_out; i++) {
 			if (req_isp->fence_map_out[i].sync_id != -1) {
@@ -6001,6 +6025,7 @@ static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
 		req = list_first_entry(&ctx->active_req_list,
 				struct cam_ctx_request, list);
 		list_del_init(&req->list);
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d, ctx %u link: 0x%x",
 			req_isp->num_fence_map_out, ctx->ctx_id, ctx->link_hdl);
@@ -6092,6 +6117,7 @@ static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
 		/* if packet has buffers, set correct request id */
 		request_id = req->request_id;
 	} else {
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		/* no io config, so the request is completed. */
 		list_add_tail(&req->list, &ctx->free_req_list);
 		CAM_DBG(CAM_ISP,
@@ -6582,7 +6608,12 @@ static int __cam_isp_ctx_config_dev_in_top_state(
 	cfg.pf_data = &(req->pf_data);
 	cfg.num_out_map_entries = 0;
 	cfg.num_in_map_entries = 0;
+	cfg.buf_tracker = &req->buf_tracker;
 	memset(&req_isp->hw_update_data, 0, sizeof(req_isp->hw_update_data));
+	memset(req_isp->fence_map_out, 0, sizeof(struct cam_hw_fence_map_entry)
+		* ctx->max_out_map_entries);
+
+	INIT_LIST_HEAD(cfg.buf_tracker);
 
 	rc = ctx->hw_mgr_intf->hw_prepare_update(
 		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
@@ -6698,6 +6729,7 @@ put_ref:
 				req_isp->fence_map_out[i].sync_id, ctx->ctx_id, ctx->link_hdl);
 	}
 free_req:
+	cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 	spin_lock_bh(&ctx->lock);
 	list_add_tail(&req->list, &ctx->free_req_list);
 	spin_unlock_bh(&ctx->lock);
@@ -7713,6 +7745,7 @@ static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
 	list_del_init(&req->list);
 
 	if (ctx_isp->offline_context && !req_isp->num_fence_map_out) {
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		list_add_tail(&req->list, &ctx->free_req_list);
 		atomic_set(&ctx_isp->rxd_epoch, 1);
 		CAM_DBG(CAM_REQ,
@@ -7824,6 +7857,7 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
 		req = list_first_entry(&ctx->pending_req_list,
 				struct cam_ctx_request, list);
 		list_del_init(&req->list);
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 		CAM_DBG(CAM_ISP, "signal fence in pending list. fence num %d ctx:%u, link: 0x%x",
 			 req_isp->num_fence_map_out, ctx->ctx_id, ctx->link_hdl);
@@ -7841,6 +7875,7 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
 		req = list_first_entry(&ctx->wait_req_list,
 				struct cam_ctx_request, list);
 		list_del_init(&req->list);
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 		CAM_DBG(CAM_ISP, "signal fence in wait list. fence num %d ctx: %u, link: 0x%x",
 			 req_isp->num_fence_map_out, ctx->ctx_id, ctx->link_hdl);
@@ -7858,6 +7893,7 @@ static int __cam_isp_ctx_stop_dev_in_activated_unlock(
 		req = list_first_entry(&ctx->active_req_list,
 				struct cam_ctx_request, list);
 		list_del_init(&req->list);
+		cam_smmu_buffer_tracker_putref(&req->buf_tracker);
 		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
 		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d ctx: %u, link: 0x%x",
 			 req_isp->num_fence_map_out, ctx->ctx_id, ctx->link_hdl);

+ 9 - 8
drivers/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c

@@ -8155,7 +8155,7 @@ static int cam_isp_scratch_buf_update_util(
 	}
 
 	rc = cam_mem_get_io_buf(buffer_info->mem_handle,
-		mmu_hdl, &io_addr, &size, NULL);
+		mmu_hdl, &io_addr, &size, NULL, NULL);
 	if (rc) {
 		CAM_ERR(CAM_ISP,
 			"no scratch buf addr for res: 0x%x",
@@ -10854,7 +10854,8 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
 
 static int cam_ife_mgr_util_insert_frame_header(
 	struct cam_kmd_buf_info *kmd_buf,
-	struct cam_isp_prepare_hw_update_data *prepare_hw_data)
+	struct cam_isp_prepare_hw_update_data *prepare_hw_data,
+	struct list_head *buf_tracker)
 {
 	int mmu_hdl = -1, rc = 0;
 	dma_addr_t iova_addr;
@@ -10868,7 +10869,7 @@ static int cam_ife_mgr_util_insert_frame_header(
 			hw_mgr->mgr_common.img_iommu_hdl;
 
 	rc = cam_mem_get_io_buf(kmd_buf->handle, mmu_hdl,
-		&iova_addr, &len, NULL);
+		&iova_addr, &len, NULL, buf_tracker);
 	if (rc) {
 		CAM_ERR(CAM_ISP,
 			"Failed to get io addr for handle = %d for mmu_hdl = %u",
@@ -12087,7 +12088,7 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 	struct list_head                        *res_list_ife_rd_tmp = NULL;
 	struct cam_isp_cmd_buf_count             cmd_buf_count = {0};
 	struct cam_isp_check_io_cfg_for_scratch  check_for_scratch = {0};
-	struct cam_isp_io_buf_info          io_buf_info = {0};
+	struct cam_isp_io_buf_info               io_buf_info = {0};
 
 	if (!hw_mgr_priv || !prepare_hw_update_args) {
 		CAM_ERR(CAM_ISP, "Invalid args");
@@ -12099,7 +12100,6 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 	ctx = (struct cam_ife_hw_mgr_ctx *) prepare->ctxt_to_hw_map;
 	hw_mgr = (struct cam_ife_hw_mgr *)hw_mgr_priv;
 
-
 	CAM_DBG(CAM_REQ, "ctx[%pK][%u] Enter for req_id %lld",
 		ctx, ctx->ctx_index, prepare->packet->header.request_id);
 
@@ -12114,7 +12114,7 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 
 	if (ctx->ctx_config & CAM_IFE_CTX_CFG_FRAME_HEADER_TS) {
 		rc = cam_ife_mgr_util_insert_frame_header(&prepare_hw_data->kmd_cmd_buff_info,
-			prepare_hw_data);
+			prepare_hw_data, prepare->buf_tracker);
 		if (rc)
 			return rc;
 
@@ -12124,11 +12124,11 @@ static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
 
 	if (ctx->flags.internal_cdm)
 		rc = cam_packet_util_process_patches(prepare->packet,
-			hw_mgr->mgr_common.img_iommu_hdl,
+			prepare->buf_tracker, hw_mgr->mgr_common.img_iommu_hdl,
 			hw_mgr->mgr_common.img_iommu_hdl_secure, true);
 	else
 		rc = cam_packet_util_process_patches(prepare->packet,
-			hw_mgr->mgr_common.cmd_iommu_hdl,
+			prepare->buf_tracker, hw_mgr->mgr_common.cmd_iommu_hdl,
 			hw_mgr->mgr_common.cmd_iommu_hdl_secure, true);
 
 	if (rc) {
@@ -15672,6 +15672,7 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl,
 		CAM_ERR(CAM_ISP, "Unable to create worker, ctx_idx: %u", ctx_pool->ctx_index);
 		goto end;
 	}
+
 	/* Populate sys cache info */
 	rc = cam_ife_mgr_populate_sys_cache_id();
 	if (rc == -EFAULT) {

+ 2 - 2
drivers/cam_isp/isp_hw_mgr/cam_tfe_hw_mgr.c

@@ -2768,7 +2768,7 @@ static int cam_tfe_mgr_config_hw(void *hw_mgr_priv,
 				cdm_cmd->cmd[i].bl_addr.mem_handle,
 				g_tfe_hw_mgr.mgr_common.cmd_iommu_hdl,
 				&ctx->last_submit_bl_cmd.cmd[i].hw_addr,
-				&ctx->last_submit_bl_cmd.cmd[i].len, NULL);
+				&ctx->last_submit_bl_cmd.cmd[i].len, NULL, NULL);
 		} else if (cdm_cmd->type == CAM_CDM_BL_CMD_TYPE_HW_IOVA) {
 			if (!cdm_cmd->cmd[i].bl_addr.hw_iova) {
 				CAM_ERR(CAM_CDM, "Submitted Hw bl hw_iova is invalid %d:%d",
@@ -4411,7 +4411,7 @@ static int cam_tfe_mgr_prepare_hw_update(void *hw_mgr_priv,
 		return rc;
 
 	rc = cam_packet_util_process_patches(prepare->packet,
-		hw_mgr->mgr_common.cmd_iommu_hdl,
+		prepare->buf_tracker, hw_mgr->mgr_common.cmd_iommu_hdl,
 		hw_mgr->mgr_common.cmd_iommu_hdl_secure, false);
 	if (rc) {
 		CAM_ERR(CAM_ISP, "Patch ISP packet failed.");

+ 17 - 1
drivers/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c

@@ -839,6 +839,7 @@ static int cam_isp_add_io_buffers_util(
 	struct cam_isp_hw_get_cmd_update    update_buf;
 	struct cam_isp_hw_get_wm_update     bus_port_update;
 	struct cam_hw_fence_map_entry      *out_map_entry = NULL;
+	struct cam_smmu_buffer_tracker     *old_head_entry, *new_head_entry;
 	uint32_t                            kmd_buf_remain_size;
 	uint32_t                            plane_id, num_entries;
 	dma_addr_t                         *image_buf_addr;
@@ -873,6 +874,9 @@ static int cam_isp_add_io_buffers_util(
 		return -EINVAL;
 	}
 
+	old_head_entry = list_first_entry_or_null(buf_info->prepare->buf_tracker,
+		struct cam_smmu_buffer_tracker, list);
+
 	memset(io_addr, 0, sizeof(io_addr));
 	for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES; plane_id++) {
 		if (!io_cfg->mem_handle[plane_id])
@@ -905,7 +909,8 @@ static int cam_isp_add_io_buffers_util(
 		}
 
 		rc = cam_mem_get_io_buf(io_cfg->mem_handle[plane_id],
-			mmu_hdl, &io_addr[plane_id], &size, NULL);
+			mmu_hdl, &io_addr[plane_id], &size, NULL,
+			(!plane_id) ? buf_info->prepare->buf_tracker : NULL);
 		if (rc) {
 			CAM_ERR(CAM_ISP, "no io addr for plane%d", plane_id);
 			rc = -ENOMEM;
@@ -1009,6 +1014,17 @@ static int cam_isp_add_io_buffers_util(
 					image_buf_addr[plane_id] = io_addr[plane_id] +
 						image_buf_offset[plane_id];
 			}
+
+			new_head_entry =
+				list_first_entry_or_null(buf_info->prepare->buf_tracker,
+					struct cam_smmu_buffer_tracker, list);
+			if (new_head_entry && old_head_entry != new_head_entry) {
+				out_map_entry->buffer_tracker = new_head_entry;
+				CAM_DBG(CAM_ISP,
+					"[SMMU_BT] Tracking io_buf, buf_handle: 0x%x, fd: 0x%x, res_id: %d",
+					io_cfg->mem_handle[0],
+					out_map_entry->buffer_tracker->ion_fd, res->res_id);
+			}
 		}
 	}
 

+ 2 - 1
drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c

@@ -1033,7 +1033,8 @@ static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv,
 		return -EINVAL;
 	}
 
-	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl, -1, false);
+	rc = cam_packet_util_process_patches(packet, prepare_args->buf_tracker,
+		hw_mgr->iommu_hdl, -1, false);
 	if (rc) {
 		CAM_ERR(CAM_JPEG, "Patch processing failed %d", rc);
 		return rc;

+ 5 - 4
drivers/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c

@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -146,7 +146,8 @@ static int cam_lrme_mgr_util_packet_validate(struct cam_packet *packet,
 static int cam_lrme_mgr_util_prepare_io_buffer(int32_t iommu_hdl,
 	struct cam_hw_prepare_update_args *prepare,
 	struct cam_lrme_hw_io_buffer *input_buf,
-	struct cam_lrme_hw_io_buffer *output_buf, uint32_t io_buf_size)
+	struct cam_lrme_hw_io_buffer *output_buf, uint32_t io_buf_size,
+	struct list_head *buf_tracker)
 {
 	int rc = -EINVAL;
 	uint32_t num_in_buf, num_out_buf, i, j, plane;
@@ -173,7 +174,7 @@ static int cam_lrme_mgr_util_prepare_io_buffer(int32_t iommu_hdl,
 				break;
 
 			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[plane],
-				iommu_hdl, &io_addr[plane], &size, NULL);
+				iommu_hdl, &io_addr[plane], &size, NULL, buf_tracker);
 			if (rc) {
 				CAM_ERR(CAM_LRME, "Cannot get io buf for %d %d",
 					plane, rc);
@@ -930,7 +931,7 @@ static int cam_lrme_mgr_hw_prepare_update(void *hw_mgr_priv,
 	rc = cam_lrme_mgr_util_prepare_io_buffer(
 		hw_mgr->device_iommu.non_secure, args,
 		config_args.input_buf, config_args.output_buf,
-		CAM_LRME_MAX_IO_BUFFER);
+		CAM_LRME_MAX_IO_BUFFER, args->buf_tracker);
 	if (rc) {
 		CAM_ERR(CAM_LRME, "Error in prepare IO Buf %d", rc);
 		goto error;

+ 24 - 16
drivers/cam_ope/ope_hw_mgr/cam_ope_hw_mgr.c

@@ -355,7 +355,7 @@ static int cam_ope_dump_hang_patches(struct cam_packet *packet,
 
 	for (i = 0; i < packet->num_patches; i++) {
 		rc = cam_mem_get_io_buf(patch_desc[i].src_buf_hdl,
-			iommu_hdl, &iova_addr, &src_buf_size, NULL);
+			iommu_hdl, &iova_addr, &src_buf_size, NULL, NULL);
 		if (rc < 0) {
 			CAM_ERR(CAM_UTIL,
 				"get src buf address failed for handle 0x%x",
@@ -381,7 +381,7 @@ static int cam_ope_dump_direct(struct ope_cmd_buf_info *cmd_buf_info,
 	int rc = 0;
 
 	rc = cam_mem_get_io_buf(cmd_buf_info->mem_handle,
-		ope_hw_mgr->iommu_hdl, &iova_addr, &size, NULL);
+		ope_hw_mgr->iommu_hdl, &iova_addr, &size, NULL, NULL);
 	if (rc < 0) {
 		CAM_ERR(CAM_UTIL, "get cmd buf addressfailed for handle 0x%x",
 			cmd_buf_info->mem_handle);
@@ -499,7 +499,7 @@ static int cam_ope_mgr_dump_frame_set(uintptr_t frame_process_addr,
 		for (i = 0; i < frame_process->frame_set[j].num_io_bufs; i++) {
 			io_buf = &frame_process->frame_set[j].io_buf[i];
 			rc = cam_mem_get_io_buf(io_buf->mem_handle[0],
-				ope_hw_mgr->iommu_hdl, &iova_addr, &size, NULL);
+				ope_hw_mgr->iommu_hdl, &iova_addr, &size, NULL, NULL);
 			if (rc) {
 				CAM_ERR(CAM_OPE, "get io buf fail 0x%x",
 					io_buf->mem_handle[0]);
@@ -573,7 +573,7 @@ static int cam_ope_dump_bls(struct cam_ope_request *ope_req,
 	cdm_cmd = ope_req->cdm_cmd;
 	for (i = 0; i < cdm_cmd->cmd_arrary_count; i++) {
 		rc = cam_mem_get_io_buf(cdm_cmd->cmd[i].bl_addr.mem_handle,
-				ope_hw_mgr->iommu_hdl, &iova_addr, &size, NULL);
+				ope_hw_mgr->iommu_hdl, &iova_addr, &size, NULL, NULL);
 		if (rc) {
 			CAM_ERR(CAM_OPE, "get io buf fail 0x%x",
 				cdm_cmd->cmd[i].bl_addr.mem_handle);
@@ -1918,7 +1918,8 @@ static void cam_ope_mgr_print_stripe_info(uint32_t batch,
 
 static int cam_ope_mgr_process_cmd_io_buf_req(struct cam_ope_hw_mgr *hw_mgr,
 	struct cam_packet *packet, struct cam_ope_ctx *ctx_data,
-	uintptr_t frame_process_addr, size_t length, struct cam_ope_request *ope_request)
+	uintptr_t frame_process_addr, size_t length, struct cam_ope_request *ope_request,
+	struct list_head *buf_tracker)
 {
 	int rc = 0;
 	int i, j, k, l;
@@ -2029,12 +2030,12 @@ static int cam_ope_mgr_process_cmd_io_buf_req(struct cam_ope_hw_mgr *hw_mgr,
 					rc = cam_mem_get_io_buf(
 						in_io_buf->mem_handle[k],
 						hw_mgr->iommu_sec_hdl,
-						&iova_addr, &len, NULL);
+						&iova_addr, &len, NULL, buf_tracker);
 				else
 					rc = cam_mem_get_io_buf(
 						in_io_buf->mem_handle[k],
 						hw_mgr->iommu_hdl,
-						&iova_addr, &len, NULL);
+						&iova_addr, &len, NULL, buf_tracker);
 
 				if (rc) {
 					CAM_ERR(CAM_OPE, "get buf failed: %d",
@@ -2045,6 +2046,7 @@ static int cam_ope_mgr_process_cmd_io_buf_req(struct cam_ope_hw_mgr *hw_mgr,
 					CAM_ERR(CAM_OPE, "Invalid length");
 					return -EINVAL;
 				}
+
 				iova_addr += in_io_buf->plane_offset[k];
 				CAM_DBG(CAM_OPE,
 					"E rsc %d stripes %d dir %d plane %d",
@@ -2095,7 +2097,8 @@ static int cam_ope_mgr_process_cmd_io_buf_req(struct cam_ope_hw_mgr *hw_mgr,
 
 static int cam_ope_mgr_process_cmd_buf_req(struct cam_ope_hw_mgr *hw_mgr,
 	struct cam_packet *packet, struct cam_ope_ctx *ctx_data,
-	uintptr_t frame_process_addr, size_t length, struct cam_ope_request *ope_request)
+	uintptr_t frame_process_addr, size_t length, struct cam_ope_request *ope_request,
+	struct list_head *buf_tracker)
 {
 	int rc = 0;
 	int i, j;
@@ -2152,21 +2155,25 @@ static int cam_ope_mgr_process_cmd_buf_req(struct cam_ope_hw_mgr *hw_mgr,
 			switch (cmd_buf->cmd_buf_scope) {
 			case OPE_CMD_BUF_SCOPE_FRAME: {
 				rc = cam_mem_get_io_buf(cmd_buf->mem_handle,
-					hw_mgr->iommu_hdl, &iova_addr, &len, NULL);
+					hw_mgr->iommu_hdl, &iova_addr, &len, NULL,
+						buf_tracker);
 				if (rc) {
 					CAM_ERR(CAM_OPE, "get cmd buffailed %x",
 						hw_mgr->iommu_hdl);
 					goto end;
 				}
+
 				iova_addr = iova_addr + cmd_buf->offset;
 
 				rc = cam_mem_get_io_buf(cmd_buf->mem_handle,
-					hw_mgr->iommu_cdm_hdl, &iova_cdm_addr, &len, NULL);
+					hw_mgr->iommu_cdm_hdl, &iova_cdm_addr, &len, NULL,
+					buf_tracker);
 				if (rc) {
 					CAM_ERR(CAM_OPE, "get cmd buffailed %x",
 						hw_mgr->iommu_hdl);
 					goto end;
 				}
+
 				iova_cdm_addr = iova_cdm_addr + cmd_buf->offset;
 
 				rc = cam_mem_get_cpu_buf(cmd_buf->mem_handle,
@@ -2261,7 +2268,8 @@ end:
 
 static int cam_ope_mgr_process_cmd_desc(struct cam_ope_hw_mgr *hw_mgr,
 	struct cam_packet *packet, struct cam_ope_ctx *ctx_data,
-	uintptr_t *ope_cmd_buf_addr, struct cam_ope_request *ope_request)
+	uintptr_t *ope_cmd_buf_addr, struct cam_ope_request *ope_request,
+	struct list_head *buf_tracker)
 {
 	int rc = 0;
 	int i;
@@ -2305,14 +2313,14 @@ static int cam_ope_mgr_process_cmd_desc(struct cam_ope_hw_mgr *hw_mgr,
 	ope_request->request_id = packet->header.request_id;
 
 	rc = cam_ope_mgr_process_cmd_buf_req(hw_mgr, packet, ctx_data,
-		cpu_addr, len, ope_request);
+		cpu_addr, len, ope_request, buf_tracker);
 	if (rc) {
 		CAM_ERR(CAM_OPE, "Process OPE cmd request is failed: %d", rc);
 		goto end;
 	}
 
 	rc = cam_ope_mgr_process_cmd_io_buf_req(hw_mgr, packet, ctx_data,
-		cpu_addr, len, ope_request);
+		cpu_addr, len, ope_request, buf_tracker);
 	if (rc) {
 		CAM_ERR(CAM_OPE, "Process OPE cmd io request is failed: %d",
 			rc);
@@ -3251,8 +3259,8 @@ static int cam_ope_mgr_prepare_hw_update(void *hw_priv,
 		return -EINVAL;
 	}
 
-	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_cdm_hdl,
-		hw_mgr->iommu_sec_cdm_hdl, false);
+	rc = cam_packet_util_process_patches(packet, prepare_args->buf_tracker,
+		hw_mgr->iommu_cdm_hdl, hw_mgr->iommu_sec_cdm_hdl, false);
 	if (rc) {
 		mutex_unlock(&ctx_data->ctx_mutex);
 		CAM_ERR(CAM_OPE, "Patching failed: %d req_id: %d ctx: %d",
@@ -3291,7 +3299,7 @@ static int cam_ope_mgr_prepare_hw_update(void *hw_priv,
 	}
 
 	rc = cam_ope_mgr_process_cmd_desc(hw_mgr, packet,
-		ctx_data, &ope_cmd_buf_addr, ope_req);
+		ctx_data, &ope_cmd_buf_addr, ope_req, prepare_args->buf_tracker);
 	if (rc) {
 		CAM_ERR(CAM_OPE,
 			"cmd desc processing failed :%d ctx: %d req_id:%d",

+ 4 - 4
drivers/cam_ope/ope_hw_mgr/ope_hw/ope_core.c

@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/of.h>
@@ -547,7 +547,7 @@ static uint32_t *ope_create_frame_cmd_batch(struct cam_ope_hw_mgr *hw_mgr,
 
 		rc = cam_mem_get_io_buf(
 			frm_proc->cmd_buf[i][j].mem_handle,
-			hw_mgr->iommu_cdm_hdl, &iova_addr, &buf_len, NULL);
+			hw_mgr->iommu_cdm_hdl, &iova_addr, &buf_len, NULL, NULL);
 		if (rc) {
 			CAM_ERR(CAM_OPE, "get cmd buf failed %x",
 				hw_mgr->iommu_hdl);
@@ -704,7 +704,7 @@ static uint32_t *ope_create_frame_cmd(struct cam_ope_hw_mgr *hw_mgr,
 
 			rc = cam_mem_get_io_buf(
 				frm_proc->cmd_buf[i][j].mem_handle,
-				hw_mgr->iommu_cdm_hdl, &iova_addr, &buf_len, NULL);
+				hw_mgr->iommu_cdm_hdl, &iova_addr, &buf_len, NULL, NULL);
 			if (rc) {
 				CAM_ERR(CAM_OPE, "get cmd buf failed %x",
 					hw_mgr->iommu_hdl);
@@ -819,7 +819,7 @@ static uint32_t *ope_create_stripe_cmd(struct cam_ope_hw_mgr *hw_mgr,
 
 		CAM_DBG(CAM_OPE, "process stripe %d", stripe_idx);
 		rc = cam_mem_get_io_buf(frm_proc->cmd_buf[i][k].mem_handle,
-			hw_mgr->iommu_cdm_hdl, &iova_addr, &buf_len, NULL);
+			hw_mgr->iommu_cdm_hdl, &iova_addr, &buf_len, NULL, NULL);
 		if (rc) {
 			CAM_DBG(CAM_OPE, "get cmd buf fail %x",
 				hw_mgr->iommu_hdl);

+ 40 - 23
drivers/cam_req_mgr/cam_mem_mgr.c

@@ -24,7 +24,6 @@
 #include "cam_trace.h"
 #include "cam_common_util.h"
 #include "cam_presil_hw_access.h"
-#include "cam_compat.h"
 
 #define CAM_MEM_SHARED_BUFFER_PAD_4K (4 * 1024)
 
@@ -362,7 +361,7 @@ static void cam_mem_put_slot(int32_t idx)
 static void cam_mem_mgr_update_iova_info_locked(
 	struct cam_mem_buf_hw_hdl_info *hw_vaddr_info_arr,
 	dma_addr_t vaddr, int32_t iommu_hdl, size_t len,
-	bool valid_mapping)
+	bool valid_mapping, struct kref *ref_count)
 {
 	int entry;
 	struct cam_mem_buf_hw_hdl_info *vaddr_entry;
@@ -378,11 +377,13 @@ static void cam_mem_mgr_update_iova_info_locked(
 	vaddr_entry->addr_updated = true;
 	vaddr_entry->valid_mapping = valid_mapping;
 	vaddr_entry->len = len;
+	vaddr_entry->ref_count = ref_count;
 }
 
 /* Utility to be invoked with bufq entry lock held */
 static int cam_mem_mgr_try_retrieving_hwva_locked(
-	int idx, int32_t mmu_handle, dma_addr_t *iova_ptr, size_t *len_ptr)
+	int idx, int32_t mmu_handle, dma_addr_t *iova_ptr, size_t *len_ptr,
+	struct list_head *buf_tracker)
 {
 	int rc = -EINVAL, entry;
 	struct cam_mem_buf_hw_hdl_info *hdl_info = NULL;
@@ -395,6 +396,10 @@ static int cam_mem_mgr_try_retrieving_hwva_locked(
 		if ((hdl_info->iommu_hdl == mmu_handle) && (hdl_info->addr_updated)) {
 			*iova_ptr = hdl_info->vaddr;
 			*len_ptr = hdl_info->len;
+			if (buf_tracker)
+				cam_smmu_add_buf_to_track_list(tbl.bufq[idx].fd,
+					tbl.bufq[idx].i_ino, &hdl_info->ref_count, buf_tracker,
+					GET_SMMU_TABLE_IDX(mmu_handle));
 			rc = 0;
 		}
 	}
@@ -403,10 +408,12 @@ static int cam_mem_mgr_try_retrieving_hwva_locked(
 }
 
 int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
-	dma_addr_t *iova_ptr, size_t *len_ptr, uint32_t *flags)
+	dma_addr_t *iova_ptr, size_t *len_ptr, uint32_t *flags,
+	struct list_head *buf_tracker)
 {
 	int rc = 0, idx;
 	bool retrieved_iova = false;
+	struct kref *ref_count;
 
 	*len_ptr = 0;
 
@@ -435,18 +442,19 @@ int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
 		*flags = tbl.bufq[idx].flags;
 
 	/* Try retrieving iova if saved previously */
-	rc = cam_mem_mgr_try_retrieving_hwva_locked(idx, mmu_handle, iova_ptr, len_ptr);
+	rc = cam_mem_mgr_try_retrieving_hwva_locked(idx, mmu_handle, iova_ptr, len_ptr,
+		buf_tracker);
 	if (!rc) {
 		retrieved_iova = true;
 		goto end;
 	}
 
 	if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle))
-		rc = cam_smmu_get_stage2_iova(mmu_handle, tbl.bufq[idx].fd,
-			tbl.bufq[idx].dma_buf, iova_ptr, len_ptr);
+		rc = cam_smmu_get_stage2_iova(mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].dma_buf,
+			iova_ptr, len_ptr, buf_tracker, &ref_count);
 	else
 		rc = cam_smmu_get_iova(mmu_handle, tbl.bufq[idx].fd, tbl.bufq[idx].dma_buf,
-			iova_ptr, len_ptr);
+			iova_ptr, len_ptr, buf_tracker, &ref_count);
 
 	if (rc) {
 		CAM_ERR(CAM_MEM,
@@ -457,7 +465,7 @@ int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
 
 	/* Save iova in bufq for future use */
 	cam_mem_mgr_update_iova_info_locked(tbl.bufq[idx].hdls_info,
-		*iova_ptr, mmu_handle, *len_ptr, false);
+		*iova_ptr, mmu_handle, *len_ptr, false, ref_count);
 
 end:
 	CAM_DBG(CAM_MEM,
@@ -1158,6 +1166,7 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
 	int dir = cam_mem_util_get_dma_dir(flags);
 	bool dis_delayed_unmap = false;
 	dma_addr_t hw_vaddr;
+	struct kref *ref_count;
 
 	if (dir < 0) {
 		CAM_ERR(CAM_MEM, "fail to map DMA direction, dir=%d", dir);
@@ -1180,10 +1189,11 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
 			region = CAM_SMMU_REGION_SHARED;
 
 		if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
-			rc = cam_smmu_map_stage2_iova(mmu_hdls[i], fd, dmabuf, dir, &hw_vaddr, len);
+			rc = cam_smmu_map_stage2_iova(mmu_hdls[i], fd, dmabuf, dir, &hw_vaddr, len,
+				&ref_count);
 		else
 			rc = cam_smmu_map_user_iova(mmu_hdls[i], fd, dmabuf, dis_delayed_unmap, dir,
-				&hw_vaddr, len, region, is_internal);
+				&hw_vaddr, len, region, is_internal, &ref_count);
 		if (rc) {
 			CAM_ERR(CAM_MEM,
 					"Failed %s map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, rc=%d",
@@ -1194,16 +1204,17 @@ static int cam_mem_util_map_hw_va(uint32_t flags,
 
 		/* cache hw va */
 		cam_mem_mgr_update_iova_info_locked(hw_vaddr_info_arr,
-			hw_vaddr, mmu_hdls[i], *len, true);
+			hw_vaddr, mmu_hdls[i], *len, true, ref_count);
 	}
 
 	return rc;
 multi_map_fail:
 	for (--i; i>= 0; i--) {
 		if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
-			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd, dmabuf);
+			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd, dmabuf, false);
 		else
-			cam_smmu_unmap_user_iova(mmu_hdls[i], fd, dmabuf, CAM_SMMU_REGION_IO);
+			cam_smmu_unmap_user_iova(mmu_hdls[i], fd, dmabuf, CAM_SMMU_REGION_IO,
+				false);
 	}
 
 	/* reset any updated entries */
@@ -1497,7 +1508,7 @@ slot_fail:
 
 static int cam_mem_util_unmap_hw_va(int32_t idx,
 	enum cam_smmu_region_id region,
-	enum cam_smmu_mapping_client client)
+	enum cam_smmu_mapping_client client, bool force_unmap)
 {
 	int i, fd, num_hdls;
 	uint32_t flags;
@@ -1533,9 +1544,11 @@ static int cam_mem_util_unmap_hw_va(int32_t idx,
 		hdl_info = &tbl.bufq[idx].hdls_info[i];
 
 		if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
-			rc = cam_smmu_unmap_stage2_iova(hdl_info->iommu_hdl, fd, dma_buf);
+			rc = cam_smmu_unmap_stage2_iova(hdl_info->iommu_hdl, fd, dma_buf,
+				force_unmap);
 		else if (client == CAM_SMMU_MAPPING_USER)
-			rc = cam_smmu_unmap_user_iova(hdl_info->iommu_hdl, fd, dma_buf, region);
+			rc = cam_smmu_unmap_user_iova(hdl_info->iommu_hdl, fd, dma_buf, region,
+				force_unmap);
 		else if (client == CAM_SMMU_MAPPING_KERNEL)
 			rc = cam_smmu_unmap_kernel_iova(hdl_info->iommu_hdl,
 				tbl.bufq[idx].dma_buf, region);
@@ -1576,7 +1589,7 @@ static void cam_mem_mgr_unmap_active_buf(int idx)
 	else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
 		region = CAM_SMMU_REGION_IO;
 
-	cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER);
+	cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER, true);
 
 	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
 		cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf,
@@ -1637,6 +1650,7 @@ void cam_mem_mgr_deinit(void)
 
 	atomic_set(&cam_mem_mgr_state, CAM_MEM_MGR_UNINITIALIZED);
 	cam_mem_mgr_cleanup_table();
+	cam_smmu_driver_deinit();
 	mutex_lock(&tbl.m_lock);
 	bitmap_zero(tbl.bitmap, tbl.bits);
 	kfree(tbl.bitmap);
@@ -1702,7 +1716,8 @@ static int cam_mem_util_unmap(int32_t idx,
 	if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
 		(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
 		(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
-		if (cam_mem_util_unmap_hw_va(idx, region, client))
+		rc = cam_mem_util_unmap_hw_va(idx, region, client, false);
+		if (rc)
 			CAM_ERR(CAM_MEM, "Failed, dmabuf=%pK",
 				tbl.bufq[idx].dma_buf);
 	}
@@ -1878,7 +1893,7 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
 	tbl.bufq[idx].kmdvaddr = kvaddr;
 
 	cam_mem_mgr_update_iova_info_locked(tbl.bufq[idx].hdls_info,
-		iova, inp->smmu_hdl, inp->size, true);
+		iova, inp->smmu_hdl, inp->size, true, NULL);
 
 	tbl.bufq[idx].len = inp->size;
 	tbl.bufq[idx].num_hdls = 1;
@@ -2029,7 +2044,7 @@ int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
 	tbl.bufq[idx].kmdvaddr = kvaddr;
 
 	cam_mem_mgr_update_iova_info_locked(tbl.bufq[idx].hdls_info,
-		iova, inp->smmu_hdl, request_len, true);
+		iova, inp->smmu_hdl, request_len, true, NULL);
 
 	tbl.bufq[idx].len = request_len;
 	tbl.bufq[idx].num_hdls = 1;
@@ -2383,7 +2398,8 @@ int cam_mem_mgr_send_buffer_to_presil(int32_t iommu_hdl, int32_t buf_handle)
 		return -EINVAL;
 	}
 
-	rc = cam_mem_get_io_buf(buf_handle, iommu_hdl, &io_buf_addr, &io_buf_size, NULL);
+	rc = cam_mem_get_io_buf(buf_handle, iommu_hdl, &io_buf_addr, &io_buf_size,
+		NULL, NULL);
 	if (rc || NULL == (void *)io_buf_addr) {
 		CAM_DBG(CAM_PRESIL, "Invalid ioaddr : 0x%x, fd = %d,  dmabuf = %pK",
 			io_buf_addr, fd, dmabuf);
@@ -2464,7 +2480,8 @@ int cam_mem_mgr_retrieve_buffer_from_presil(int32_t buf_handle, uint32_t buf_siz
 
 
 	CAM_DBG(CAM_PRESIL, "buf handle 0x%0x ", buf_handle);
-	rc = cam_mem_get_io_buf(buf_handle, iommu_hdl, &io_buf_addr, &io_buf_size, NULL);
+	rc = cam_mem_get_io_buf(buf_handle, iommu_hdl, &io_buf_addr, &io_buf_size,
+		NULL, NULL);
 	if (rc) {
 		CAM_ERR(CAM_PRESIL, "Unable to get IOVA for buffer buf_hdl: 0x%0x iommu_hdl: 0x%0x",
 			buf_handle, iommu_hdl);

+ 2 - 0
drivers/cam_req_mgr/cam_mem_mgr.h

@@ -46,6 +46,7 @@ struct cam_presil_dmabuf_params {
  * @iommu_hdl:     IOMMU handle for the given bank
  * @vaddr:         IOVA of the buffer
  * @len:           cached length for a given handle
+ * @ref_count:     ref count for buffer
  * @addr_updated:  Indicates if entry is updated only for addr caching
  * @valid_mapping: Indicates if entry is indeed a valid mapping for this buf
  *
@@ -54,6 +55,7 @@ struct cam_mem_buf_hw_hdl_info {
 	int32_t iommu_hdl;
 	dma_addr_t vaddr;
 	size_t len;
+	struct kref *ref_count;
 
 	bool addr_updated;
 	bool valid_mapping;

+ 4 - 1
drivers/cam_req_mgr/cam_mem_mgr_api.h

@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _CAM_MEM_MGR_API_H_
@@ -71,11 +72,13 @@ int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp);
  * @iova_ptr  : Pointer to mmu's iova
  * @len_ptr   : Length of the buffer
  * @flags     : Flags the buffer was allocated with
+ * @buf_tracker: List of buffers we want to keep ref counts on
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
 int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
-	dma_addr_t *iova_ptr, size_t *len_ptr, uint32_t *flags);
+	dma_addr_t *iova_ptr, size_t *len_ptr, uint32_t *flags,
+	struct list_head *buf_tracker);
 
 /**
  * @brief: This indicates begin of CPU access.

+ 234 - 41
drivers/cam_smmu/cam_smmu_api.c

@@ -27,7 +27,6 @@
 #include "camera_main.h"
 #include "cam_trace.h"
 #include "cam_common_util.h"
-#include "cam_compat.h"
 
 #define SHARED_MEM_POOL_GRANULARITY 16
 
@@ -42,6 +41,7 @@
 #define CAM_SMMU_HDL_VALIDATE(x, y) ((x) != ((y) & CAM_SMMU_HDL_MASK))
 
 #define CAM_SMMU_MONITOR_MAX_ENTRIES   100
+#define CAM_SMMU_BUF_TRACKING_POOL     600
 #define CAM_SMMU_INC_MONITOR_HEAD(head, ret) \
 	div_u64_rem(atomic64_add_return(1, head),\
 	CAM_SMMU_MONITOR_MAX_ENTRIES, (ret))
@@ -50,6 +50,7 @@ static int g_num_pf_handled = 1;
 module_param(g_num_pf_handled, int, 0644);
 
 struct cam_fw_alloc_info icp_fw;
+struct cam_smmu_buffer_tracker *buf_tracking_pool;
 
 struct cam_smmu_work_payload {
 	int idx;
@@ -112,9 +113,10 @@ struct cam_smmu_monitor {
 
 struct cam_smmu_debug {
 	struct dentry *dentry;
+	uint32_t fatal_pf_mask;
 	bool cb_dump_enable;
 	bool map_profile_enable;
-	uint32_t fatal_pf_mask;
+	bool disable_buf_tracking;
 };
 
 struct cam_smmu_subregion_info {
@@ -227,10 +229,13 @@ struct cam_iommu_cb_set {
 	struct mutex payload_list_lock;
 	struct list_head payload_list;
 	struct cam_smmu_debug debug_cfg;
+	struct list_head buf_tracker_free_list;
+	struct cam_csf_version csf_version;
+	spinlock_t   s_lock;
 	bool force_cache_allocs;
 	bool need_shared_buffer_padding;
 	bool is_expanded_memory;
-	struct cam_csf_version csf_version;
+	bool is_track_buf_disabled;
 };
 
 static const struct of_device_id msm_cam_smmu_dt_match[] = {
@@ -247,7 +252,8 @@ struct cam_dma_buff_info {
 	enum dma_data_direction dir;
 	enum cam_smmu_region_id region_id;
 	int iommu_dir;
-	int ref_count;
+	int map_count;
+	struct kref ref_count;
 	dma_addr_t paddr;
 	struct list_head list;
 	int ion_fd;
@@ -263,7 +269,8 @@ struct cam_sec_buff_info {
 	struct dma_buf_attachment *attach;
 	struct sg_table *table;
 	enum dma_data_direction dir;
-	int ref_count;
+	int map_count;
+	struct kref ref_count;
 	dma_addr_t paddr;
 	struct list_head list;
 	int ion_fd;
@@ -357,7 +364,8 @@ static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
 static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
 	bool dis_delayed_unmap, enum dma_data_direction dma_dir,
 	dma_addr_t *paddr_ptr, size_t *len_ptr,
-	enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *dmabuf);
+	enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *dmabuf,
+	struct kref **ref_count);
 
 static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
 	struct dma_buf *buf, enum dma_data_direction dma_dir,
@@ -2596,6 +2604,46 @@ int cam_smmu_release_buf_region(enum cam_smmu_region_id region,
 }
 EXPORT_SYMBOL(cam_smmu_release_buf_region);
 
+static int cam_smmu_util_return_map_entry(struct cam_smmu_buffer_tracker *entry)
+{
+	spin_lock_bh(&iommu_cb_set.s_lock);
+	list_add_tail(&entry->list, &iommu_cb_set.buf_tracker_free_list);
+	spin_unlock_bh(&iommu_cb_set.s_lock);
+
+	return 0;
+}
+
+void cam_smmu_buffer_tracker_putref(struct list_head *track_list)
+{
+	struct cam_smmu_buffer_tracker *buffer_tracker, *temp;
+
+	if (iommu_cb_set.is_track_buf_disabled)
+		return;
+
+	if (!track_list || list_empty(track_list))
+		return;
+
+	list_for_each_entry_safe(buffer_tracker, temp, track_list, list) {
+		if (refcount_dec_and_test(&buffer_tracker->ref_count->refcount))
+			CAM_ERR(CAM_SMMU,
+				"[SMMU_BT] Unexpected - buffer reference [fd: 0x%x ino: 0x%x cb: %s] zeroed prior to unmap invocation",
+				buffer_tracker->ion_fd, buffer_tracker->i_ino,
+				buffer_tracker->cb_name);
+		else
+			CAM_DBG(CAM_SMMU,
+				"[SMMU_BT] kref_count after put, [fd: 0x%x ino: 0x%x cb: %s], count: %d",
+				buffer_tracker->ion_fd, buffer_tracker->i_ino,
+				buffer_tracker->cb_name,
+				kref_read(buffer_tracker->ref_count));
+
+		list_del_init(&buffer_tracker->list);
+
+		cam_smmu_util_return_map_entry(buffer_tracker);
+
+	}
+}
+EXPORT_SYMBOL(cam_smmu_buffer_tracker_putref);
+
 static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
 	int idx, enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
 	size_t *len_ptr, enum cam_smmu_region_id region_id,
@@ -2742,7 +2790,7 @@ static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
 	(*mapping_info)->paddr = *paddr_ptr;
 	(*mapping_info)->len = *len_ptr;
 	(*mapping_info)->dir = dma_dir;
-	(*mapping_info)->ref_count = 1;
+	(*mapping_info)->map_count = 1;
 	(*mapping_info)->region_id = region_id;
 
 	if (!*paddr_ptr || !*len_ptr) {
@@ -2786,7 +2834,8 @@ err_out:
 static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
 	bool dis_delayed_unmap, enum dma_data_direction dma_dir,
 	dma_addr_t *paddr_ptr, size_t *len_ptr,
-	enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *buf)
+	enum cam_smmu_region_id region_id, bool is_internal, struct dma_buf *buf,
+	struct kref **ref_count)
 {
 	int rc = -1;
 	struct cam_dma_buff_info *mapping_info = NULL;
@@ -2802,6 +2851,8 @@ static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
 	mapping_info->ion_fd = ion_fd;
 	mapping_info->i_ino = file_inode(buf->file)->i_ino;
 	mapping_info->is_internal = is_internal;
+	kref_init(&mapping_info->ref_count);
+	*ref_count = &mapping_info->ref_count;
 	CAM_GET_TIMESTAMP(mapping_info->ts);
 	/* add to the list */
 	list_add(&mapping_info->list,
@@ -2938,9 +2989,58 @@ static int cam_smmu_unmap_buf_and_remove_from_list(
 	return 0;
 }
 
+static int cam_smmu_util_get_free_map_entry(struct cam_smmu_buffer_tracker **entry)
+{
+	spin_lock_bh(&iommu_cb_set.s_lock);
+	if (list_empty(&iommu_cb_set.buf_tracker_free_list)) {
+		CAM_WARN(CAM_SMMU, "[SMMU_BT] Not enough mem to track buffer");
+		spin_unlock_bh(&iommu_cb_set.s_lock);
+		return -ENOMEM;
+	}
+
+	*entry = list_first_entry(&iommu_cb_set.buf_tracker_free_list,
+		struct cam_smmu_buffer_tracker, list);
+
+	list_del_init(&(*entry)->list);
+	spin_unlock_bh(&iommu_cb_set.s_lock);
+
+	return 0;
+}
+
+int cam_smmu_add_buf_to_track_list(int ion_fd, unsigned long inode,
+	struct kref **ref_count, struct list_head *buf_tracker, int idx)
+{
+	int rc = 0;
+	struct cam_smmu_buffer_tracker *buf;
+
+	if (iommu_cb_set.is_track_buf_disabled)
+		return rc;
+
+	rc = cam_smmu_util_get_free_map_entry(&buf);
+	if (rc == -ENOMEM) {
+		rc = 0;
+		return rc;
+	}
+
+	kref_get(*ref_count);
+
+	buf->ion_fd = ion_fd;
+	buf->i_ino = inode;
+	buf->ref_count = *ref_count;
+	buf->cb_name = iommu_cb_set.cb_info[idx].name[0];
+
+	CAM_DBG(CAM_SMMU,
+		"[SMMU_BT] ref_cnt increased for fd 0x%x, ino 0x%x: %d, cb: %s",
+		buf->ion_fd, buf->i_ino, kref_read(buf->ref_count), buf->cb_name);
+
+	list_add(&buf->list, buf_tracker);
+
+	return rc;
+}
+
 static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
 	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr,
-	struct timespec64 **ts_mapping)
+	struct timespec64 **ts_mapping, unsigned long *inode, struct kref **ref_count)
 {
 	struct cam_dma_buff_info *mapping;
 	unsigned long i_ino;
@@ -2953,6 +3053,8 @@ static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
 			*paddr_ptr = mapping->paddr;
 			*len_ptr = mapping->len;
 			*ts_mapping = &mapping->ts;
+			*inode = i_ino;
+			*ref_count = &mapping->ref_count;
 			return CAM_SMMU_BUFF_EXIST;
 		}
 	}
@@ -2962,7 +3064,7 @@ static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
 
 static enum cam_smmu_buf_state cam_smmu_user_reuse_fd_in_list(int idx,
 	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr,
-	struct timespec64 **ts_mapping)
+	struct timespec64 **ts_mapping, struct kref **ref_count)
 {
 	struct cam_dma_buff_info *mapping;
 	unsigned long i_ino;
@@ -2975,7 +3077,8 @@ static enum cam_smmu_buf_state cam_smmu_user_reuse_fd_in_list(int idx,
 			*paddr_ptr = mapping->paddr;
 			*len_ptr = mapping->len;
 			*ts_mapping = &mapping->ts;
-			mapping->ref_count++;
+			mapping->map_count++;
+			*ref_count = &mapping->ref_count;
 			return CAM_SMMU_BUFF_EXIST;
 		}
 	}
@@ -3001,7 +3104,8 @@ static enum cam_smmu_buf_state cam_smmu_check_dma_buf_in_list(int idx,
 }
 
 static enum cam_smmu_buf_state cam_smmu_check_secure_fd_in_list(int idx,
-	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr)
+	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr,
+	struct kref **ref_count)
 {
 	struct cam_sec_buff_info *mapping;
 	unsigned long i_ino;
@@ -3014,7 +3118,8 @@ static enum cam_smmu_buf_state cam_smmu_check_secure_fd_in_list(int idx,
 		if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) {
 			*paddr_ptr = mapping->paddr;
 			*len_ptr = mapping->len;
-			mapping->ref_count++;
+			mapping->map_count++;
+			*ref_count = &mapping->ref_count;
 			return CAM_SMMU_BUFF_EXIST;
 		}
 	}
@@ -3023,7 +3128,8 @@ static enum cam_smmu_buf_state cam_smmu_check_secure_fd_in_list(int idx,
 }
 
 static enum cam_smmu_buf_state cam_smmu_validate_secure_fd_in_list(int idx,
-	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr)
+	int ion_fd, struct dma_buf *dmabuf, dma_addr_t *paddr_ptr, size_t *len_ptr,
+		unsigned long *inode, struct kref **ref_count)
 {
 	struct cam_sec_buff_info *mapping;
 	unsigned long i_ino;
@@ -3036,6 +3142,8 @@ static enum cam_smmu_buf_state cam_smmu_validate_secure_fd_in_list(int idx,
 		if ((mapping->ion_fd == ion_fd) && (mapping->i_ino == i_ino)) {
 			*paddr_ptr = mapping->paddr;
 			*len_ptr = mapping->len;
+			*inode = i_ino;
+			*ref_count = &mapping->ref_count;
 			return CAM_SMMU_BUFF_EXIST;
 		}
 	}
@@ -3200,7 +3308,7 @@ static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
 	mapping_info->paddr = iova;
 	mapping_info->len = virt_len;
 	mapping_info->iommu_dir = iommu_dir;
-	mapping_info->ref_count = 1;
+	mapping_info->map_count = 1;
 	mapping_info->phys_len = phys_len;
 	mapping_info->region_id = CAM_SMMU_REGION_SCRATCH;
 
@@ -3432,8 +3540,8 @@ handle_err:
 }
 
 static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
-		 enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
-		 size_t *len_ptr, struct dma_buf *dmabuf)
+	enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, struct dma_buf *dmabuf, struct kref **ref_count)
 {
 	int rc = 0;
 	struct dma_buf_attachment *attach = NULL;
@@ -3493,11 +3601,14 @@ static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
 	mapping_info->paddr = *paddr_ptr;
 	mapping_info->len = *len_ptr;
 	mapping_info->dir = dma_dir;
-	mapping_info->ref_count = 1;
+	mapping_info->map_count = 1;
 	mapping_info->buf = dmabuf;
 	mapping_info->attach = attach;
 	mapping_info->table = table;
 
+	kref_init(&mapping_info->ref_count);
+	*ref_count = &mapping_info->ref_count;
+
 	CAM_DBG(CAM_SMMU, "idx=%d, ion_fd=%d, i_ino=%lu, dev=%pOFfp, paddr=0x%llx, len=%zu",
 		idx, ion_fd, mapping_info->i_ino,
 		iommu_cb_set.cb_info[idx].dev->of_node,
@@ -3517,7 +3628,8 @@ err_out:
 }
 
 int cam_smmu_map_stage2_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
-	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr, size_t *len_ptr)
+	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr, size_t *len_ptr,
+	struct kref **ref_count)
 {
 	int idx, rc;
 	enum dma_data_direction dma_dir;
@@ -3567,7 +3679,7 @@ int cam_smmu_map_stage2_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
 	}
 
 	buf_state = cam_smmu_check_secure_fd_in_list(idx, ion_fd, dmabuf, paddr_ptr,
-			len_ptr);
+			len_ptr, ref_count);
 	if (buf_state == CAM_SMMU_BUFF_EXIST) {
 		CAM_DBG(CAM_SMMU,
 			"fd:%d already in list idx:%d, handle=%d give same addr back",
@@ -3576,7 +3688,7 @@ int cam_smmu_map_stage2_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
 		goto get_addr_end;
 	}
 	rc = cam_smmu_map_stage2_buffer_and_add_to_list(idx, ion_fd, dma_dir,
-			paddr_ptr, len_ptr, dmabuf);
+			paddr_ptr, len_ptr, dmabuf, ref_count);
 	if (rc < 0) {
 		CAM_ERR(CAM_SMMU,
 			"Error: mapping or add list fail, idx=%d, handle=%d, fd=%d, rc=%d",
@@ -3624,7 +3736,8 @@ static int cam_smmu_secure_unmap_buf_and_remove_from_list(
 	return 0;
 }
 
-int cam_smmu_unmap_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf)
+int cam_smmu_unmap_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
+	bool force_unmap)
 {
 	int idx, rc;
 	struct cam_sec_buff_info *mapping_info;
@@ -3665,15 +3778,23 @@ int cam_smmu_unmap_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf)
 		goto put_addr_end;
 	}
 
-	mapping_info->ref_count--;
-	if (mapping_info->ref_count > 0) {
+	mapping_info->map_count--;
+	if (mapping_info->map_count > 0) {
 		CAM_DBG(CAM_SMMU,
-			"idx: %d fd = %d ref_count: %d",
-			idx, ion_fd, mapping_info->ref_count);
+			"idx: %d fd = %d map_count: %d",
+			idx, ion_fd, mapping_info->map_count);
 		rc = 0;
 		goto put_addr_end;
 	}
-	mapping_info->ref_count = 0;
+	mapping_info->map_count = 0;
+	if (!force_unmap && kref_read(&mapping_info->ref_count) > 1) {
+		CAM_ERR(CAM_SMMU,
+			"[SMMU_BT] Error: can't unmap buffer as it's still active, idx: %d, cb: %s, fd: 0x%x, ino: 0x%x, ref_count: %d",
+			idx, iommu_cb_set.cb_info[idx].name[0], ion_fd, mapping_info->i_ino,
+			kref_read(&mapping_info->ref_count));
+		rc = -EPERM;
+		goto put_addr_end;
+	}
 
 	/* unmapping one buffer from device */
 	rc = cam_smmu_secure_unmap_buf_and_remove_from_list(mapping_info, idx);
@@ -3739,10 +3860,34 @@ bool cam_smmu_supports_shared_region(int handle)
 	return is_shared;
 }
 
+void cam_smmu_buffer_tracker_buffer_putref(struct cam_smmu_buffer_tracker *entry)
+{
+
+	if (!entry) {
+		CAM_WARN(CAM_ISP, "[SMMU_BT] track buffer entry is NULL");
+		return;
+	}
+
+	if (refcount_dec_and_test(&entry->ref_count->refcount))
+		CAM_ERR(CAM_SMMU,
+			"[SMMU_BT] Unexpected - buffer reference [fd: 0x%x ino: 0x%x cb: %s] zeroed prior to unmap invocation",
+			entry->ion_fd,  entry->i_ino, entry->cb_name);
+	else
+		CAM_DBG(CAM_SMMU,
+			"[SMMU_BT] kref_count after put, [fd: 0x%x ino: 0x%x cb: %s], count: %d",
+			entry->ion_fd, entry->i_ino, entry->cb_name, kref_read(entry->ref_count));
+
+
+	list_del_init(&entry->list);
+
+	cam_smmu_util_return_map_entry(entry);
+
+}
+
 int cam_smmu_map_user_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
 	bool dis_delayed_unmap, enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
 	size_t *len_ptr, enum cam_smmu_region_id region_id,
-	bool is_internal)
+	bool is_internal, struct kref **ref_count)
 {
 	int idx, rc = 0;
 	struct timespec64 *ts = NULL;
@@ -3784,7 +3929,7 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
 	}
 
 	buf_state = cam_smmu_user_reuse_fd_in_list(idx, ion_fd, dmabuf, paddr_ptr,
-		len_ptr, &ts);
+		len_ptr, &ts, ref_count);
 	if (buf_state == CAM_SMMU_BUFF_EXIST) {
 		uint64_t ms = 0, hrs = 0, min = 0, sec = 0;
 
@@ -3801,7 +3946,7 @@ int cam_smmu_map_user_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
 
 	rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd,
 		dis_delayed_unmap, dma_dir, paddr_ptr, len_ptr,
-		region_id, is_internal, dmabuf);
+		region_id, is_internal, dmabuf, ref_count);
 	if (rc < 0) {
 		CAM_ERR(CAM_SMMU,
 			"mapping or add list fail cb:%s idx=%d, fd=%d, region=%d, rc=%d",
@@ -3877,11 +4022,13 @@ get_addr_end:
 EXPORT_SYMBOL(cam_smmu_map_kernel_iova);
 
 int cam_smmu_get_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
-	dma_addr_t *paddr_ptr, size_t *len_ptr)
+	dma_addr_t *paddr_ptr, size_t *len_ptr, struct list_head *buf_tracker,
+	struct kref **ref_count)
 {
 	int idx, rc = 0;
 	struct timespec64 *ts = NULL;
 	enum cam_smmu_buf_state buf_state;
+	unsigned long i_ino;
 
 	if (!paddr_ptr || !len_ptr) {
 		CAM_ERR(CAM_SMMU, "Error: Input pointers are invalid");
@@ -3921,7 +4068,7 @@ int cam_smmu_get_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
 	}
 
 	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, dma_buf, paddr_ptr,
-		len_ptr, &ts);
+		len_ptr, &ts, &i_ino, ref_count);
 	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
 		CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
 		rc = -EINVAL;
@@ -3929,6 +4076,9 @@ int cam_smmu_get_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
 		goto get_addr_end;
 	}
 
+	if (buf_tracker)
+		rc = cam_smmu_add_buf_to_track_list(ion_fd, i_ino, ref_count, buf_tracker, idx);
+
 get_addr_end:
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 	return rc;
@@ -3936,10 +4086,12 @@ get_addr_end:
 EXPORT_SYMBOL(cam_smmu_get_iova);
 
 int cam_smmu_get_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
-	dma_addr_t *paddr_ptr, size_t *len_ptr)
+	dma_addr_t *paddr_ptr, size_t *len_ptr, struct list_head *buf_tracker,
+	struct kref **ref_count)
 {
 	int idx, rc = 0;
 	enum cam_smmu_buf_state buf_state;
+	unsigned long i_ino;
 
 	if (!paddr_ptr || !len_ptr) {
 		CAM_ERR(CAM_SMMU, "Error: Input pointers are invalid");
@@ -3978,7 +4130,8 @@ int cam_smmu_get_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
 		goto get_addr_end;
 	}
 
-	buf_state = cam_smmu_validate_secure_fd_in_list(idx, ion_fd, dma_buf, paddr_ptr, len_ptr);
+	buf_state = cam_smmu_validate_secure_fd_in_list(idx, ion_fd, dma_buf, paddr_ptr, len_ptr,
+		&i_ino, ref_count);
 
 	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
 		CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
@@ -3986,6 +4139,9 @@ int cam_smmu_get_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
 		goto get_addr_end;
 	}
 
+	if (buf_tracker)
+		rc = cam_smmu_add_buf_to_track_list(ion_fd, i_ino, ref_count, buf_tracker, idx);
+
 get_addr_end:
 	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
 	return rc;
@@ -4014,7 +4170,8 @@ static int cam_smmu_unmap_validate_params(int handle)
 }
 
 int cam_smmu_unmap_user_iova(int handle,
-	int ion_fd, struct dma_buf *dma_buf, enum cam_smmu_region_id region_id)
+	int ion_fd, struct dma_buf *dma_buf, enum cam_smmu_region_id region_id,
+	bool force_unmap)
 {
 	int idx, rc;
 	struct cam_dma_buff_info *mapping_info;
@@ -4053,15 +4210,25 @@ int cam_smmu_unmap_user_iova(int handle,
 		goto unmap_end;
 	}
 
-	mapping_info->ref_count--;
-	if (mapping_info->ref_count > 0) {
+	mapping_info->map_count--;
+	if (mapping_info->map_count > 0) {
 		CAM_DBG(CAM_SMMU,
-			"idx: %d fd = %d ref_count: %d",
-			idx, ion_fd, mapping_info->ref_count);
+			"idx: %d, cb: %s fd = %d , ino: 0x%x, map_count: %d, ref_count: %d",
+			idx, iommu_cb_set.cb_info[idx].name[0], ion_fd,
+			mapping_info->i_ino, mapping_info->map_count,
+			kref_read(&mapping_info->ref_count));
 		rc = 0;
 		goto unmap_end;
 	}
-	mapping_info->ref_count = 0;
+	mapping_info->map_count = 0;
+	if (!force_unmap && kref_read(&mapping_info->ref_count) > 1) {
+		CAM_ERR(CAM_SMMU,
+			"[SMMU_BT] Error: can't unmap buffer as it's still active, idx: %d, cb: %s, fd: 0x%x, ino: 0x%x, ref_count: %d",
+			idx, iommu_cb_set.cb_info[idx].name[0], ion_fd, mapping_info->i_ino,
+			kref_read(&mapping_info->ref_count));
+		rc = -EPERM;
+		goto unmap_end;
+	}
 
 	/* Unmapping one buffer from device */
 	CAM_DBG(CAM_SMMU, "SMMU: removing buffer idx = %d", idx);
@@ -5344,6 +5511,8 @@ static int cam_smmu_create_debug_fs(void)
 		iommu_cb_set.debug_cfg.dentry, &iommu_cb_set.debug_cfg.map_profile_enable);
 	debugfs_create_file("fatal_pf_mask", 0644,
 		iommu_cb_set.debug_cfg.dentry, NULL, &cam_smmu_fatal_pf_mask);
+	debugfs_create_bool("disable_buf_tracking", 0644,
+		iommu_cb_set.debug_cfg.dentry, &iommu_cb_set.debug_cfg.disable_buf_tracking);
 
 end:
 	return rc;
@@ -5351,6 +5520,7 @@ end:
 
 int cam_smmu_driver_init(struct cam_csf_version *csf_ver, int32_t *num_cbs)
 {
+	int i;
 	/* expect inputs to be valid */
 	if (!csf_ver || !num_cbs) {
 		CAM_ERR(CAM_SMMU, "Invalid params csf: %p num_cbs: %p",
@@ -5360,12 +5530,34 @@ int cam_smmu_driver_init(struct cam_csf_version *csf_ver, int32_t *num_cbs)
 
 	*num_cbs = iommu_cb_set.cb_num;
 	memcpy(csf_ver, &iommu_cb_set.csf_version, sizeof(*csf_ver));
+
+	iommu_cb_set.is_track_buf_disabled = iommu_cb_set.debug_cfg.disable_buf_tracking;
+
+	if (!iommu_cb_set.is_track_buf_disabled) {
+		buf_tracking_pool = kcalloc(CAM_SMMU_BUF_TRACKING_POOL,
+			sizeof(struct cam_smmu_buffer_tracker), GFP_KERNEL);
+
+		if (!buf_tracking_pool) {
+			CAM_WARN(CAM_SMMU, "[SMMU_BT] Not enough mem for buffer tracker pool");
+			goto end;
+		}
+
+		INIT_LIST_HEAD(&iommu_cb_set.buf_tracker_free_list);
+		for (i = 0; i < CAM_SMMU_BUF_TRACKING_POOL; i++) {
+			INIT_LIST_HEAD(&buf_tracking_pool[i].list);
+			list_add_tail(&buf_tracking_pool[i].list,
+				&iommu_cb_set.buf_tracker_free_list);
+		}
+	}
+
+end:
 	return 0;
 }
 
 void cam_smmu_driver_deinit(void)
 {
-	/* no-op */
+	INIT_LIST_HEAD(&iommu_cb_set.buf_tracker_free_list);
+	kfree(buf_tracking_pool);
 }
 
 static int cam_smmu_fw_dev_component_bind(struct device *dev,
@@ -5459,6 +5651,7 @@ static int cam_smmu_component_bind(struct device *dev,
 
 	INIT_WORK(&iommu_cb_set.smmu_work, cam_smmu_page_fault_work);
 	mutex_init(&iommu_cb_set.payload_list_lock);
+	spin_lock_init(&iommu_cb_set.s_lock);
 	INIT_LIST_HEAD(&iommu_cb_set.payload_list);
 	cam_smmu_create_debug_fs();
 

+ 53 - 6
drivers/cam_smmu/cam_smmu_api.h

@@ -83,6 +83,23 @@ struct cam_csf_version {
 	uint32_t              min_ver;
 };
 
+/**
+ * @brief : cam_smmu_buffer_tracker
+ *
+ * @param: list      : list to be inserted into list of tracked buffers
+ * @param: ref_count : Ptr to kref object of a physical buffer allocated per CB
+ * @param: ion_fd    : fd of buffer
+ * @param: i_ino     : inode of buffer
+ * @param: cb_name   : CB which this buffer belongs to
+ */
+struct cam_smmu_buffer_tracker {
+	struct list_head list;
+	struct kref *ref_count;
+	int ion_fd;
+	unsigned long i_ino;
+	const char *cb_name;
+};
+
 /**
  * @brief          : cam_smmu_pf_info
  *
@@ -186,11 +203,12 @@ int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
  * @len_ptr     : Length of buffer mapped returned by CAM SMMU driver.
  * @region_id   : Memory region identifier
  * @is_internal: Specifies if this buffer is kernel allocated.
+ * @ref_count:   Double ptr to store ref_cnt object in memmgr.
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
 int cam_smmu_map_user_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
 	bool dis_delayed_unmap, enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr,
-	enum cam_smmu_region_id region_id, bool is_internal);
+	enum cam_smmu_region_id region_id, bool is_internal, struct kref **ref_count);
 
 /**
  * @brief        : Maps kernel space IOVA for calling driver
@@ -219,11 +237,13 @@ int cam_smmu_map_kernel_iova(int handle,
  * @param ion_fd: ION handle identifying the memory buffer.
  * @param dma_buf: DMA Buf handle identifying the memory buffer.
  * @param region_id: Region id from which to unmap buffer.
+ * @param force_unmap: If this unmap operation is part of memmgr cleanup
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
 int cam_smmu_unmap_user_iova(int handle,
-	int ion_fd, struct dma_buf *dma_buf, enum cam_smmu_region_id region_id);
+	int ion_fd, struct dma_buf *dma_buf, enum cam_smmu_region_id region_id,
+	bool force_unmap);
 
 /**
  * @brief       : Unmaps kernel IOVA for calling driver
@@ -328,11 +348,14 @@ void cam_smmu_unset_client_page_fault_handler(int handle, void *token);
  * @param dma_buf: DMA buf of memory to map to
  * @param paddr_ptr: Pointer IOVA address that will be returned
  * @param len_ptr: Length of memory mapped
+ * @param buf_tracker: List to add tracked buffers to
+ * @param ref_count: Double ptr to ref_count object for memmgr table
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
 int cam_smmu_get_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
-	dma_addr_t *paddr_ptr, size_t *len_ptr);
+	dma_addr_t *paddr_ptr, size_t *len_ptr, struct list_head *buf_tracker,
+	struct kref **ref_count);
 
 /**
  * @brief Maps memory from an ION fd into IOVA space
@@ -342,11 +365,14 @@ int cam_smmu_get_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
  * @param dma_buf: DMA Buf of memory to map to
  * @param paddr_ptr: Pointer IOVA address that will be returned
  * @param len_ptr: Length of memory mapped
+ * @param buf_tracker: List to add tracked buffers to
+ * @param ref_count: Double ptr to ref_count object for memmgr table
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
 int cam_smmu_get_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
-	dma_addr_t *paddr_ptr, size_t *len_ptr);
+	dma_addr_t *paddr_ptr, size_t *len_ptr, struct list_head *buf_tracker,
+	struct kref **ref_count);
 
 /**
  * @brief Unmaps memory from context bank
@@ -368,11 +394,13 @@ int cam_smmu_put_iova(int handle, int ion_fd, struct dma_buf *dma_buf);
  * @param dir: DMA Direction for the mapping
  * @param dma_addr: Returned IOVA address after mapping
  * @param len_ptr: Length of memory mapped
+ * @param ref_count: Double ptr to store ref_cnt object in memmgr
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
 int cam_smmu_map_stage2_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
-	enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr);
+	enum cam_smmu_map_dir dir, dma_addr_t *dma_addr, size_t *len_ptr,
+	struct kref **ref_count);
 
 /**
 * @brief Unmaps secure memory for SMMU handle
@@ -380,10 +408,12 @@ int cam_smmu_map_stage2_iova(int handle, int ion_fd, struct dma_buf *dmabuf,
  * @param handle: SMMU handle identifying secure context bank
  * @param ion_fd: ION fd to unmap
  * @param dma_buf: DMA Buf to unmap
+ * @param force_unmap: If this unmap operation is part of memmgr cleanup
  *
  * @return Status of operation. Negative in case of error. Zero otherwise.
  */
-int cam_smmu_unmap_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf);
+int cam_smmu_unmap_stage2_iova(int handle, int ion_fd, struct dma_buf *dma_buf,
+	bool force_unmap);
 
 /**
  * @brief Allocates firmware for context bank
@@ -541,4 +571,21 @@ int cam_smmu_driver_init(struct cam_csf_version *csf_ver, int32_t *num_cbs);
  */
 void cam_smmu_driver_deinit(void);
 
+/**
+ * @brief : API to putref on tracked buffers whose ref counts
+ *          are incremented
+ */
+void cam_smmu_buffer_tracker_putref(struct list_head *mapped_io_list);
+
+/**
+ * @brief : API to putref on a specific tracked buffer
+ */
+void cam_smmu_buffer_tracker_buffer_putref(struct cam_smmu_buffer_tracker *entry);
+
+/**
+ * @brief : Add tracked buffers to list that belongs to a context
+ */
+int cam_smmu_add_buf_to_track_list(int ion_fd, unsigned long inode,
+	struct kref **ref_count, struct list_head *buf_tracker, int idx);
+
 #endif /* _CAM_SMMU_API_H_ */

+ 13 - 10
drivers/cam_utils/cam_packet_util.c

@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/types.h>
@@ -240,7 +240,7 @@ void cam_packet_util_dump_patch_info(struct cam_packet *packet,
 		hdl = cam_mem_is_secure_buf(patch_desc[i].src_buf_hdl) ?
 			sec_iommu_hdl : iommu_hdl;
 		rc = cam_mem_get_io_buf(patch_desc[i].src_buf_hdl,
-			hdl, &iova_addr, &src_buf_size, &flags);
+			hdl, &iova_addr, &src_buf_size, &flags, NULL);
 		if (rc < 0) {
 			CAM_ERR(CAM_UTIL,
 				"unable to get src buf address for hdl 0x%x",
@@ -293,7 +293,7 @@ void cam_packet_util_dump_patch_info(struct cam_packet *packet,
 static int cam_packet_util_get_patch_iova(
 	struct cam_patch_unique_src_buf_tbl *tbl,
 	int32_t hdl, uint32_t buf_hdl, dma_addr_t *iova,
-	size_t *buf_size, uint32_t *flags)
+	size_t *buf_size, uint32_t *flags, struct list_head *mapped_io_list)
 {
 	int idx = 0;
 	int rc = 0;
@@ -324,7 +324,8 @@ static int cam_packet_util_get_patch_iova(
 	if (!is_found) {
 		CAM_DBG(CAM_UTIL, "src_hdl 0x%x not found in table entries",
 			buf_hdl);
-		rc = cam_mem_get_io_buf(buf_hdl, hdl, &iova_addr, &src_buf_size, flags);
+		rc = cam_mem_get_io_buf(buf_hdl, hdl, &iova_addr, &src_buf_size, flags,
+			mapped_io_list);
 		if (rc < 0) {
 			CAM_ERR(CAM_UTIL,
 				"unable to get iova for src_hdl: 0x%x",
@@ -349,7 +350,8 @@ static int cam_packet_util_get_patch_iova(
 }
 
 int cam_packet_util_process_patches(struct cam_packet *packet,
-	int32_t iommu_hdl, int32_t sec_mmu_hdl, bool exp_mem)
+	struct list_head *mapped_io_list, int32_t iommu_hdl, int32_t sec_mmu_hdl,
+	bool exp_mem)
 {
 	struct cam_patch_desc *patch_desc = NULL;
 	dma_addr_t iova_addr;
@@ -361,7 +363,7 @@ int cam_packet_util_process_patches(struct cam_packet *packet,
 	int        i  = 0;
 	int        rc = 0;
 	uint32_t   flags = 0;
-	int32_t    hdl;
+	int32_t hdl;
 	struct cam_patch_unique_src_buf_tbl
 		tbl[CAM_UNIQUE_SRC_HDL_MAX];
 
@@ -380,8 +382,9 @@ int cam_packet_util_process_patches(struct cam_packet *packet,
 		hdl = cam_mem_is_secure_buf(patch_desc[i].src_buf_hdl) ?
 			sec_mmu_hdl : iommu_hdl;
 
-		rc = cam_packet_util_get_patch_iova(&tbl[0], hdl,
-			patch_desc[i].src_buf_hdl, &iova_addr, &src_buf_size, &flags);
+		rc = cam_packet_util_get_patch_iova(&tbl[0], hdl, patch_desc[i].src_buf_hdl,
+			&iova_addr, &src_buf_size, &flags, mapped_io_list);
+
 		if (rc) {
 			CAM_ERR(CAM_UTIL,
 				"get_iova failed for patch[%d], src_buf_hdl: 0x%x: rc: %d",
@@ -492,7 +495,7 @@ void cam_packet_util_dump_io_bufs(struct cam_packet *packet,
 				io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
 				iommu_hdl;
 			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
-				mmu_hdl, &iova_addr, &src_buf_size, NULL);
+				mmu_hdl, &iova_addr, &src_buf_size, NULL, NULL);
 			if (rc < 0) {
 				CAM_ERR(CAM_UTIL,
 					"get src buf address fail mem_handle 0x%x",
@@ -661,7 +664,7 @@ int cam_presil_retrieve_buffers_from_packet(struct cam_packet *packet, int iommu
 				break;
 
 			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j], iommu_hdl, &io_addr[j],
-				&size, NULL);
+				&size, NULL, NULL);
 			if (rc) {
 				CAM_ERR(CAM_PRESIL, "no io addr for plane%d", j);
 				rc = -ENOMEM;

+ 4 - 2
drivers/cam_utils/cam_packet_util.h

@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _CAM_PACKET_UTIL_H_
@@ -122,6 +122,7 @@ void cam_packet_util_dump_patch_info(struct cam_packet *packet,
  *                      information from patches.
  *
  * @packet:             Input packet containing Command Buffers and Patches
+ * @mapped_io_list:     List to add patches/buffers to for reference counting
  * @iommu_hdl:          IOMMU handle of the HW Device that received the packet
  * @sec_iommu_hdl:      Secure IOMMU handle of the HW Device that
  *                      received the packet
@@ -132,7 +133,8 @@ void cam_packet_util_dump_patch_info(struct cam_packet *packet,
  *                      Negative: Failure
  */
 int cam_packet_util_process_patches(struct cam_packet *packet,
-	int32_t iommu_hdl, int32_t sec_mmu_hdl, bool exp_mem);
+	struct list_head *mapped_io_list,  int32_t iommu_hdl, int32_t sec_mmu_hdl,
+	bool exp_mem);
 
 /**
  * cam_packet_util_dump_io_bufs()