Selaa lähdekoodia

msm: eva: Fix fd reuse problem.

Fix KW issue and fd reuse issue in persistent buffer mapping.

Change-Id: I9d971abe14460ac57d9f48ee086f97abec1b6f2b
Signed-off-by: George Shen <[email protected]>
George Shen 2 vuotta sitten
vanhempi
sitoutus
04232ceea4
4 muutettua tiedostoa joissa 29 lisäystä ja 12 poistoa
  1. 3 2
      msm/eva/hfi_response_handler.c
  2. 5 2
      msm/eva/msm_cvp.c
  3. 13 1
      msm/eva/msm_cvp_buf.c
  4. 8 7
      msm/eva/msm_cvp_core.c

+ 3 - 2
msm/eva/hfi_response_handler.c

@@ -105,8 +105,9 @@ static int hfi_process_session_error(u32 device_id,
 		break;
 	default:
 		dprintk(CVP_ERR,
-			"%s: session %x data1 %#x, data2 %#x\n", __func__,
-			pkt->session_id, pkt->event_data1, pkt->event_data2);
+			"%s: session %x id %#x, data1 %#x, data2 %#x\n",
+			__func__, pkt->session_id, pkt->event_id,
+			pkt->event_data1, pkt->event_data2);
 		info->response_type = HAL_RESPONSE_UNUSED;
 		break;
 	}

+ 5 - 2
msm/eva/msm_cvp.c

@@ -473,6 +473,9 @@ static int cvp_fence_proc(struct msm_cvp_inst *inst,
 
 	dprintk(CVP_SYNX, "%s %s\n", current->comm, __func__);
 
+	if (!inst || !inst->core)
+		return -EINVAL;
+
 	hdev = inst->core->device;
 	sq = &inst->session_queue_fence;
 	ktid = pkt->client_data.kdata;
@@ -509,10 +512,10 @@ static int cvp_fence_proc(struct msm_cvp_inst *inst,
 			dprintk(CVP_PWR, "busy cycle %d, total %d\n",
 				fhdr->busy_cycles, fhdr->total_cycles);
 
-			if (core && (core->dyn_clk.sum_fps[HFI_HW_FDU] ||
+			if (core->dyn_clk.sum_fps[HFI_HW_FDU] ||
 				core->dyn_clk.sum_fps[HFI_HW_MPU] ||
 				core->dyn_clk.sum_fps[HFI_HW_OD] ||
-				core->dyn_clk.sum_fps[HFI_HW_ICA])) {
+				core->dyn_clk.sum_fps[HFI_HW_ICA]) {
 				clock_check = true;
 			}
 		} else {

+ 13 - 1
msm/eva/msm_cvp_buf.c

@@ -1235,26 +1235,38 @@ static u32 msm_cvp_map_user_persist_buf(struct msm_cvp_inst *inst,
 	struct msm_cvp_smem *smem = NULL;
 	struct list_head *ptr, *next;
 	struct cvp_internal_buf *pbuf;
+	struct dma_buf *dma_buf;
 
 	if (!inst) {
 		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
 
+	dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
+	if (!dma_buf)
+		return -EINVAL;
+
 	mutex_lock(&inst->persistbufs.lock);
 	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
 		pbuf = list_entry(ptr, struct cvp_internal_buf, list);
-		if (buf->fd == pbuf->fd) {
+		if (dma_buf == pbuf->smem->dma_buf) {
 			pbuf->size =
 				(pbuf->size >= buf->size) ?
 				pbuf->size : buf->size;
 			iova = pbuf->smem->device_addr + buf->offset;
 			mutex_unlock(&inst->persistbufs.lock);
+			atomic_inc(&pbuf->smem->refcount);
+			dma_buf_put(dma_buf);
+			dprintk(CVP_MEM,
+				"map persist Reuse fd %d, dma_buf %#llx\n",
+				pbuf->fd, pbuf->smem->dma_buf);
 			return iova;
 		}
 	}
 	mutex_unlock(&inst->persistbufs.lock);
 
+	dma_buf_put(dma_buf);
+
 	pbuf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
 	if (!pbuf) {
 		dprintk(CVP_ERR, "%s failed to allocate kmem obj\n",

+ 8 - 7
msm/eva/msm_cvp_core.c

@@ -49,16 +49,16 @@ int msm_cvp_private(void *cvp_inst, unsigned int cmd,
 }
 EXPORT_SYMBOL(msm_cvp_private);
 
-static bool msm_cvp_check_for_inst_overload(struct msm_cvp_core *core)
+static bool msm_cvp_check_for_inst_overload(struct msm_cvp_core *core,
+		u32 *instance_count)
 {
-	u32 instance_count = 0;
 	u32 secure_instance_count = 0;
 	struct msm_cvp_inst *inst = NULL;
 	bool overload = false;
 
 	mutex_lock(&core->lock);
 	list_for_each_entry(inst, &core->instances, list) {
-		instance_count++;
+		(*instance_count)++;
 		/* This flag is not updated yet for the current instance */
 		if (inst->flags & CVP_SECURE)
 			secure_instance_count++;
@@ -67,7 +67,7 @@ static bool msm_cvp_check_for_inst_overload(struct msm_cvp_core *core)
 
 	/* Instance count includes current instance as well. */
 
-	if ((instance_count >= core->resources.max_inst_count) ||
+	if ((*instance_count >= core->resources.max_inst_count) ||
 		(secure_instance_count >=
 			core->resources.max_secure_inst_count))
 		overload = true;
@@ -130,6 +130,7 @@ void *msm_cvp_open(int core_id, int session_type, struct task_struct *task)
 	struct msm_cvp_core *core = NULL;
 	int rc = 0;
 	int i = 0;
+	u32 instance_count;
 
 	if (core_id >= MSM_CVP_CORES_MAX ||
 			session_type >= MSM_CVP_MAX_DEVICES) {
@@ -150,7 +151,7 @@ void *msm_cvp_open(int core_id, int session_type, struct task_struct *task)
 	}
 
 	core->resources.max_inst_count = MAX_SUPPORTED_INSTANCES;
-	if (msm_cvp_check_for_inst_overload(core)) {
+	if (msm_cvp_check_for_inst_overload(core, &instance_count)) {
 		dprintk(CVP_ERR, "Instance num reached Max, rejecting session");
 		mutex_lock(&core->lock);
 		list_for_each_entry(inst, &core->instances, list)
@@ -168,8 +169,8 @@ void *msm_cvp_open(int core_id, int session_type, struct task_struct *task)
 	}
 
 	pr_info(
-		CVP_DBG_TAG "%s opening cvp instance: %pK type %d\n",
-		"sess", task->comm, inst, session_type);
+		CVP_DBG_TAG "%s opening cvp instance: %pK type %d cnt %d\n",
+		"sess", task->comm, inst, session_type, instance_count);
 	mutex_init(&inst->sync_lock);
 	mutex_init(&inst->lock);
 	spin_lock_init(&inst->event_handler.lock);