
msm: adsprpc: enable ramdump collection for CMA persistent buffer

Ramdump collection is missing for persistent mappings. Add a change
to enable ramdump collection for the CMA persistent buffer.

Change-Id: Ic8484c9d9f2814610de78fbafba9cdc65a75d862
Acked-by: DEEPAK SANNAPAREDDY <[email protected]>
Signed-off-by: Vamsi Krishna Gattupalli <[email protected]>
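
Most of the churn in the diff below comes from replacing the do/while retry loop with a single list walk that tracks spinlock ownership in a new lock flag, so the lock can be dropped around sleeping work (the SCM assign back to HLOS, the ramdump collection) and re-acquired before list state is touched again. The following is a minimal sketch of that pattern, not the driver code; every name in it (g_maps_lock, g_maps, struct map_entry, blocking_cleanup) is a hypothetical stand-in rather than a symbol from dsp/adsprpc.c.

/*
 * Sketch of the lock-tracking pattern introduced by this change:
 * a spinlock cannot be held across sleeping calls, so the walk
 * records whether the lock is currently held in `lock`, drops it
 * around blocking work, and re-acquires it before touching state.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct map_entry {
	struct hlist_node hn;
	bool is_persistent;
	bool in_use;
};

static DEFINE_SPINLOCK(g_maps_lock);	/* stands in for me->hlock */
static HLIST_HEAD(g_maps);		/* stands in for me->maps */

/* Stand-in for the sleeping work (hyp unassign, ramdump collection). */
static int blocking_cleanup(struct map_entry *map)
{
	return 0;
}

static int walk_maps_for_ssr(void)
{
	struct map_entry *map;
	struct hlist_node *n;
	unsigned long flags;
	int lock = 0, err = 0;

	spin_lock_irqsave(&g_maps_lock, flags);
	lock = 1;
	/*
	 * Safe only because entries are not freed while the lock is
	 * dropped; in the driver, persistent mappings stay on the list.
	 */
	hlist_for_each_entry_safe(map, n, &g_maps, hn) {
		if (!lock) {
			spin_lock_irqsave(&g_maps_lock, flags);
			lock = 1;
		}
		if (map->is_persistent && map->in_use) {
			/* drop the spinlock around the sleeping call */
			spin_unlock_irqrestore(&g_maps_lock, flags);
			lock = 0;
			err = blocking_cleanup(map);
			if (err)
				goto bail;
			spin_lock_irqsave(&g_maps_lock, flags);
			lock = 1;
			map->in_use = false;
		}
		/* leave the lock dropped for any slow per-entry work */
		if (lock) {
			spin_unlock_irqrestore(&g_maps_lock, flags);
			lock = 0;
		}
	}
bail:
	if (lock)
		spin_unlock_irqrestore(&g_maps_lock, flags);
	return err;
}

The same flag is checked once more at the bail label in the real function, so any error path that jumps out while the lock is still held releases it exactly once.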
Vamsi Krishna Gattupalli committed 2 years ago
commit 21968b8049

1 file changed, 78 insertions(+), 65 deletions(-)

dsp/adsprpc.c  (+78, -65)

@@ -4884,7 +4884,7 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked)
 {
 	struct fastrpc_mmap *match = NULL, *map = NULL;
 	struct hlist_node *n = NULL;
-	int err = 0, ret = 0;
+	int err = 0, ret = 0, lock = 0;
 	struct fastrpc_apps *me = &gfa;
 	struct qcom_dump_segment ramdump_segments_rh;
 	struct list_head head;
@@ -4898,75 +4898,82 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked)
 			goto bail;
 		}
 	}
-	do {
-		match = NULL;
-		spin_lock_irqsave(&me->hlock, irq_flags);
-		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
-			/* In hibernation suspend case fl is NULL, check !fl to cleanup */
-			if (!fl || (fl && map->servloc_name && fl->servloc_name
-				&& !strcmp(map->servloc_name, fl->servloc_name))) {
-				match = map;
-				if (map->is_persistent && map->in_use) {
-					struct secure_vm *rhvm = &me->channel[RH_CID].rhvm;
-					uint64_t phys = map->phys;
-					size_t size = map->size;
+	spin_lock_irqsave(&me->hlock, irq_flags);
+	lock = 1;
+	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
+		if (!lock) {
+			spin_lock_irqsave(&me->hlock, irq_flags);
+			lock = 1;
+		}
+		/* In hibernation suspend case fl is NULL, check !fl to cleanup */
+		if (!fl || (fl && map->servloc_name && fl->servloc_name
+			&& !strcmp(map->servloc_name, fl->servloc_name))) {
+			match = map;
+			if (map->is_persistent && map->in_use) {
+				struct secure_vm *rhvm = &me->channel[RH_CID].rhvm;
+				uint64_t phys = map->phys;
+				size_t size = map->size;
 
+				if (lock) {
 					spin_unlock_irqrestore(&me->hlock, irq_flags);
-					//scm assign it back to HLOS
-					if (rhvm->vmid) {
-						u64 src_perms = 0;
-						struct qcom_scm_vmperm dst_perms = {0};
-						uint32_t i = 0;
-
-						for (i = 0; i < rhvm->vmcount; i++) {
-							src_perms |= BIT(rhvm->vmid[i]);
-						}
-
-						dst_perms.vmid = QCOM_SCM_VMID_HLOS;
-						dst_perms.perm = QCOM_SCM_PERM_RWX;
-						err = qcom_scm_assign_mem(phys, (uint64_t)size,
-									&src_perms, &dst_perms, 1);
-					}
-					if (err) {
-						ADSPRPC_ERR(
-						"rh hyp unassign failed with %d for phys 0x%llx, size %zu\n",
-						err, phys, size);
-						err = -EADDRNOTAVAIL;
-						return err;
+					lock = 0;
+				}
+				//scm assign it back to HLOS
+				if (rhvm->vmid) {
+					u64 src_perms = 0;
+					struct qcom_scm_vmperm dst_perms = {0};
+					uint32_t i = 0;
+
+					for (i = 0; i < rhvm->vmcount; i++) {
+						src_perms |= BIT(rhvm->vmid[i]);
 					}
-					spin_lock_irqsave(&me->hlock, irq_flags);
-					map->in_use = false;
-					/*
-					 * decrementing refcount for persistent mappings
-					 * as incrementing it in fastrpc_get_persistent_map
-					 */
-					map->refs--;
+
+					dst_perms.vmid = QCOM_SCM_VMID_HLOS;
+					dst_perms.perm = QCOM_SCM_PERM_RWX;
+					err = qcom_scm_assign_mem(phys, (uint64_t)size,
+								&src_perms, &dst_perms, 1);
+				}
+				if (err) {
+					ADSPRPC_ERR(
+					"rh hyp unassign failed with %d for phys 0x%llx, size %zu\n",
+					err, phys, size);
+					err = -EADDRNOTAVAIL;
+					goto bail;
 				}
-				if (map->is_persistent) {
-					match = NULL;
-					continue;
+				if (!lock) {
+					spin_lock_irqsave(&me->hlock, irq_flags);
+					lock = 1;
 				}
-				hlist_del_init(&map->hn);
-				break;
+				map->in_use = false;
+				/*
+				 * decrementing refcount for persistent mappings
+				 * as incrementing it in fastrpc_get_persistent_map
+				 */
+				map->refs--;
 			}
+			if (!match->is_persistent)
+				hlist_del_init(&map->hn);
+		}
+		if (lock) {
+			spin_unlock_irqrestore(&me->hlock, irq_flags);
+			lock = 0;
 		}
-		spin_unlock_irqrestore(&me->hlock, irq_flags);
 
 		if (match) {
-			if (match->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
-				err = fastrpc_munmap_rh(match->phys,
-						match->size, match->flags);
-			} else if (match->flags == ADSP_MMAP_HEAP_ADDR) {
-				if (fl)
-					err = fastrpc_munmap_on_dsp_rh(fl, match->phys,
-							match->size, match->flags, 0);
-				else {
-					pr_err("Cannot communicate with DSP, ADSP is down\n");
-					fastrpc_mmap_add(match);
+			if (!match->is_persistent) {
+				if (match->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+					err = fastrpc_munmap_rh(match->phys,
+							match->size, match->flags);
+				} else if (match->flags == ADSP_MMAP_HEAP_ADDR) {
+					if (fl)
+						err = fastrpc_munmap_on_dsp_rh(fl, match->phys,
+								match->size, match->flags, 0);
+					else {
+						pr_err("Cannot communicate with DSP, ADSP is down\n");
+						fastrpc_mmap_add(match);
+					}
 				}
 			}
-			if (err)
-				goto bail;
 			memset(&ramdump_segments_rh, 0, sizeof(ramdump_segments_rh));
 			ramdump_segments_rh.da = match->phys;
 			ramdump_segments_rh.va = (void *)page_address((struct page *)match->va);
@@ -4979,14 +4986,20 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked)
 					pr_err("adsprpc: %s: unable to dump heap (err %d)\n",
 								__func__, ret);
 			}
-			if (!locked)
-				mutex_lock(&fl->map_mutex);
-			fastrpc_mmap_free(match, 0);
-			if (!locked)
-				mutex_unlock(&fl->map_mutex);
+			if (!match->is_persistent) {
+				if (!locked)
+					mutex_lock(&fl->map_mutex);
+				fastrpc_mmap_free(match, 0);
+				if (!locked)
+					mutex_unlock(&fl->map_mutex);
+			}
 		}
-	} while (match);
+	}
 bail:
+	if (lock) {
+		spin_unlock_irqrestore(&me->hlock, irq_flags);
+		lock = 0;
+	}
 	if (err && match) {
 		if (!locked)
 			mutex_lock(&fl->map_mutex);