msm: adsprpc: enable ramdump collection for CMA persistent buffer

Ramdump collection is missing for persistent mappings. Added change
to enable ramdump collection for CMA persistent buffer

Change-Id: Ic8484c9d9f2814610de78fbafba9cdc65a75d862
Acked-by: DEEPAK SANNAPAREDDY <sdeeredd@qti.qualcomm.com>
Signed-off-by: Vamsi Krishna Gattupalli <quic_vgattupa@quicinc.com>
This commit is contained in:
Vamsi Krishna Gattupalli
2023-04-26 12:33:47 +05:30
parent 2c1d233879
commit 21968b8049

View file

@@ -4884,7 +4884,7 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked)
{ {
struct fastrpc_mmap *match = NULL, *map = NULL; struct fastrpc_mmap *match = NULL, *map = NULL;
struct hlist_node *n = NULL; struct hlist_node *n = NULL;
int err = 0, ret = 0; int err = 0, ret = 0, lock = 0;
struct fastrpc_apps *me = &gfa; struct fastrpc_apps *me = &gfa;
struct qcom_dump_segment ramdump_segments_rh; struct qcom_dump_segment ramdump_segments_rh;
struct list_head head; struct list_head head;
@@ -4898,75 +4898,82 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked)
goto bail; goto bail;
} }
} }
do { spin_lock_irqsave(&me->hlock, irq_flags);
match = NULL; lock = 1;
spin_lock_irqsave(&me->hlock, irq_flags); hlist_for_each_entry_safe(map, n, &me->maps, hn) {
hlist_for_each_entry_safe(map, n, &me->maps, hn) { if (!lock) {
/* In hibernation suspend case fl is NULL, check !fl to cleanup */ spin_lock_irqsave(&me->hlock, irq_flags);
if (!fl || (fl && map->servloc_name && fl->servloc_name lock = 1;
&& !strcmp(map->servloc_name, fl->servloc_name))) { }
match = map; /* In hibernation suspend case fl is NULL, check !fl to cleanup */
if (map->is_persistent && map->in_use) { if (!fl || (fl && map->servloc_name && fl->servloc_name
struct secure_vm *rhvm = &me->channel[RH_CID].rhvm; && !strcmp(map->servloc_name, fl->servloc_name))) {
uint64_t phys = map->phys; match = map;
size_t size = map->size; if (map->is_persistent && map->in_use) {
struct secure_vm *rhvm = &me->channel[RH_CID].rhvm;
spin_unlock_irqrestore(&me->hlock, irq_flags); uint64_t phys = map->phys;
//scm assign it back to HLOS size_t size = map->size;
if (rhvm->vmid) {
u64 src_perms = 0; if (lock) {
struct qcom_scm_vmperm dst_perms = {0}; spin_unlock_irqrestore(&me->hlock, irq_flags);
uint32_t i = 0; lock = 0;
}
for (i = 0; i < rhvm->vmcount; i++) { //scm assign it back to HLOS
src_perms |= BIT(rhvm->vmid[i]); if (rhvm->vmid) {
} u64 src_perms = 0;
struct qcom_scm_vmperm dst_perms = {0};
dst_perms.vmid = QCOM_SCM_VMID_HLOS; uint32_t i = 0;
dst_perms.perm = QCOM_SCM_PERM_RWX;
err = qcom_scm_assign_mem(phys, (uint64_t)size, for (i = 0; i < rhvm->vmcount; i++) {
&src_perms, &dst_perms, 1); src_perms |= BIT(rhvm->vmid[i]);
} }
if (err) {
ADSPRPC_ERR( dst_perms.vmid = QCOM_SCM_VMID_HLOS;
"rh hyp unassign failed with %d for phys 0x%llx, size %zu\n", dst_perms.perm = QCOM_SCM_PERM_RWX;
err, phys, size); err = qcom_scm_assign_mem(phys, (uint64_t)size,
err = -EADDRNOTAVAIL; &src_perms, &dst_perms, 1);
return err; }
} if (err) {
spin_lock_irqsave(&me->hlock, irq_flags); ADSPRPC_ERR(
map->in_use = false; "rh hyp unassign failed with %d for phys 0x%llx, size %zu\n",
/* err, phys, size);
* decrementing refcount for persistent mappings err = -EADDRNOTAVAIL;
* as incrementing it in fastrpc_get_persistent_map goto bail;
*/ }
map->refs--; if (!lock) {
} spin_lock_irqsave(&me->hlock, irq_flags);
if (map->is_persistent) { lock = 1;
match = NULL; }
continue; map->in_use = false;
} /*
hlist_del_init(&map->hn); * decrementing refcount for persistent mappings
break; * as incrementing it in fastrpc_get_persistent_map
} */
map->refs--;
}
if (!match->is_persistent)
hlist_del_init(&map->hn);
}
if (lock) {
spin_unlock_irqrestore(&me->hlock, irq_flags);
lock = 0;
} }
spin_unlock_irqrestore(&me->hlock, irq_flags);
if (match) { if (match) {
if (match->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) { if (!match->is_persistent) {
err = fastrpc_munmap_rh(match->phys, if (match->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
match->size, match->flags); err = fastrpc_munmap_rh(match->phys,
} else if (match->flags == ADSP_MMAP_HEAP_ADDR) { match->size, match->flags);
if (fl) } else if (match->flags == ADSP_MMAP_HEAP_ADDR) {
err = fastrpc_munmap_on_dsp_rh(fl, match->phys, if (fl)
match->size, match->flags, 0); err = fastrpc_munmap_on_dsp_rh(fl, match->phys,
else { match->size, match->flags, 0);
pr_err("Cannot communicate with DSP, ADSP is down\n"); else {
fastrpc_mmap_add(match); pr_err("Cannot communicate with DSP, ADSP is down\n");
fastrpc_mmap_add(match);
}
} }
} }
if (err)
goto bail;
memset(&ramdump_segments_rh, 0, sizeof(ramdump_segments_rh)); memset(&ramdump_segments_rh, 0, sizeof(ramdump_segments_rh));
ramdump_segments_rh.da = match->phys; ramdump_segments_rh.da = match->phys;
ramdump_segments_rh.va = (void *)page_address((struct page *)match->va); ramdump_segments_rh.va = (void *)page_address((struct page *)match->va);
@@ -4979,14 +4986,20 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked)
pr_err("adsprpc: %s: unable to dump heap (err %d)\n", pr_err("adsprpc: %s: unable to dump heap (err %d)\n",
__func__, ret); __func__, ret);
} }
if (!locked) if (!match->is_persistent) {
mutex_lock(&fl->map_mutex); if (!locked)
fastrpc_mmap_free(match, 0); mutex_lock(&fl->map_mutex);
if (!locked) fastrpc_mmap_free(match, 0);
mutex_unlock(&fl->map_mutex); if (!locked)
mutex_unlock(&fl->map_mutex);
}
} }
} while (match); }
bail: bail:
if (lock) {
spin_unlock_irqrestore(&me->hlock, irq_flags);
lock = 0;
}
if (err && match) { if (err && match) {
if (!locked) if (!locked)
mutex_lock(&fl->map_mutex); mutex_lock(&fl->map_mutex);