msm: adsprpc: Handle UAF in fastrpc_mmap_remove_ssr
Currently, unlocking the spinlock in the middle of the maps list iteration can lead to a use-after-free. Fix this by taking the lock, reading one map from the list, stopping the iteration and releasing the lock, then repeating the same sequence until all maps in the list have been processed.

Acked-by: Ramesh Nallagopu <rnallago@qti.qualcomm.com>
Change-Id: I834bdcb9dd55a33f6308188ec1f844b7d81cb30e
Signed-off-by: Ansa Ahmed <quic_ansa@quicinc.com>
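As an illustration of the pattern the commit message describes, here is a minimal userspace C sketch (not driver code; the names demo_map and demo_take_one and the pthread mutex standing in for the kernel spinlock are hypothetical). Each pass takes the lock, detaches exactly one entry, drops the lock, and only then processes the entry, so the walk never dereferences a node that could be freed while the lock is not held.

/* Hypothetical userspace demo of the "lock, take one, unlock, process" pattern. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_map {
	int id;
	struct demo_map *next;
};

static struct demo_map *demo_list;
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Detach a single entry under the lock; return NULL when the list is empty. */
static struct demo_map *demo_take_one(void)
{
	struct demo_map *m;

	pthread_mutex_lock(&demo_lock);
	m = demo_list;
	if (m)
		demo_list = m->next;
	pthread_mutex_unlock(&demo_lock);
	return m;
}

int main(void)
{
	struct demo_map *m;
	int i;

	/* Build a small list to drain. */
	for (i = 0; i < 3; i++) {
		m = malloc(sizeof(*m));
		if (!m)
			return 1;
		m->id = i;
		m->next = demo_list;
		demo_list = m;
	}

	/* Lock, take one entry, unlock, process it, repeat until the list is empty. */
	while ((m = demo_take_one()) != NULL) {
		printf("processing map %d outside the lock\n", m->id);
		free(m);
	}
	return 0;
}

In the driver diff below, the same idea is realized by fastrpc_mmap_remove_ssr marking one map as is_dumped and detaching it under me->hlock, then handing it to fastrpc_mmap_dump only after the spinlock has been released.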
dsp/adsprpc.c | 104
@@ -5193,44 +5193,20 @@ bail:
 	return err;
 }
 
-static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked)
+static int fastrpc_mmap_dump(struct fastrpc_mmap *map, struct fastrpc_file *fl, int locked)
 {
-	struct fastrpc_mmap *match = NULL, *map = NULL;
-	struct hlist_node *n = NULL;
-	int err = 0, ret = 0, lock = 0;
+	struct fastrpc_mmap *match = map;
+	int err = 0, ret = 0;
 	struct fastrpc_apps *me = &gfa;
 	struct qcom_dump_segment ramdump_segments_rh;
 	struct list_head head;
 	unsigned long irq_flags = 0;
 
 	INIT_LIST_HEAD(&head);
-	if (fl) {
-		VERIFY(err, fl->cid == RH_CID);
-		if (err) {
-			err = -EBADR;
-			goto bail;
-		}
-	}
-	spin_lock_irqsave(&me->hlock, irq_flags);
-	lock = 1;
-	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
-		if (!lock) {
-			spin_lock_irqsave(&me->hlock, irq_flags);
-			lock = 1;
-		}
-		/* In hibernation suspend case fl is NULL, check !fl to cleanup */
-		if (!fl || (fl && map->servloc_name && fl->servloc_name
-			&& !strcmp(map->servloc_name, fl->servloc_name))) {
-			match = map;
 	if (map->is_persistent && map->in_use) {
 		struct secure_vm *rhvm = &me->channel[RH_CID].rhvm;
 		uint64_t phys = map->phys;
 		size_t size = map->size;
 
-			if (lock) {
-				spin_unlock_irqrestore(&me->hlock, irq_flags);
-				lock = 0;
-			}
 		//scm assign it back to HLOS
 		if (rhvm->vmid) {
 			u64 src_perms = 0;
@@ -5251,28 +5227,17 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked)
 				"rh hyp unassign failed with %d for phys 0x%llx, size %zu\n",
 				err, phys, size);
 			err = -EADDRNOTAVAIL;
-			goto bail;
+			return err;
 		}
-		if (!lock) {
-			spin_lock_irqsave(&me->hlock, irq_flags);
-			lock = 1;
-		}
 		map->in_use = false;
 		/*
 		 * decrementing refcount for persistent mappings
 		 * as incrementing it in fastrpc_get_persistent_map
 		 */
 		map->refs--;
 	}
-	if (!match->is_persistent)
-		hlist_del_init(&map->hn);
-	}
-	if (lock) {
-		spin_unlock_irqrestore(&me->hlock, irq_flags);
-		lock = 0;
-	}
 
 	if (match) {
 		if (!match->is_persistent) {
 			if (match->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
 				err = fastrpc_munmap_rh(match->phys,
@@ -5286,6 +5251,8 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked)
 			fastrpc_mmap_add(match);
 		}
 	}
+	if (err)
+		return err;
 	}
 	memset(&ramdump_segments_rh, 0, sizeof(ramdump_segments_rh));
 	ramdump_segments_rh.da = match->phys;
@@ -5300,26 +5267,69 @@ static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked)
 			__func__, ret);
 	}
 	if (!match->is_persistent) {
-		if (!locked)
+		if (!locked && fl)
 			mutex_lock(&fl->map_mutex);
 		fastrpc_mmap_free(match, 0);
-		if (!locked)
+		if (!locked && fl)
 			mutex_unlock(&fl->map_mutex);
 	}
+	return 0;
+}
+
+static int fastrpc_mmap_remove_ssr(struct fastrpc_file *fl, int locked)
+{
+	struct fastrpc_mmap *match = NULL, *map = NULL;
+	struct hlist_node *n = NULL;
+	int err = 0;
+	struct fastrpc_apps *me = &gfa;
+	struct list_head head;
+	unsigned long irq_flags = 0;
+
+	INIT_LIST_HEAD(&head);
+	if (fl) {
+		VERIFY(err, fl->cid == RH_CID);
+		if (err) {
+			err = -EBADR;
+			goto bail;
+		}
+	}
+
+	do {
+		match = NULL;
+		spin_lock_irqsave(&me->hlock, irq_flags);
+
+		hlist_for_each_entry_safe(map, n, &me->maps, hn) {
+			if (!map->is_dumped && (!fl ||
+				(fl && map->servloc_name && fl->servloc_name &&
+				!strcmp(map->servloc_name, fl->servloc_name)))) {
+				map->is_dumped = true;
+				match = map;
+				if (!match->is_persistent)
+					hlist_del_init(&map->hn);
+				break;
+			}
+		}
-bail:
-	if (lock) {
 		spin_unlock_irqrestore(&me->hlock, irq_flags);
-		lock = 0;
-	}
+		if (match)
+			err = fastrpc_mmap_dump(match, fl, locked);
+	} while (match && !err);
+
+bail:
 	if (err && match) {
-		if (!locked)
+		if (!locked && fl)
 			mutex_lock(&fl->map_mutex);
 		fastrpc_mmap_add(match);
-		if (!locked)
+		if (!locked && fl)
 			mutex_unlock(&fl->map_mutex);
 	}
+	spin_lock_irqsave(&me->hlock, irq_flags);
+	hlist_for_each_entry_safe(map, n, &me->maps, hn) {
+		if (map->is_dumped && ((!fl && map->servloc_name) ||
+			(fl && map->servloc_name && fl->servloc_name &&
+			!strcmp(map->servloc_name, fl->servloc_name))))
+			map->is_dumped = false;
+	}
+	spin_unlock_irqrestore(&me->hlock, irq_flags);
 	return err;
 }
 
@@ -785,6 +785,7 @@ struct fastrpc_mmap {
 	struct timespec64 map_end_time;
 	/* Mapping for fastrpc shell */
 	bool is_filemap;
+	bool is_dumped; /* flag to indicate map is dumped during SSR */
 	char *servloc_name; /* Indicate which daemon mapped this */
 	/* Indicates map is being used by a pending RPC call */
 	unsigned int ctx_refs;