@@ -1876,8 +1876,9 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
 	}
 
 	INIT_HLIST_NODE(&ctx->hn);
-	INIT_LIST_HEAD(&ctx->asyncn);
+	INIT_HLIST_NODE(&ctx->asyncn);
 	hlist_add_fake(&ctx->hn);
+	hlist_add_fake(&ctx->asyncn);
 	ctx->fl = fl;
 	ctx->maps = (struct fastrpc_mmap **)(&ctx[1]);
 	ctx->lpra = (remote_arg_t *)(&ctx->maps[bufs]);
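The hlist_add_fake() added for ctx->asyncn here is what later lets the completion path use hlist_unhashed() as a "has this context already been taken off the async queue?" test. A minimal sketch of that idiom follows, assuming a hypothetical demo_node type rather than the driver's struct smq_invoke_ctx:

#include <linux/list.h>

struct demo_node {
	struct hlist_node link;
};

static void demo_node_init(struct demo_node *d)
{
	INIT_HLIST_NODE(&d->link);
	/*
	 * hlist_add_fake() points link.pprev at link.next, so
	 * hlist_unhashed(&d->link) reads false from here on even though
	 * the node is on no list yet; only hlist_del_init() flips it
	 * back to the unhashed state.
	 */
	hlist_add_fake(&d->link);
}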
@@ -2098,10 +2099,12 @@ static void fastrpc_queue_completed_async_job(struct smq_invoke_ctx *ctx)
 	spin_lock_irqsave(&fl->aqlock, flags);
 	if (ctx->is_early_wakeup)
 		goto bail;
-	list_add_tail(&ctx->asyncn, &fl->clst.async_queue);
-	atomic_add(1, &fl->async_queue_job_count);
-	ctx->is_early_wakeup = true;
-	wake_up_interruptible(&fl->async_wait_queue);
+	if (!hlist_unhashed(&ctx->asyncn)) {
+		hlist_add_head(&ctx->asyncn, &fl->clst.async_queue);
+		atomic_add(1, &fl->async_queue_job_count);
+		ctx->is_early_wakeup = true;
+		wake_up_interruptible(&fl->async_wait_queue);
+	}
 bail:
 	spin_unlock_irqrestore(&fl->aqlock, flags);
 }
@@ -2355,7 +2358,7 @@ static void context_list_ctor(struct fastrpc_ctx_lst *me)
 	INIT_HLIST_HEAD(&me->interrupted);
 	INIT_HLIST_HEAD(&me->pending);
 	me->num_active_ctxs = 0;
-	INIT_LIST_HEAD(&me->async_queue);
+	INIT_HLIST_HEAD(&me->async_queue);
 	INIT_LIST_HEAD(&me->notif_queue);
 }
 
@@ -3502,10 +3505,11 @@ static int fastrpc_wait_on_async_queue(
 			struct fastrpc_file *fl)
 {
 	int err = 0, ierr = 0, interrupted = 0, perfErr = 0;
-	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL, *n = NULL;
+	struct smq_invoke_ctx *ctx = NULL, *ictx = NULL;
 	unsigned long flags;
 	uint64_t *perf_counter = NULL;
 	bool isworkdone = false;
+	struct hlist_node *n;
 
 read_async_job:
 	interrupted = wait_event_interruptible(fl->async_wait_queue,
@@ -3523,8 +3527,8 @@ read_async_job:
 		goto bail;
 
 	spin_lock_irqsave(&fl->aqlock, flags);
-	list_for_each_entry_safe(ictx, n, &fl->clst.async_queue, asyncn) {
-		list_del_init(&ictx->asyncn);
+	hlist_for_each_entry_safe(ictx, n, &fl->clst.async_queue, asyncn) {
+		hlist_del_init(&ictx->asyncn);
 		atomic_sub(1, &fl->async_queue_job_count);
 		ctx = ictx;
 		break;
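Paired with the guarded enqueue in fastrpc_queue_completed_async_job above, the hlist_del_init() here is what disarms the !hlist_unhashed() check: once a waiter has claimed a context, a late or duplicate completion can no longer re-queue it. (hlist_head carries no tail pointer, which is also why the conversion turns list_add_tail() into hlist_add_head().) A compile-oriented sketch of that producer/consumer pairing, with hypothetical demo_* names standing in for the driver's types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_job {
	struct hlist_node node;	/* fake-hashed at allocation time */
	bool signalled;		/* plays the role of ctx->is_early_wakeup */
};

/* Completion side: queue the job at most once per lifetime. */
static void demo_complete(struct demo_job *job, struct hlist_head *q,
			  spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (!job->signalled && !hlist_unhashed(&job->node)) {
		hlist_add_head(&job->node, q);
		job->signalled = true;
	}
	spin_unlock_irqrestore(lock, flags);
}

/*
 * Waiter side: claim one job. hlist_del_init() leaves the node in the
 * unhashed state, so any duplicate completion arriving afterwards is
 * ignored by the check in demo_complete().
 */
static struct demo_job *demo_claim(struct hlist_head *q, spinlock_t *lock)
{
	struct demo_job *job = NULL, *iter;
	struct hlist_node *n;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	hlist_for_each_entry_safe(iter, n, q, node) {
		hlist_del_init(&iter->node);
		job = iter;
		break;
	}
	spin_unlock_irqrestore(lock, flags);
	return job;
}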
@@ -4019,12 +4023,9 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl,
 	struct smq_phy_page pages[PAGESLEN_WITH_SHAREDBUF];
 	struct fastrpc_mmap *file = NULL;
 	struct fastrpc_buf *imem = NULL;
-	unsigned long imem_dma_attr = 0, irq_flags = 0;
+	unsigned long imem_dma_attr = 0;
 	remote_arg_t ra[6];
 	int fds[6];
-	struct fastrpc_apps *me = &gfa;
-	struct hlist_node *n = NULL;
-	struct fastrpc_file *fl_curr = NULL;
 	unsigned int gid = 0, one_mb = 1024*1024;
 	unsigned int dsp_userpd_memlen = 0;
 	struct fastrpc_buf *init_mem;
@@ -4072,20 +4073,6 @@ static int fastrpc_init_create_dynamic_process(struct fastrpc_file *fl,
 	if (uproc->attrs & FASTRPC_MODE_UNSIGNED_MODULE)
 		fl->is_unsigned_pd = true;
 
-	/* Validate that any existing sessions of process are of same pd type */
-	spin_lock_irqsave(&me->hlock, irq_flags);
-	hlist_for_each_entry_safe(fl_curr, n, &me->drivers, hn) {
-		if ((fl != fl_curr) && (fl->tgid == fl_curr->tgid) && (fl->cid == fl_curr->cid)) {
-			err = (fl->is_unsigned_pd != fl_curr->is_unsigned_pd) ? -ECONNREFUSED : 0;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&me->hlock, irq_flags);
-	if (err) {
-		ADSPRPC_ERR("existing session pd type %u not same as requested pd type %u \n",
-			fl_curr->is_unsigned_pd, fl->is_unsigned_pd);
-		goto bail;
-	}
 	/* Check if file memory passed by userspace is valid */
 	VERIFY(err, access_ok((void __user *)init->file, init->filelen));
 	if (err)