Potential use of freed ctx in async invoke

After a message is sent to the DSP, the async response thread
could immediately pick up the response and free the context,
resulting in a use-after-free in invoke send. Fix this by
using a local copy of ctx for trace and gmsg logging. The
async response path also races with SSR handling, which
relies on the context's is_job_sent_to_remote_ss flag; close
that race by validating ctx against the channel's ctxtable
before setting is_job_sent_to_remote_ss.

Change-Id: I1ebbed61443beda7b5ffcbe858481a54cca96acb
Signed-off-by: nishant chaubey <quic_chaubey@quicinc.com>
Author: nishant chaubey <quic_chaubey@quicinc.com>
Date: 2023-05-18 14:31:58 +05:30
Committed-by: Gerrit - the friendly Code Review server
Parent: 37d9c2e46e
Commit: 20d4dc0060
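
The race being closed, as an illustrative timeline (simplified; the
exact free path lives in the async response handler):

    invoke thread                            async response thread
    -------------                            ---------------------
    fastrpc_transport_send(msg)
                                             response for ctx arrives
                                             ctx removed from ctxtable, freed
    ctx->is_job_sent_to_remote_ss = true;    <-- use-after-free
    trace/gmsg logging via ctx               <-- use-after-free

The patch below snapshots msg and ctx before the send, and afterwards
dereferences the original ctx only if it is still present in the
channel's ctxtable, checked under ctxlock.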


@@ -2825,6 +2825,7 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
 {
 	struct smq_msg *msg = &ctx->msg;
 	struct smq_msg msg_temp;
+	struct smq_invoke_ctx ctx_temp;
 	struct fastrpc_file *fl = ctx->fl;
 	struct fastrpc_channel_ctx *channel_ctx = NULL;
 	int err = 0, cid = -1;
@@ -2832,6 +2833,8 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
 	int64_t ns = 0;
 	uint64_t xo_time_in_us = 0;
 	int isasync = (ctx->asyncjob.isasyncjob ? true : false);
+	unsigned long irq_flags = 0;
+	uint32_t index = 0;
 
 	if (!fl) {
 		err = -EBADF;
@@ -2870,16 +2873,27 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
 		/*
 		 * After message is sent to DSP, async response thread could immediately
 		 * get the response and free context, which will result in a use-after-free
-		 * in this function. So use a local variable for message.
+		 * in this function. So use a local variable for message and context.
 		 */
 		memcpy(&msg_temp, msg, sizeof(struct smq_msg));
 		msg = &msg_temp;
+		memcpy(&ctx_temp, ctx, sizeof(struct smq_invoke_ctx));
+		index = (uint32_t)GET_TABLE_IDX_FROM_CTXID(ctx->ctxid);
 	}
 	err = fastrpc_transport_send(cid, (void *)msg, sizeof(*msg), fl->tvm_remote_domain);
-	if (isasync && !err) {
-		spin_lock(&fl->hlock);
-		ctx->is_job_sent_to_remote_ss = true;
-		spin_unlock(&fl->hlock);
+	if (isasync) {
+		if (!err) {
+			/*
+			 * Validate the ctx as this could have been already
+			 * freed by async response.
+			 */
+			spin_lock_irqsave(&channel_ctx->ctxlock, irq_flags);
+			if (index < FASTRPC_CTX_MAX && channel_ctx->ctxtable[index] == ctx)
+				ctx->is_job_sent_to_remote_ss = true;
+			spin_unlock_irqrestore(&channel_ctx->ctxlock, irq_flags);
+		}
+		ctx = &ctx_temp;
 	}
 	trace_fastrpc_transport_send(cid, (uint64_t)ctx, msg->invoke.header.ctx,
 		handle, sc, msg->invoke.page.addr, msg->invoke.page.size);
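
The same copy-then-revalidate pattern, distilled into a standalone
sketch (userspace C, with a pthread spinlock standing in for the
kernel spinlock; the table layout, index derivation, and helper names
here are simplified stand-ins, not the driver's actual definitions):

#include <inttypes.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CTX_MAX 1024

struct invoke_ctx {
	uint64_t ctxid;                       /* encodes the table index */
	bool is_job_sent_to_remote_ss;
};

struct channel {
	struct invoke_ctx *ctxtable[CTX_MAX];
	pthread_spinlock_t ctxlock;           /* protects ctxtable */
};

/* Stand-ins for the transport send and the tracepoint. */
static void transport_send(uint64_t ctxid) { (void)ctxid; }
static void trace_send(uint64_t ctxid)
{
	printf("sent ctx %" PRIu64 "\n", ctxid);
}

static void invoke_send(struct channel *ch, struct invoke_ctx *ctx)
{
	struct invoke_ctx ctx_temp;
	uint32_t index;

	/*
	 * Snapshot ctx before sending: once the message is out, the
	 * async response thread may free ctx at any moment.
	 */
	memcpy(&ctx_temp, ctx, sizeof(ctx_temp));
	index = (uint32_t)(ctx->ctxid % CTX_MAX); /* simplified index derivation */

	transport_send(ctx->ctxid);           /* ctx may be freed from here on */

	/*
	 * Touch the original ctx only if it is still the live entry in
	 * the table. The response thread removes entries under the same
	 * lock, so the check and the write are atomic w.r.t. the free.
	 */
	pthread_spin_lock(&ch->ctxlock);
	if (index < CTX_MAX && ch->ctxtable[index] == ctx)
		ctx->is_job_sent_to_remote_ss = true;
	pthread_spin_unlock(&ch->ctxlock);

	/* All later logging reads the snapshot, never the original pointer. */
	ctx = &ctx_temp;
	trace_send(ctx->ctxid);
}

int main(void)
{
	static struct channel ch;
	static struct invoke_ctx ctx = { .ctxid = 42 };

	pthread_spin_init(&ch.ctxlock, PTHREAD_PROCESS_PRIVATE);
	ch.ctxtable[ctx.ctxid % CTX_MAX] = &ctx;
	invoke_send(&ch, &ctx);
	return 0;
}

Note the lock change from fl->hlock to channel_ctx->ctxlock in the
patch: the ctxtable is presumably protected by ctxlock, so taking that
lock makes the lookup and the flag write atomic with respect to the
response thread's removal and free, which fl->hlock alone could not
guarantee. Pointing ctx at the local copy afterwards keeps the
subsequent trace and gmsg logging off the possibly freed object.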