video: driver: fix race issues with msm_vidc_stats_handler api

There are 2 possible race issues with msm_vidc_stats_handler.

[1] msm_vidc_close acquired inst->lock and called cancel_delayed_work,
but by that time stats_handler had already fired and incremented
inst->kref via get_inst_ref. The close sequence then released the lock
and called put_inst().
Now the last strong reference to inst is held by stats_handler, which
acquires inst->lock, schedules new stats_work and calls put_inst. The
inst->kref count reaches zero and the inst struct is freed (via
close_helper), so the still-pending stats_work leads to a
use-after-free crash on the core kernel side.

[2] msm_vidc_close acquired inst->lock and called cancel_delayed_work,
but by that time stats_handler was already scheduled, so
process_one_work() from the workqueue was ready to call the
stats_handler api. Before it could invoke the stats handler (context
switch), the msm_vidc_close sequence continued to run, completed the
close sequence and called put_inst. The inst struct then got freed
(because the inst->kref count reached zero). Now when the core kernel
(workqueue) attempts to invoke stats_handler by calling
worker->current_func(work), it again leads to a use-after-free crash.

Added changes to avoid the above-mentioned issues.

Change-Id: I55bc33a753f4dbae4a8cdc6373b5465d183da3bc
Signed-off-by: Govindaraj Rajagopal <grajagop@codeaurora.org>
This commit is contained in:
Govindaraj Rajagopal
2021-10-12 16:54:37 +05:30
parent b1c38f2133
commit 2eb5672463
3 changed files with 19 additions and 9 deletions

View File

@@ -353,7 +353,8 @@ void msm_vidc_update_stats(struct msm_vidc_inst *inst,
 	struct msm_vidc_buffer *buf, enum msm_vidc_debugfs_event etype);
 void msm_vidc_stats_handler(struct work_struct *work);
 int schedule_stats_work(struct msm_vidc_inst *inst);
-int cancel_stats_work(struct msm_vidc_inst *inst);
+int cancel_stats_work_sync(struct msm_vidc_inst *inst);
+void msm_vidc_print_stats(struct msm_vidc_inst *inst);
 enum msm_vidc_buffer_type v4l2_type_to_driver(u32 type,
 	const char *func);
 int msm_vidc_queue_buffer_single(struct msm_vidc_inst *inst,

View File

@@ -961,11 +961,13 @@ int msm_vidc_close(void *instance)
 	i_vpr_h(inst, "%s()\n", __func__);
 	inst_lock(inst, __func__);
 	cancel_response_work(inst);
-	cancel_stats_work(inst);
+	/* print final stats */
+	msm_vidc_print_stats(inst);
 	msm_vidc_session_close(inst);
 	msm_vidc_remove_session(inst);
 	msm_vidc_destroy_buffers(inst);
 	inst_unlock(inst, __func__);
+	cancel_stats_work_sync(inst);
 	msm_vidc_show_stats(inst);
 	put_inst(inst);
 	msm_vidc_schedule_core_deinit(core);

View File

@@ -2868,7 +2868,7 @@ void msm_vidc_update_stats(struct msm_vidc_inst *inst,
 	msm_vidc_debugfs_update(inst, etype);
 }

-static void msm_vidc_print_stats(struct msm_vidc_inst *inst)
+void msm_vidc_print_stats(struct msm_vidc_inst *inst)
 {
 	u32 frame_rate, operating_rate, achieved_fps, priority, etb, ebd, ftb, fbd, dt_ms;
 	u64 bitrate_kbps = 0, time_ms = ktime_get_ns() / 1000 / 1000;
@@ -2909,6 +2909,16 @@ int schedule_stats_work(struct msm_vidc_inst *inst)
 		d_vpr_e("%s: invalid params\n", __func__);
 		return -EINVAL;
 	}
+
+	/**
+	 * Hfi session is already closed and inst also going to be
+	 * closed soon. So skip scheduling new stats_work to avoid
+	 * use-after-free issues with close sequence.
+	 */
+	if (!inst->packet) {
+		i_vpr_e(inst, "skip scheduling stats_work\n");
+		return 0;
+	}
 	core = inst->core;
 	mod_delayed_work(inst->response_workq, &inst->stats_work,
 		msecs_to_jiffies(core->capabilities[STATS_TIMEOUT_MS].value));
@@ -2916,16 +2926,13 @@ int schedule_stats_work(struct msm_vidc_inst *inst)
 	return 0;
 }

-int cancel_stats_work(struct msm_vidc_inst *inst)
+int cancel_stats_work_sync(struct msm_vidc_inst *inst)
 {
 	if (!inst) {
 		d_vpr_e("%s: Invalid arguments\n", __func__);
 		return -EINVAL;
 	}
-	cancel_delayed_work(&inst->stats_work);
-
-	/* print final stats */
-	msm_vidc_print_stats(inst);
+	cancel_delayed_work_sync(&inst->stats_work);

 	return 0;
 }
@@ -2936,7 +2943,7 @@ void msm_vidc_stats_handler(struct work_struct *work)
 	inst = container_of(work, struct msm_vidc_inst, stats_work.work);
 	inst = get_inst_ref(g_core, inst);
-	if (!inst) {
+	if (!inst || !inst->packet) {
 		d_vpr_e("%s: invalid params\n", __func__);
 		return;
 	}