Преглед на файлове

qcacmn: Handle race condition in ce interrupt handler

Fix race condition in dispatch interrupt when CE0 interrupt is called
from irq context in one core while it is called from user context in
ring full condition. This case has to be handled using locks since
calling in parallel from multiple contexts is leading to the interrupt
getting disabled and tasklet not getting scheduled. This is leading
to the interrupt being left in a disabled state. Also change the diff_time variable
to a signed integer to handle a case where the tasklet is scheduled just after
the workaround kicks in, leading to diff_time becoming negative.

Change-Id: I3894ef90ca48f23404bc9529c4b1623841698293
CRs-Fixed: 3297372
Nandha Kishore Easwaran преди 2 години
родител
ревизия
3e59523895
променени са 3 файла, в които са добавени 71 реда и са изтрити 3 реда
  1. 4 1
      hif/src/ce/ce_internal.h
  2. 7 1
      hif/src/ce/ce_main.c
  3. 60 1
      hif/src/ce/ce_tasklet.c

+ 4 - 1
hif/src/ce/ce_internal.h

@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -135,6 +135,9 @@ struct CE_state {
 	atomic_t rx_pending;
 
 	qdf_spinlock_t ce_index_lock;
+#ifdef CE_TASKLET_SCHEDULE_ON_FULL
+	qdf_spinlock_t ce_interrupt_lock;
+#endif
 	/* Flag to indicate whether to break out the DPC context */
 	bool force_break;
 

+ 7 - 1
hif/src/ce/ce_main.c

@@ -2284,6 +2284,9 @@ struct CE_handle *ce_init(struct hif_softc *scn,
 
 		malloc_CE_state = true;
 		qdf_spinlock_create(&CE_state->ce_index_lock);
+#ifdef CE_TASKLET_SCHEDULE_ON_FULL
+		qdf_spinlock_create(&CE_state->ce_interrupt_lock);
+#endif
 
 		CE_state->id = CE_id;
 		CE_state->ctrl_addr = ctrl_addr;
@@ -2757,6 +2760,9 @@ void ce_fini(struct CE_handle *copyeng)
 	ce_deinit_ce_desc_event_log(scn, CE_id);
 
 	qdf_spinlock_destroy(&CE_state->ce_index_lock);
+#ifdef CE_TASKLET_SCHEDULE_ON_FULL
+	qdf_spinlock_destroy(&CE_state->ce_interrupt_lock);
+#endif
 	qdf_mem_free(CE_state);
 }
 
@@ -2902,7 +2908,7 @@ void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
 {
 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
-	uint64_t diff_time = qdf_get_log_timestamp_usecs() -
+	int64_t diff_time = qdf_get_log_timestamp_usecs() -
 			hif_state->stats.tasklet_sched_entry_ts[pipe];
 
 	hif_state->stats.ce_ring_full_count[pipe]++;

+ 60 - 1
hif/src/ce/ce_tasklet.c

@@ -812,6 +812,53 @@ int hif_drain_fw_diag_ce(struct hif_softc *scn)
 	return ce_poll_reap_by_id(scn, ce_id);
 }
 
+#ifdef CE_TASKLET_SCHEDULE_ON_FULL
+static inline int ce_check_tasklet_status(int ce_id,
+					  struct ce_tasklet_entry *entry)
+{
+	struct HIF_CE_state *hif_ce_state = entry->hif_ce_state;
+	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
+	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
+
+	if (hif_napi_enabled(hif_hdl, ce_id)) {
+		struct qca_napi_info *napi;
+
+		napi = scn->napi_data.napis[ce_id];
+		if (test_bit(NAPI_STATE_SCHED, &napi->napi.state))
+			return -EBUSY;
+	} else {
+		if (test_bit(TASKLET_STATE_SCHED,
+			     &hif_ce_state->tasklets[ce_id].intr_tq.state))
+			return -EBUSY;
+	}
+	return 0;
+}
+
+static inline void ce_interrupt_lock(struct CE_state *ce_state)
+{
+	qdf_spin_lock_irqsave(&ce_state->ce_interrupt_lock);
+}
+
+static inline void ce_interrupt_unlock(struct CE_state *ce_state)
+{
+	qdf_spin_unlock_irqrestore(&ce_state->ce_interrupt_lock);
+}
+#else
+static inline int ce_check_tasklet_status(int ce_id,
+					  struct ce_tasklet_entry *entry)
+{
+	return 0;
+}
+
+static inline void ce_interrupt_lock(struct CE_state *ce_state)
+{
+}
+
+static inline void ce_interrupt_unlock(struct CE_state *ce_state)
+{
+}
+#endif
+
 /**
  * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
  * @ce_id: ce_id
@@ -825,6 +872,7 @@ irqreturn_t ce_dispatch_interrupt(int ce_id,
 	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
+	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
 
 	if (tasklet_entry->ce_id != ce_id) {
 		bool rl;
@@ -845,10 +893,18 @@ irqreturn_t ce_dispatch_interrupt(int ce_id,
 		return IRQ_NONE;
 	}
 
+	ce_interrupt_lock(ce_state);
+	if (ce_check_tasklet_status(ce_id, tasklet_entry)) {
+		ce_interrupt_unlock(ce_state);
+		return IRQ_NONE;
+	}
+
 	hif_irq_disable(scn, ce_id);
 
-	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn))
+	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
+		ce_interrupt_unlock(ce_state);
 		return IRQ_HANDLED;
+	}
 
 	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
 				NULL, NULL, 0, 0);
@@ -857,6 +913,7 @@ irqreturn_t ce_dispatch_interrupt(int ce_id,
 	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
 		hif_ut_fw_resume(scn);
 		hif_irq_enable(scn, ce_id);
+		ce_interrupt_unlock(ce_state);
 		return IRQ_HANDLED;
 	}
 
@@ -867,6 +924,8 @@ irqreturn_t ce_dispatch_interrupt(int ce_id,
 	else
 		hif_tasklet_schedule(hif_hdl, tasklet_entry);
 
+	ce_interrupt_unlock(ce_state);
+
 	return IRQ_HANDLED;
 }