@@ -750,17 +750,14 @@ inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
  * @scn: hif context
  * @ce_id: index of napi instance
  *
- * Return: void
+ * Return: false if napi is not enabled or is already scheduled, otherwise true
  */
-int hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
+bool hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
 {
 	int cpu = smp_processor_id();
 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
 	struct qca_napi_info *napii;
 
-	hif_record_ce_desc_event(scn, ce_id, NAPI_SCHEDULE,
-				 NULL, NULL, 0, 0);
-
 	napii = scn->napi_data.napis[ce_id];
 	if (qdf_unlikely(!napii)) {
 		HIF_ERROR("%s, scheduling unallocated napi (ce:%d)",
@@ -769,6 +766,14 @@ int hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
 		return false;
 	}
 
+	if (test_bit(NAPI_STATE_SCHED, &napii->napi.state)) {
+		NAPI_DEBUG("napi scheduled, return");
+		qdf_atomic_dec(&scn->active_tasklet_cnt);
+		return false;
+	}
+
+	hif_record_ce_desc_event(scn, ce_id, NAPI_SCHEDULE,
+				 NULL, NULL, 0, 0);
 	napii->stats[cpu].napi_schedules++;
 	NAPI_DEBUG("scheduling napi %d (ce:%d)", napii->id, ce_id);
 	napi_schedule(&(napii->napi));
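
With this change, hif_napi_schedule() tells its caller whether a poll was actually queued, and on the already-scheduled path it drops the active_tasklet_cnt reference itself. A minimal caller sketch follows, assuming the IRQ path takes that reference before scheduling (the qdf_atomic_dec() added above implies it); my_ce_irq_ctx and my_ce_irq_handler are hypothetical names for illustration, not part of this patch:

#include <linux/interrupt.h>	/* irqreturn_t, IRQ_HANDLED */
/* hif.h/hif_napi.h assumed in scope for hif_napi_schedule() and qdf atomics */

/* Hypothetical per-CE interrupt context; not part of this patch. */
struct my_ce_irq_ctx {
	struct hif_softc *scn;			/* holds active_tasklet_cnt */
	struct hif_opaque_softc *hif_ctx;
	int ce_id;
};

static irqreturn_t my_ce_irq_handler(int irq, void *data)
{
	struct my_ce_irq_ctx *ctx = data;

	/* Take a tasklet reference before handing the CE off to NAPI. */
	qdf_atomic_inc(&ctx->scn->active_tasklet_cnt);

	if (!hif_napi_schedule(ctx->hif_ctx, ctx->ce_id)) {
		/*
		 * No new poll was queued: either NAPI was already scheduled
		 * (the in-flight poll drains this CE, and hif_napi_schedule()
		 * dropped the reference itself) or the napi instance is
		 * unavailable for this CE.
		 */
		return IRQ_HANDLED;
	}

	/* A poll was queued; the NAPI handler completes the work. */
	return IRQ_HANDLED;
}

A side effect of moving hif_record_ce_desc_event() below the new early return: NAPI_SCHEDULE is only recorded in the CE descriptor history when a poll is really queued, so schedules coalesced into an already-pending poll no longer clutter the event log.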