qcacmn: Get rid of spinlock lro_unloading_lock

Spinlock "lro_unloading_lock" was required to synchronize the LRO
instance being deleted when there is LRO packet inflight.
With LRO moved to qdf and LRO is tied to hif_napi, LRO instance is
active with life time of the driver. So no need to protect via lock.

Change-Id: I06f7b43e80ddf0ce5e096351b38ce954eb634a95
CRs-Fixed: 2028318
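
The rationale can be illustrated with a small, self-contained sketch; it is not driver code, and the names lro_ctx, lro_flush, and rx_worker are invented for the illustration. A per-access lock is only required when the shared context can be torn down while users are still running; once the context is created at load time and destroyed only after every user has stopped, the teardown race disappears and the lock can be dropped, which is the situation once the LRO context is tied to the driver's lifetime.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for qdf_lro_ctx_t; purely illustrative. */
struct lro_ctx {
        unsigned long flushed;
};

static struct lro_ctx *g_lro_ctx;       /* created once at "driver load" */

static void lro_flush(struct lro_ctx *ctx)
{
        ctx->flushed++;
}

static void *rx_worker(void *arg)
{
        int i;

        (void)arg;
        for (i = 0; i < 1000; i++) {
                /*
                 * Old scheme: every access was bracketed by
                 * lro_unloading_lock because the context could be freed
                 * underneath an in-flight packet.
                 *
                 * New scheme: the context outlives every worker, so the
                 * access needs no lock to guard against teardown.
                 */
                lro_flush(g_lro_ctx);
        }
        return NULL;
}

int main(void)
{
        pthread_t rx_thread;

        g_lro_ctx = calloc(1, sizeof(*g_lro_ctx));      /* "driver init" */
        if (!g_lro_ctx)
                return 1;

        if (pthread_create(&rx_thread, NULL, rx_worker, NULL)) {
                free(g_lro_ctx);
                return 1;
        }
        pthread_join(&rx_thread, NULL);         /* all users quiesced first */

        printf("flushed %lu times\n", g_lro_ctx->flushed);
        free(g_lro_ctx);                        /* "driver exit": destroy last */
        return 0;
}

The same lifetime argument holds with many workers: join (or otherwise quiesce) every user of the context before freeing it, and the unload race the spinlock guarded against cannot occur.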
Author:    Manjunathappa Prakash
Date:      2017-10-23 23:09:14 -07:00
Committer: snandini
Parent:    3cbcfa519f
Commit:    96713609aa

5 changed files with 0 additions and 13 deletions


@@ -228,7 +228,6 @@ struct qca_napi_info {
         /* will only be present for data rx CE's */
         void (*lro_flush_cb)(void *);
         qdf_lro_ctx_t lro_ctx;
-        qdf_spinlock_t lro_unloading_lock;
 };

 enum qca_napi_tput_state {


@@ -151,7 +151,6 @@ struct CE_state {
         bool htt_tx_data;
         bool htt_rx_data;
         qdf_lro_ctx_t lro_data;
-        qdf_spinlock_t lro_unloading_lock;
 };

 /* Descriptor rings must be aligned to this boundary */


@@ -916,7 +916,6 @@ struct CE_handle *ce_init(struct hif_softc *scn,
                 CE_state->ctrl_addr = ctrl_addr;
                 CE_state->state = CE_RUNNING;
                 CE_state->attr_flags = attr->flags;
-                qdf_spinlock_create(&CE_state->lro_unloading_lock);
         }
         CE_state->scn = scn;
@@ -1299,8 +1298,6 @@ void ce_fini(struct CE_handle *copyeng)
         CE_state->state = CE_UNUSED;
         scn->ce_id_to_state[CE_id] = NULL;
-        qdf_spinlock_destroy(&CE_state->lro_unloading_lock);
         qdf_lro_deinit(CE_state->lro_data);
         if (CE_state->src_ring) {


@@ -171,13 +171,10 @@ static void ce_tasklet(unsigned long data)
                 QDF_BUG(0);
         }

-        qdf_spin_lock_bh(&CE_state->lro_unloading_lock);
         ce_per_engine_service(scn, tasklet_entry->ce_id);
         qdf_lro_flush(CE_state->lro_data);
-        qdf_spin_unlock_bh(&CE_state->lro_unloading_lock);

         if (ce_check_rx_pending(CE_state)) {
                 /*
                  * There are frames pending, schedule tasklet to process them.


@@ -159,7 +159,6 @@ int hif_napi_create(struct hif_opaque_softc *hif_ctx,
                         HIF_WARN("%s: bad IRQ value for CE %d: %d",
                                  __func__, i, napii->irq);
-                qdf_spinlock_create(&napii->lro_unloading_lock);
                 init_dummy_netdev(&(napii->netdev));
                 NAPI_DEBUG("adding napi=%pK to netdev=%pK (poll=%pK, bdgt=%d)",
@@ -286,7 +285,6 @@ int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
                            napii->netdev.napi_list.prev,
                            napii->netdev.napi_list.next);
-                qdf_spinlock_destroy(&napii->lro_unloading_lock);
                 qdf_lro_deinit(napii->lro_ctx);
                 netif_napi_del(&(napii->napi));
@@ -769,14 +767,11 @@ int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
         hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
                                  NAPI_POLL_ENTER, NULL, NULL, cpu);

-        qdf_spin_lock_bh(&napi_info->lro_unloading_lock);
         rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
         NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
                    __func__, rc);

         qdf_lro_flush(napi_info->lro_ctx);
-        qdf_spin_unlock_bh(&napi_info->lro_unloading_lock);

         /* do not return 0, if there was some work done,
          * even if it is below the scale