
qcacmn: Block non-wmi traffic in ce layer during suspend

Upper layers are failing to block disallowed traffic during suspend.
Hif can enqueue messages on the ce rings without letting the hardware
know they are there.  When the fw is ready to receive non-wmi packets,
hif can inform the hw of the queued packets.

Change-Id: I32810d9548416021c1da4f48a4ac539f75f1d907
CRs-Fixed: 2003582
Houston Hoffman 8 years ago
parent
commit
cbcd8397ea
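
In other words, hif parks the non-wmi copy engines before the bus suspends and pushes any held-back write indexes after it resumes. A minimal sketch of how a caller might order the new entry points, assuming hypothetical wlan_suspend()/wlan_resume() wrappers (only the hif_bus_* calls below come from hif.h; the wrappers are illustrative, not part of this change):

static int wlan_suspend(struct hif_opaque_softc *hif_ctx)
{
	int ret;

	/* park non-wmi copy engines so new tx stops reaching the hw */
	ret = hif_bus_early_suspend(hif_ctx);
	if (ret)
		return ret;

	return hif_bus_suspend(hif_ctx);
}

static int wlan_resume(struct hif_opaque_softc *hif_ctx)
{
	int ret = hif_bus_resume(hif_ctx);

	if (ret)
		return ret;

	/* push any write indexes that were held back while suspended */
	return hif_bus_late_resume(hif_ctx);
}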

+ 11 - 0
hif/inc/hif.h

@@ -718,6 +718,17 @@ enum ipa_hw_type hif_get_ipa_hw_type(void)
 }
 #endif
 int hif_bus_resume(struct hif_opaque_softc *);
+/**
+ * hif_bus_early_suspend() - stop non-wmi tx traffic
+ * @hif_ctx: hif context
+ */
+int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
+
+/**
+ * hif_bus_late_resume() - resume non-wmi traffic
+ * @hif_ctx: hif context
+ */
+int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
 int hif_bus_suspend(struct hif_opaque_softc *);
 int hif_bus_resume_noirq(struct hif_opaque_softc *);
 int hif_bus_suspend_noirq(struct hif_opaque_softc *);

+ 3 - 0
hif/src/ce/ce_api.h

@@ -549,4 +549,7 @@ struct ce_ops {
 			    int *num_shadow_registers_configured);
 
 };
+
+int hif_ce_bus_early_suspend(struct hif_softc *scn);
+int hif_ce_bus_late_resume(struct hif_softc *scn);
 #endif /* __COPY_ENGINE_API_H__ */

+ 8 - 1
hif/src/ce/ce_internal.h

@@ -34,6 +34,7 @@ enum CE_op_state {
 	CE_UNUSED,
 	CE_PAUSED,
 	CE_RUNNING,
+	CE_PENDING,
 };
 
 enum ol_ath_hif_ce_ecodes {
@@ -389,6 +390,8 @@ union ce_desc {
  * @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring.
  * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
  * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
+ * @HIF_TX_DESC_SOFTWARE_POST: event recorded when dropping a write to the write
+ *	index in a normal tx
  * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
  * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
  *	of the RX ring in fastpath
@@ -396,9 +399,10 @@ union ce_desc {
  *	index of the RX ring in fastpath
  * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
  *	of the TX ring in fastpath
+ * @FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: event recorded when dropping a write
+ *	to the write index in fastpath
  * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
  *	index of the RX ring in fastpath
- *
  * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
  * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
  * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
@@ -415,11 +419,14 @@ enum hif_ce_event_type {
 	HIF_RX_DESC_COMPLETION,
 	HIF_TX_GATHER_DESC_POST,
 	HIF_TX_DESC_POST,
+	HIF_TX_DESC_SOFTWARE_POST,
 	HIF_TX_DESC_COMPLETION,
 	FAST_RX_WRITE_INDEX_UPDATE,
 	FAST_RX_SOFTWARE_INDEX_UPDATE,
 	FAST_TX_WRITE_INDEX_UPDATE,
+	FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE,
 	FAST_TX_SOFTWARE_INDEX_UPDATE,
+	RESUME_WRITE_INDEX_UPDATE,
 
 	HIF_IRQ_EVENT = 0x10,
 	HIF_CE_TASKLET_ENTRY,

+ 62 - 0
hif/src/ce/ce_main.c

@@ -777,6 +777,68 @@ static void ce_ring_setup(struct hif_softc *scn, uint8_t ring_type,
 	hif_state->ce_services->ce_ring_setup(scn, ring_type, ce_id, ring, attr);
 }
 
+int hif_ce_bus_early_suspend(struct hif_softc *scn)
+{
+	uint8_t ul_pipe, dl_pipe;
+	int ce_id, status, ul_is_polled, dl_is_polled;
+	struct CE_state *ce_state;
+	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
+					 &ul_pipe, &dl_pipe,
+					 &ul_is_polled, &dl_is_polled);
+	if (status) {
+		HIF_ERROR("%s: pipe_mapping failure", __func__);
+		return status;
+	}
+
+	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
+		if (ce_id == ul_pipe)
+			continue;
+		if (ce_id == dl_pipe)
+			continue;
+
+		ce_state = scn->ce_id_to_state[ce_id];
+		qdf_spin_lock_bh(&ce_state->ce_index_lock);
+		if (ce_state->state == CE_RUNNING)
+			ce_state->state = CE_PAUSED;
+		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
+	}
+
+	return status;
+}
+
+int hif_ce_bus_late_resume(struct hif_softc *scn)
+{
+	int ce_id;
+	struct CE_state *ce_state;
+	int write_index;
+	bool index_updated;
+
+	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
+		ce_state = scn->ce_id_to_state[ce_id];
+		qdf_spin_lock_bh(&ce_state->ce_index_lock);
+		if (ce_state->state == CE_PENDING) {
+			write_index = ce_state->src_ring->write_index;
+			CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
+					write_index);
+			ce_state->state = CE_RUNNING;
+			index_updated = true;
+		} else {
+			index_updated = false;
+		}
+
+		if (ce_state->state == CE_PAUSED)
+			ce_state->state = CE_RUNNING;
+		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
+
+		if (index_updated)
+			hif_record_ce_desc_event(scn, ce_id,
+				RESUME_WRITE_INDEX_UPDATE,
+				NULL, NULL, write_index);
+	}
+
+	return 0;
+}
+
 /*
  * Initialize a Copy Engine based on caller-supplied attributes.
  * This may be called once to initialize both source and destination

+ 16 - 12
hif/src/ce/ce_service.c

@@ -352,7 +352,7 @@ ce_send_nolock_legacy(struct CE_handle *copyeng,
 		return QDF_STATUS_E_FAILURE;
 	}
 	{
-		enum hif_ce_event_type event_type = HIF_TX_GATHER_DESC_POST;
+		enum hif_ce_event_type event_type;
 		struct CE_src_desc *src_ring_base =
 			(struct CE_src_desc *)src_ring->base_addr_owner_space;
 		struct CE_src_desc *shadow_base =
@@ -399,7 +399,12 @@ ce_send_nolock_legacy(struct CE_handle *copyeng,
 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
 
 		/* WORKAROUND */
-		if (!shadow_src_desc->gather) {
+		if (shadow_src_desc->gather) {
+			event_type = HIF_TX_GATHER_DESC_POST;
+		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
+			event_type = HIF_TX_DESC_SOFTWARE_POST;
+			CE_state->state = CE_PENDING;
+		} else {
 			event_type = HIF_TX_DESC_POST;
 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
 						      write_index);
@@ -612,6 +617,7 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
 	unsigned int frag_len;
 	uint64_t dma_addr;
 	uint32_t user_flags;
+	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
 
 	qdf_spin_lock_bh(&ce_state->ce_index_lock);
 	Q_TARGET_ACCESS_BEGIN(scn);
@@ -720,18 +726,16 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
 	src_ring->write_index = write_index;
 
 	if (hif_pm_runtime_get(hif_hdl) == 0) {
-		hif_record_ce_desc_event(scn, ce_state->id,
-					 FAST_TX_WRITE_INDEX_UPDATE,
-					 NULL, NULL, write_index);
-
-		/* Don't call WAR_XXX from here
-		 * Just call XXX instead, that has the reqd. intel
-		 */
-		war_ce_src_ring_write_idx_set(scn, ctrl_addr,
+		if (qdf_likely(ce_state->state == CE_RUNNING)) {
+			type = FAST_TX_WRITE_INDEX_UPDATE;
+			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
 				write_index);
+		} else
+			ce_state->state = CE_PENDING;
 		hif_pm_runtime_put(hif_hdl);
 	}
-
+	hif_record_ce_desc_event(scn, ce_state->id, type,
+				 NULL, NULL, write_index);
 
 	Q_TARGET_ACCESS_END(scn);
 	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
@@ -2401,7 +2405,7 @@ void ce_ipa_get_resource(struct CE_handle *ce,
 	qdf_dma_addr_t phy_mem_base;
 	struct hif_softc *scn = CE_state->scn;
 
-	if (CE_RUNNING != CE_state->state) {
+	if (CE_UNUSED == CE_state->state) {
 		*ce_sr_base_paddr = 0;
 		*ce_sr_ring_size = 0;
 		return;

+ 14 - 0
hif/src/dispatcher/multibus.c

@@ -62,6 +62,8 @@ static void hif_intialize_default_ops(struct hif_softc *hif_sc)
 	bus_ops->hif_bus_reset_resume = &hif_dummy_bus_reset_resume;
 	bus_ops->hif_bus_suspend_noirq = &hif_dummy_bus_suspend_noirq;
 	bus_ops->hif_bus_resume_noirq = &hif_dummy_bus_resume_noirq;
+	bus_ops->hif_bus_early_suspend = &hif_dummy_bus_suspend;
+	bus_ops->hif_bus_late_resume = &hif_dummy_bus_resume;
 	bus_ops->hif_grp_irq_disable = &hif_dummy_grp_irq_disable;
 	bus_ops->hif_grp_irq_enable = &hif_dummy_grp_irq_enable;
 }
@@ -191,6 +193,18 @@ void hif_reset_soc(struct hif_opaque_softc *hif_ctx)
 	hif_sc->bus_ops.hif_reset_soc(hif_sc);
 }
 
+int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx)
+{
+	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx);
+	return hif_sc->bus_ops.hif_bus_early_suspend(hif_sc);
+}
+
+int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx)
+{
+	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx);
+	return hif_sc->bus_ops.hif_bus_late_resume(hif_sc);
+}
+
 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx)
 {
 	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_ctx);

+ 2 - 0
hif/src/dispatcher/multibus.h

@@ -40,6 +40,8 @@ struct hif_bus_ops {
 	void (*hif_bus_close)(struct hif_softc *hif_sc);
 	void (*hif_bus_prevent_linkdown)(struct hif_softc *hif_sc, bool flag);
 	void (*hif_reset_soc)(struct hif_softc *hif_sc);
+	int (*hif_bus_early_suspend)(struct hif_softc *hif_ctx);
+	int (*hif_bus_late_resume)(struct hif_softc *hif_ctx);
 	int (*hif_bus_suspend)(struct hif_softc *hif_ctx);
 	int (*hif_bus_resume)(struct hif_softc *hif_ctx);
 	int (*hif_bus_suspend_noirq)(struct hif_softc *hif_ctx);

+ 2 - 0
hif/src/dispatcher/multibus_snoc.c

@@ -45,6 +45,8 @@ QDF_STATUS hif_initialize_snoc_ops(struct hif_bus_ops *bus_ops)
 	bus_ops->hif_bus_close = &hif_snoc_close;
 	bus_ops->hif_bus_prevent_linkdown = &hif_dummy_bus_prevent_linkdown;
 	bus_ops->hif_reset_soc = &hif_dummy_reset_soc;
+	bus_ops->hif_bus_early_suspend = &hif_ce_bus_early_suspend;
+	bus_ops->hif_bus_late_resume = &hif_ce_bus_late_resume;
 	bus_ops->hif_bus_suspend = &hif_snoc_bus_suspend;
 	bus_ops->hif_bus_resume = &hif_snoc_bus_resume;
 	bus_ops->hif_bus_suspend_noirq = &hif_snoc_bus_suspend_noirq;
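
Buses that do not provide their own handlers keep the dummy ops installed in multibus.c, so early suspend and late resume are effectively no-ops there. A minimal sketch of the assumed shape of such a dummy handler (the real definition lives in the dispatcher's dummy implementation and is not part of this diff; the body shown is an assumption):

int hif_dummy_bus_suspend(struct hif_softc *hif_ctx)
{
	/* nothing to quiesce on this bus; assumed to simply report success */
	return 0;
}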