diff --git a/dp/wifi3.0/dp_htt.c b/dp/wifi3.0/dp_htt.c
index c2bad7d5c7..349aca11f4 100644
--- a/dp/wifi3.0/dp_htt.c
+++ b/dp/wifi3.0/dp_htt.c
@@ -3765,7 +3765,8 @@ QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
 			dp_htt_h2t_send_complete_free_netbuf,
 			qdf_nbuf_data(msg), qdf_nbuf_len(msg),
 			soc->htc_endpoint,
-			1); /* tag - not relevant here */
+			/* tag for FW response msg not guaranteed */
+			HTC_TX_PACKET_TAG_RUNTIME_PUT);
 
 	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
 	DP_HTT_SEND_HTC_PKT(soc, pkt, HTT_H2T_MSG_TYPE_EXT_STATS_REQ,
diff --git a/dp/wifi3.0/dp_stats.c b/dp/wifi3.0/dp_stats.c
index d33d9e424f..f9880e136f 100644
--- a/dp/wifi3.0/dp_stats.c
+++ b/dp/wifi3.0/dp_stats.c
@@ -4587,6 +4587,9 @@ dp_print_ring_stats(struct dp_pdev *pdev)
 	uint32_t i;
 	int mac_id;
 
+	if (hif_pm_runtime_get_sync(pdev->soc->hif_handle))
+		return;
+
 	dp_print_ring_stat_from_hal(pdev->soc,
 				    &pdev->soc->reo_exception_ring,
 				    REO_EXCEPTION);
@@ -4645,6 +4648,8 @@ dp_print_ring_stats(struct dp_pdev *pdev)
 		dp_print_ring_stat_from_hal(pdev->soc,
 					    &pdev->rxdma_err_dst_ring[i],
 					    RXDMA_DST);
+
+	hif_pm_runtime_put(pdev->soc->hif_handle);
 }
 
 /**
diff --git a/hif/inc/hif.h b/hif/inc/hif.h
index bb4b1ea302..de46bd4600 100644
--- a/hif/inc/hif.h
+++ b/hif/inc/hif.h
@@ -844,6 +844,7 @@ void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
 #ifdef FEATURE_RUNTIME_PM
 struct hif_pm_runtime_lock;
 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
+int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx);
 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx);
 int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx);
 void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx);
@@ -867,6 +868,8 @@ struct hif_pm_runtime_lock {
 	const char *name;
 };
 static inline void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
+static inline int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx)
+{ return 0; }
 static inline int
 hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
 { return 0; }
diff --git a/hif/src/pcie/if_pci.c b/hif/src/pcie/if_pci.c
index 2bf4d788cd..c7c1a63cfa 100644
--- a/hif/src/pcie/if_pci.c
+++ b/hif/src/pcie/if_pci.c
@@ -3867,6 +3867,45 @@ void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
 }
 
 #ifdef FEATURE_RUNTIME_PM
+/**
+ * hif_pm_runtime_get_sync() - do a get operation with sync resume
+ *
+ * A get operation will prevent a runtime suspend until a corresponding
+ * put is done. Unlike hif_pm_runtime_get(), this API does a sync
+ * resume instead of requesting a resume if the device is runtime PM
+ * suspended, so it can only be called in non-atomic context.
+ *
+ * @hif_ctx: pointer to the HIF context
+ *
+ * Return: 0 if the device is runtime PM resumed, otherwise an error code.
+ */
+int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx)
+{
+	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
+	int ret;
+
+	if (!sc)
+		return -EINVAL;
+
+	sc->pm_stats.runtime_get++;
+	ret = pm_runtime_get_sync(sc->dev);
+
+	/* Get can return 1 if the device is already active; treat that
+	 * as success.
+	 */
+	if (ret > 0)
+		ret = 0;
+
+	if (ret) {
+		sc->pm_stats.runtime_get_err++;
+		HIF_ERROR("Runtime PM Get Sync error in pm_state: %d, ret: %d",
+			  qdf_atomic_read(&sc->pm_state), ret);
+		hif_pm_runtime_put(hif_ctx);
+	}
+
+	return ret;
+}
+
 int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
 {
 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
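
As context for reviewers, the caller-side pattern this patch establishes in dp_print_ring_stats() is sketched below. The function name dp_dump_reo_exception_ring_example() is hypothetical and only illustrates how any non-atomic path that reads ring state via HAL would bracket the accesses with the new get_sync/put pair; the HIF and DP symbols used are the ones visible in the diff.

/* Hypothetical illustration, not part of the patch: bracket HAL ring
 * dumps with the runtime PM get/put pair, as dp_print_ring_stats()
 * now does.
 */
static void dp_dump_reo_exception_ring_example(struct dp_pdev *pdev)
{
	/* Sync-resume the device first; skip the dump if resume fails. */
	if (hif_pm_runtime_get_sync(pdev->soc->hif_handle))
		return;

	/* Device is guaranteed active here, so register reads are safe. */
	dp_print_ring_stat_from_hal(pdev->soc,
				    &pdev->soc->reo_exception_ring,
				    REO_EXCEPTION);

	/* Drop the reference so runtime suspend is allowed again. */
	hif_pm_runtime_put(pdev->soc->hif_handle);
}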