@@ -157,6 +157,51 @@ dp_tx_me_exit(struct dp_pdev *pdev)
}
}

+
+/**
+ * dp_tx_me_desc_flush() - free outstanding ME descriptors
+ * @pdev: DP_PDEV handle
+ *
+ * This function frees all outstanding ME buffers
+ * for which either the free during completion
+ * did not happen or the completion was not
+ * received.
+ */
+void dp_tx_me_desc_flush(struct dp_pdev *pdev)
+{
+	uint8_t i, num_pool;
+	uint32_t j;
+	uint32_t num_desc, page_id, offset;
+	uint16_t num_desc_per_page;
+	struct dp_soc *soc = pdev->soc;
+	struct dp_tx_desc_s *tx_desc = NULL;
+	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
+
+	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
+	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
+
+	for (i = 0; i < num_pool; i++) {
+		tx_desc_pool = &soc->tx_desc[i];
+		if (!tx_desc_pool || !tx_desc_pool->desc_pages.cacheable_pages)
+			continue;
+
+		num_desc_per_page =
+			tx_desc_pool->desc_pages.num_element_per_page;
+		for (j = 0; j < num_desc; j++) {
+			page_id = j / num_desc_per_page;
+			offset = j % num_desc_per_page;
+			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
+
+			if (tx_desc && (tx_desc->pdev == pdev) &&
+			    (tx_desc->flags & DP_TX_DESC_FLAG_ME) &&
+			    (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
+				dp_tx_comp_free_buf(soc, tx_desc);
+				dp_tx_desc_release(tx_desc, i);
+			}
+		}
+	}
+}
+
/**
* dp_tx_me_free_descriptor():free ME descriptor
* @pdev_handle:DP_PDEV handle
@@ -169,6 +214,7 @@ dp_tx_me_free_descriptor(struct cdp_pdev *pdev_handle)
	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
	qdf_atomic_dec(&pdev->mc_num_vap_attached);
	if (atomic_read(&pdev->mc_num_vap_attached) == 0) {
+		dp_tx_me_desc_flush(pdev);
		dp_tx_me_exit(pdev);
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "Disable MCAST_TO_UCAST");