@@ -94,7 +94,7 @@ cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
 #define DEINIT_RX_HW_STATS_LOCK(_soc) /* no op */
 #endif
 
-#ifdef DP_PEER_EXTENDED_API
+#if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
 #define SET_PEER_REF_CNT_ONE(_peer) \
 	qdf_atomic_set(&(_peer)->ref_cnt, 1)
 #else
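The widened guard compiles SET_PEER_REF_CNT_ONE() in when either feature flag is defined, so the pending-memory-flush path can reset a peer's reference count without requiring the extended peer API. A minimal sketch of the same conditional-compilation shape, using C11 stdatomic in place of the qdf atomic wrappers; the no-op #else fallback is an assumption, since the hunk ends before it:

#include <stdatomic.h>

struct peer {
	atomic_int ref_cnt;
};

/* Compiled in when at least one of the two features is enabled;
 * assumed to collapse to a no-op otherwise. */
#if defined(DP_PEER_EXTENDED_API) || defined(WLAN_DP_PENDING_MEM_FLUSH)
#define SET_PEER_REF_CNT_ONE(_peer) \
	atomic_store(&(_peer)->ref_cnt, 1)
#else
#define SET_PEER_REF_CNT_ONE(_peer)
#endif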
@@ -4491,20 +4491,21 @@ static void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev)
 static void dp_pdev_flush_pending_vdevs(struct dp_pdev *pdev)
 {
 	struct dp_vdev *vdev = NULL;
+	struct dp_soc *soc = pdev->soc;
+
+	if (TAILQ_EMPTY(&soc->inactive_vdev_list))
+		return;
 
 	while (true) {
-		qdf_spin_lock_bh(&pdev->vdev_list_lock);
-		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
-			if (vdev->delete.pending)
+		qdf_spin_lock_bh(&soc->inactive_vdev_list_lock);
+		TAILQ_FOREACH(vdev, &soc->inactive_vdev_list,
+			      inactive_list_elem) {
+			if (vdev->pdev == pdev)
 				break;
 		}
-		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
+		qdf_spin_unlock_bh(&soc->inactive_vdev_list_lock);
 
-		/*
-		 * vdev will be freed when all peers get cleanup,
-		 * dp_delete_pending_vdev will remove vdev from vdev_list
-		 * in pdev.
-		 */
+		/* vdev will be freed when all peers get cleanup */
 		if (vdev)
			dp_vdev_flush_peers((struct cdp_vdev *)vdev, 0);
 		else
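The rewritten flush loop follows a scan-under-lock, act-outside-lock pattern: each pass holds the soc-level inactive_vdev_list_lock only long enough to find one vdev belonging to this pdev, drops the lock, then flushes that vdev's peers outside the critical section. The loop repeats because the flush path is what eventually unlinks the vdev (per the retained comment, the vdev is freed once all its peers are cleaned up). A self-contained userspace sketch of the same pattern, with sys/queue.h and a pthread mutex standing in for the driver's TAILQ and qdf_spin_lock_bh; all struct and function names here are illustrative, not the driver's API:

#include <pthread.h>
#include <stdio.h>
#include <sys/queue.h>

struct pdev { int id; };                 /* stand-in for dp_pdev */

struct vdev {
	struct pdev *pdev;               /* owning pdev */
	TAILQ_ENTRY(vdev) inactive_elem; /* inactive-list linkage */
};

static TAILQ_HEAD(, vdev) inactive_list =
	TAILQ_HEAD_INITIALIZER(inactive_list);
static pthread_mutex_t inactive_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for dp_vdev_flush_peers(): in the driver the final peer
 * reference drop unlinks and frees the vdev; modeled here by removing
 * the entry directly so the outer loop terminates. */
static void flush_peers(struct vdev *v)
{
	pthread_mutex_lock(&inactive_lock);
	TAILQ_REMOVE(&inactive_list, v, inactive_elem);
	pthread_mutex_unlock(&inactive_lock);
	printf("flushed vdev of pdev %d\n", v->pdev->id);
}

static void flush_pending_vdevs(struct pdev *pdev)
{
	struct vdev *v;

	for (;;) {
		/* Scan under the lock only long enough to find a match. */
		pthread_mutex_lock(&inactive_lock);
		TAILQ_FOREACH(v, &inactive_list, inactive_elem) {
			if (v->pdev == pdev)
				break;
		}
		pthread_mutex_unlock(&inactive_lock);

		if (!v)      /* nothing left for this pdev */
			break;

		/* Flush outside the critical section: in the real driver
		 * it may take other locks or schedule work. */
		flush_peers(v);
	}
}

int main(void)
{
	struct pdev p = { .id = 0 };
	struct vdev a = { .pdev = &p }, b = { .pdev = &p };

	TAILQ_INSERT_TAIL(&inactive_list, &a, inactive_elem);
	TAILQ_INSERT_TAIL(&inactive_list, &b, inactive_elem);
	flush_pending_vdevs(&p);
	return 0;
}

Acting outside the lock matters because the flush can re-enter list manipulation (as the removal in flush_peers does here); holding a spinlock across it would self-deadlock or, in the -bh variant, stall softirqs for an unbounded time.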