|
@@ -756,6 +756,12 @@ void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
|
|
|
|
|
|
qdf_assert(hal_soc);
|
|
|
|
|
|
+ qdf_spin_lock_bh(&pdev->mon_lock);
|
|
|
+ if (pdev->monitor_vdev == NULL) {
|
|
|
+		qdf_spin_unlock_bh(&pdev->mon_lock);
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dst_srng))) {
|
|
|
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
|
|
"%s %d : HAL Monitor Destination Ring access Failed -- %pK\n",
|
|
@@ -765,6 +771,7 @@ void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
|
|
|
|
|
|
ppdu_id = pdev->ppdu_info.com_info.ppdu_id;
|
|
|
rx_bufs_used = 0;
|
|
|
+
|
|
|
while (qdf_likely(rxdma_dst_ring_desc =
|
|
|
hal_srng_dst_peek(hal_soc, mon_dst_srng))) {
|
|
|
qdf_nbuf_t head_msdu, tail_msdu;
|
|
@@ -797,6 +804,8 @@ void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
|
|
|
|
|
|
hal_srng_access_end(hal_soc, mon_dst_srng);
|
|
|
|
|
|
+ qdf_spin_unlock_bh(&pdev->mon_lock);
|
|
|
+
|
|
|
if (rx_bufs_used) {
|
|
|
dp_rx_buffers_replenish(soc, mac_id,
|
|
|
&pdev->rxdma_mon_buf_ring[mac_for_pdev],
|
|
@@ -1141,7 +1150,7 @@ dp_rx_pdev_mon_attach(struct dp_pdev *pdev) {
|
|
|
return status;
|
|
|
}
|
|
|
}
|
|
|
-
|
|
|
+ qdf_spinlock_create(&pdev->mon_lock);
|
|
|
return QDF_STATUS_SUCCESS;
|
|
|
}
|
|
|
/**
|
|
@@ -1164,6 +1173,7 @@ dp_rx_pdev_mon_detach(struct dp_pdev *pdev) {
|
|
|
+	qdf_spinlock_destroy(&pdev->mon_lock);
|


|


|


	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
|


|


|


		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
|


|


|


|


|


|


|
|
|
dp_mon_link_desc_pool_cleanup(soc, mac_for_pdev);
|
|
|
dp_rx_pdev_mon_status_detach(pdev, mac_for_pdev);
|
|
|
dp_rx_pdev_mon_buf_detach(pdev, mac_for_pdev);
|