qcacmn: Delay allocation of rings for monitor mode

In the existing implementation, for monitor mode the below allocations
are done at pdev attach and init time.

	a. 64 monitor buffer allocation for RxDMA monitor buffer ring
	b. Link descriptor memory allocation for monitor link descriptor ring

This memory is wasted for customers not using monitor mode and for
low memory profiles.

To save this memory, allocate all buffers and link descriptor memory
at monitor vdev creation time instead.

Change-Id: I873c76d2f625a782532a101037915b0353928a5b
CRs-Fixed: 2829402
Amir Patel committed 4 years ago
commit 76b9febd3d

+ 126 - 23
dp/wifi3.0/dp_main.c

@@ -2799,6 +2799,10 @@ QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc, uint32_t mac_id)
 			      MINIDUMP_STR_SIZE);
 	}
 
+	/* If link descriptor banks are allocated, return from here */
+	if (pages->num_pages)
+		return QDF_STATUS_SUCCESS;
+
 	/* Round up to power of 2 */
 	*total_link_descs = 1;
 	while (*total_link_descs < num_entries)
@@ -5056,7 +5060,8 @@ static QDF_STATUS dp_rxdma_ring_config(struct dp_soc *soc)
 			       hal_srng, RXDMA_BUF);
 #ifndef DISABLE_MON_CONFIG
 
-		if (soc->wlan_cfg_ctx->rxdma1_enable) {
+		if (soc->wlan_cfg_ctx->rxdma1_enable &&
+		    wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) {
 			htt_srng_setup(soc->htt_handle, mac_for_pdev,
 				       soc->rxdma_mon_buf_ring[lmac_id].hal_srng,
 				       RXDMA_MONITOR_BUF);
@@ -7318,6 +7323,92 @@ static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
 	return QDF_STATUS_E_FAILURE;
 }
 
+/**
+ * dp_vdev_set_monitor_mode_rings() - set monitor mode rings
+ * @pdev: DP pdev object
+ * @delayed_replenish: flag to request delayed buffer replenish
+ *
+ * Allocate SW descriptor pool, buffers, link descriptor memory
+ * Initialize monitor related SRNGs
+ *
+ * Return: QDF_STATUS
+ */
+static QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev,
+						 uint8_t delayed_replenish)
+{
+	struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
+	uint32_t mac_id;
+	uint32_t mac_for_pdev;
+	struct dp_soc *soc = pdev->soc;
+	QDF_STATUS status = QDF_STATUS_SUCCESS;
+	struct dp_srng *mon_buf_ring;
+	uint32_t num_entries;
+
+	pdev_cfg_ctx = pdev->wlan_cfg_ctx;
+
+	/* If monitor rings are already initialized, return from here */
+	if (pdev->pdev_mon_init)
+		return QDF_STATUS_SUCCESS;
+
+	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
+		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
+							  pdev->pdev_id);
+
+		/* Allocate sw rx descriptor pool for mon RxDMA buffer ring */
+		status = dp_rx_pdev_mon_buf_desc_pool_alloc(pdev, mac_for_pdev);
+		if (!QDF_IS_STATUS_SUCCESS(status)) {
+			dp_err("%s: dp_rx_pdev_mon_buf_desc_pool_alloc() failed\n",
+			       __func__);
+			goto fail0;
+		}
+
+		dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev);
+
+		/* If monitor buffers are already allocated,
+		 * do not allocate.
+		 */
+		status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
+							  delayed_replenish);
+
+		mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
+		/*
+		 * Configure low interrupt threshold when monitor mode is
+		 * configured.
+		 */
+		if (mon_buf_ring->hal_srng) {
+			num_entries = mon_buf_ring->num_entries;
+			hal_set_low_threshold(mon_buf_ring->hal_srng,
+					      num_entries >> 3);
+			htt_srng_setup(pdev->soc->htt_handle,
+				       pdev->pdev_id,
+				       mon_buf_ring->hal_srng,
+				       RXDMA_MONITOR_BUF);
+		}
+
+		/* Allocate link descriptors for the mon link descriptor ring */
+		status = dp_hw_link_desc_pool_banks_alloc(soc, mac_for_pdev);
+		if (!QDF_IS_STATUS_SUCCESS(status)) {
+			dp_err("%s: dp_hw_link_desc_pool_banks_alloc() failed",
+			       __func__);
+			goto fail0;
+		}
+		dp_link_desc_ring_replenish(soc, mac_for_pdev);
+
+		htt_srng_setup(soc->htt_handle, pdev->pdev_id,
+			       soc->rxdma_mon_desc_ring[mac_for_pdev].hal_srng,
+			       RXDMA_MONITOR_DESC);
+		htt_srng_setup(soc->htt_handle, pdev->pdev_id,
+			       soc->rxdma_mon_dst_ring[mac_for_pdev].hal_srng,
+			       RXDMA_MONITOR_DST);
+	}
+	pdev->pdev_mon_init = 1;
+
+	return QDF_STATUS_SUCCESS;
+
+fail0:
+	return QDF_STATUS_E_FAILURE;
+}
+
 /**
  * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
  * @vdev_handle: Datapath VDEV handle
@@ -7325,11 +7416,11 @@ static QDF_STATUS dp_get_peer_mac_from_peer_id(struct cdp_soc_t *soc,
  *
  * Return: 0 on success, not 0 on failure
  */
-static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *soc_hdl,
+static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *dp_soc,
 					   uint8_t vdev_id,
 					   uint8_t special_monitor)
 {
-	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
+	struct dp_soc *soc = (struct dp_soc *)dp_soc;
 	uint32_t mac_id;
 	uint32_t mac_for_pdev;
 	struct dp_pdev *pdev;
@@ -7370,29 +7461,38 @@ static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *soc_hdl,
 
 	pdev->monitor_configured = true;
 
-	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
-		mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc, mac_id,
-							  pdev->pdev_id);
-		dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
-						 FALSE);
-		/*
-		 * Configure low interrupt threshld when monitor mode is
-		 * configured.
-		 */
-		mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
-		if (mon_buf_ring->hal_srng) {
-			num_entries = mon_buf_ring->num_entries;
-			hal_set_low_threshold(mon_buf_ring->hal_srng,
-					      num_entries >> 3);
-			htt_srng_setup(pdev->soc->htt_handle,
-				       pdev->pdev_id,
-				       mon_buf_ring->hal_srng,
-				       RXDMA_MONITOR_BUF);
+	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
+
+	/* If delayed monitor replenish is disabled, allocate the descriptor
+	 * pool, link descriptors and full-ring-size monitor buffers here.
+	 */
+	if (!wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx)) {
+		dp_vdev_set_monitor_mode_rings(pdev, false);
+	} else {
+		for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
+			mac_for_pdev = dp_get_lmac_id_for_pdev_id(pdev->soc,
+								  mac_id,
+								  pdev->pdev_id);
+
+			dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
+							 FALSE);
+			mon_buf_ring = &pdev->soc->rxdma_mon_buf_ring[mac_for_pdev];
+			/*
+			 * Configure low interrupt threshold when monitor mode is
+			 * configured.
+			 */
+			if (mon_buf_ring->hal_srng) {
+				num_entries = mon_buf_ring->num_entries;
+				hal_set_low_threshold(mon_buf_ring->hal_srng,
+						      num_entries >> 3);
+				htt_srng_setup(pdev->soc->htt_handle,
+					       pdev->pdev_id,
+					       mon_buf_ring->hal_srng,
+					       RXDMA_MONITOR_BUF);
+			}
 		}
 	}
 
-	dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
-
 	dp_mon_filter_setup_mon_mode(pdev);
 	status = dp_mon_filter_update(pdev);
 	if (status != QDF_STATUS_SUCCESS) {
@@ -8415,6 +8515,9 @@ dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
 		pdev->tx_sniffer_enable = 0;
 		pdev->monitor_configured = true;
 
+		if (!wlan_cfg_is_delay_mon_replenish(pdev->soc->wlan_cfg_ctx))
+			dp_vdev_set_monitor_mode_rings(pdev, true);
+
 		/*
 		 * Setup the M copy mode filter.
 		 */
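
The dp_main.c change above hinges on lazy, one-shot setup: dp_vdev_set_monitor_mode_rings() returns early once pdev->pdev_mon_init is set, the allocators it calls (dp_rx_pdev_mon_buf_desc_pool_alloc(), dp_hw_link_desc_pool_banks_alloc()) skip work that was already done, and dp_rx_pdev_mon_buffers_free() clears the flag so the setup can run again later. A minimal standalone sketch of that pattern is below; mon_ctx, alloc_once(), mon_rings_setup() and the sizes are hypothetical stand-ins, not driver APIs.

/*
 * Standalone sketch (not driver code) of the lazy, one-shot setup pattern:
 * allocations are idempotent and a single init flag makes repeat calls cheap,
 * while teardown clears the flag so setup can run again later.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mon_ctx {
	void *buf_pool;         /* monitor buffer pool, allocated lazily */
	void *link_desc_banks;  /* link descriptor memory, allocated lazily */
	bool  mon_init;         /* mirrors pdev->pdev_mon_init */
};

/* Idempotent helper: allocate only if this slot is still empty. */
static int alloc_once(void **slot, size_t size)
{
	if (*slot)
		return 0;
	*slot = calloc(1, size);
	return *slot ? 0 : -1;
}

/* Called on monitor vdev creation; safe to call more than once. */
static int mon_rings_setup(struct mon_ctx *ctx)
{
	if (ctx->mon_init)      /* rings already initialized, nothing to do */
		return 0;

	if (alloc_once(&ctx->buf_pool, 64 * 2048) ||
	    alloc_once(&ctx->link_desc_banks, 4096))
		return -1;

	ctx->mon_init = true;
	return 0;
}

/* Counterpart of dp_rx_pdev_mon_buffers_free(): clear the flag on free. */
static void mon_rings_free(struct mon_ctx *ctx)
{
	free(ctx->buf_pool);
	free(ctx->link_desc_banks);
	ctx->buf_pool = NULL;
	ctx->link_desc_banks = NULL;
	ctx->mon_init = false;
}

int main(void)
{
	struct mon_ctx ctx = { 0 };

	printf("first setup:  %d\n", mon_rings_setup(&ctx));
	printf("second setup: %d\n", mon_rings_setup(&ctx)); /* no-op */
	mon_rings_free(&ctx);
	printf("after free:   %d\n", mon_rings_setup(&ctx)); /* allocates again */
	mon_rings_free(&ctx);
	return 0;
}

Because every step is idempotent, the same entry point can be reused from both dp_vdev_set_monitor_mode() and dp_config_debug_sniffer() without tracking which path ran first.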

+ 4 - 0
dp/wifi3.0/dp_rx_mon.h

@@ -137,6 +137,10 @@ void dp_rx_pdev_mon_status_buffers_free(struct dp_pdev *pdev, uint32_t mac_id);
 QDF_STATUS
 dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
 				 bool delayed_replenish);
+QDF_STATUS
+dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id);
+void
+dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id);
 
 /**
  * dp_rx_mon_handle_status_buf_done () - Handle DMA not done case for

+ 16 - 5
dp/wifi3.0/dp_rx_mon_dest.c

@@ -1716,7 +1716,8 @@ dp_rx_pdev_mon_cmn_buffers_alloc(struct dp_pdev *pdev, int mac_id)
 		goto fail;
 	}
 
-	if (!soc->wlan_cfg_ctx->rxdma1_enable)
+	if (!soc->wlan_cfg_ctx->rxdma1_enable ||
+	    !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
 		return status;
 
 	status = dp_rx_pdev_mon_buf_buffers_alloc(pdev, mac_for_pdev,
@@ -1734,7 +1735,7 @@ fail:
 	return status;
 }
 
-static void
+void
 dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
 {
 	uint8_t pdev_id = pdev->pdev_id;
@@ -1751,6 +1752,10 @@ dp_rx_pdev_mon_buf_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
 
 	rx_desc_pool = &soc->rx_desc_mon[mac_id];
 
+	/* If descriptor pool is already initialized, do not initialize it */
+	if (rx_desc_pool->freelist)
+		return;
+
 	dp_debug("Mon RX Desc buf Pool[%d] init entries=%u",
 		 pdev_id, num_entries);
 
@@ -1782,7 +1787,8 @@ dp_rx_pdev_mon_cmn_desc_pool_init(struct dp_pdev *pdev, int mac_id)
 	mac_for_pdev = dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);
 	dp_rx_pdev_mon_status_desc_pool_init(pdev, mac_for_pdev);
 
-	if (!soc->wlan_cfg_ctx->rxdma1_enable)
+	if (!soc->wlan_cfg_ctx->rxdma1_enable ||
+	    !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
 		return;
 
 	dp_rx_pdev_mon_buf_desc_pool_init(pdev, mac_for_pdev);
@@ -1863,7 +1869,7 @@ void dp_rx_pdev_mon_buf_buffers_free(struct dp_pdev *pdev, uint32_t mac_id)
 		dp_rx_desc_nbuf_free(soc, rx_desc_pool);
 }
 
-static QDF_STATUS
+QDF_STATUS
 dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
 {
 	uint8_t pdev_id = pdev->pdev_id;
@@ -1886,6 +1892,9 @@ dp_rx_pdev_mon_buf_desc_pool_alloc(struct dp_pdev *pdev, uint32_t mac_id)
 	rx_desc_pool_size = wlan_cfg_get_dp_soc_rx_sw_desc_weight(soc_cfg_ctx) *
 		num_entries;
 
+	if (dp_rx_desc_pool_is_allocated(rx_desc_pool) == QDF_STATUS_SUCCESS)
+		return QDF_STATUS_SUCCESS;
+
 	return dp_rx_desc_pool_alloc(soc, rx_desc_pool_size, rx_desc_pool);
 }
 
@@ -1906,7 +1915,8 @@ dp_rx_pdev_mon_cmn_desc_pool_alloc(struct dp_pdev *pdev, int mac_id)
 		goto fail;
 	}
 
-	if (!soc->wlan_cfg_ctx->rxdma1_enable)
+	if (!soc->wlan_cfg_ctx->rxdma1_enable ||
+	    !wlan_cfg_is_delay_mon_replenish(soc->wlan_cfg_ctx))
 		return status;
 
 	/* Allocate sw rx descriptor pool for monitor RxDMA buffer ring */
@@ -2003,6 +2013,7 @@ dp_rx_pdev_mon_buffers_free(struct dp_pdev *pdev)
 
 	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++)
 		dp_rx_pdev_mon_cmn_buffers_free(pdev, mac_id);
+	pdev->pdev_mon_init = 0;
 }
 
 QDF_STATUS

+ 2 - 0
dp/wifi3.0/dp_types.h

@@ -2321,6 +2321,8 @@ struct dp_pdev {
 	/* HTT stats debugfs params */
 	struct pdev_htt_stats_dbgfs_cfg *dbgfs_cfg;
 #endif
+	/* Flag to indicate monitor rings are initialized */
+	uint8_t pdev_mon_init;
 };
 
 struct dp_peer;

+ 5 - 1
wlan_cfg/cfg_dp.h

@@ -1091,6 +1091,9 @@
 		CFG_INI_BOOL("wow_check_rx_pending_enable", \
 		false, \
 		"enable rx frame pending check in WoW mode")
+#define CFG_DP_DELAY_MON_REPLENISH \
+		CFG_INI_BOOL("delay_mon_replenish", \
+		true, "Delay Monitor Replenish")
 
 /*
  * <ini>
@@ -1203,5 +1206,6 @@
 		CFG(CFG_DP_RX_RADIO_1_DEFAULT_REO) \
 		CFG(CFG_DP_RX_RADIO_2_DEFAULT_REO) \
 		CFG(CFG_DP_WOW_CHECK_RX_PENDING) \
-		CFG(CFG_FORCE_RX_64_BA)
+		CFG(CFG_FORCE_RX_64_BA) \
+		CFG(CFG_DP_DELAY_MON_REPLENISH)
 #endif /* _CFG_DP_H_ */

+ 15 - 1
wlan_cfg/wlan_cfg.c

@@ -638,7 +638,8 @@ wlan_cfg_soc_attach(struct cdp_ctrl_objmgr_psoc *psoc)
 			cfg_get(psoc, CFG_DP_RX_RADIO_2_DEFAULT_REO);
 	wlan_cfg_ctx->wow_check_rx_pending_enable =
 			cfg_get(psoc, CFG_DP_WOW_CHECK_RX_PENDING);
-
+	wlan_cfg_ctx->delay_mon_replenish = cfg_get(psoc,
+			CFG_DP_DELAY_MON_REPLENISH);
 	return wlan_cfg_ctx;
 }
 
@@ -1522,3 +1523,16 @@ bool wlan_cfg_is_dp_force_rx_64_ba(struct wlan_cfg_dp_soc_ctxt *cfg)
 {
 	return cfg->enable_force_rx_64_ba;
 }
+
+void
+wlan_cfg_set_delay_mon_replenish(struct wlan_cfg_dp_soc_ctxt *cfg,
+				 bool val)
+{
+	cfg->delay_mon_replenish = val;
+}
+
+bool
+wlan_cfg_is_delay_mon_replenish(struct wlan_cfg_dp_soc_ctxt *cfg)
+{
+	return cfg->delay_mon_replenish;
+}
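
On the wlan_cfg side, the INI value is read once at soc attach and cached in the soc cfg context, with a set/get pair for later overrides and queries. A compact standalone sketch of that read-once-and-override flow, using hypothetical stand-ins (my_cfg_ctx, my_cfg_attach(), INI_DELAY_MON_REPLENISH_DEFAULT) rather than the real cfg framework:

/*
 * Standalone sketch of the read-once/override pattern used for
 * delay_mon_replenish: the INI default is captured at soc attach and a
 * setter lets callers flip it later.
 */
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the CFG_INI_BOOL("delay_mon_replenish", true, ...) default */
#define INI_DELAY_MON_REPLENISH_DEFAULT true

struct my_cfg_ctx {
	bool delay_mon_replenish;
};

/* Stand-in for the cfg_get(psoc, CFG_DP_DELAY_MON_REPLENISH) read */
static void my_cfg_attach(struct my_cfg_ctx *cfg)
{
	cfg->delay_mon_replenish = INI_DELAY_MON_REPLENISH_DEFAULT;
}

static void my_cfg_set_delay_mon_replenish(struct my_cfg_ctx *cfg, bool val)
{
	cfg->delay_mon_replenish = val;
}

static bool my_cfg_is_delay_mon_replenish(struct my_cfg_ctx *cfg)
{
	return cfg->delay_mon_replenish;
}

int main(void)
{
	struct my_cfg_ctx cfg;

	my_cfg_attach(&cfg);
	printf("default: %d\n", my_cfg_is_delay_mon_replenish(&cfg));

	my_cfg_set_delay_mon_replenish(&cfg, false);   /* runtime override */
	printf("override: %d\n", my_cfg_is_delay_mon_replenish(&cfg));
	return 0;
}

The real accessors follow the same shape: wlan_cfg_soc_attach() seeds delay_mon_replenish from CFG_DP_DELAY_MON_REPLENISH (default true), and wlan_cfg_set_delay_mon_replenish() lets other code change it afterwards if needed.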

+ 21 - 0
wlan_cfg/wlan_cfg.h

@@ -160,6 +160,7 @@ struct wlan_srng_cfg {
  * @enable_force_rx_64_ba: flag to enable force 64 blockack in RX
  * @disable_intra_bss_fwd: flag to disable intra bss forwarding
  * @rxdma1_enable: flag to indicate if rxdma1 is enabled
+ * @delay_mon_replenish: flag to delay monitor buffer replenish
  * @tx_desc_limit_0: tx_desc limit for 5G H
  * @tx_desc_limit_1: tx_desc limit for 2G
  * @tx_desc_limit_2: tx_desc limit for 5G L
@@ -273,6 +274,7 @@ struct wlan_cfg_dp_soc_ctxt {
 	bool enable_force_rx_64_ba;
 	bool disable_intra_bss_fwd;
 	bool rxdma1_enable;
+	bool delay_mon_replenish;
 	int max_ast_idx;
 	int tx_desc_limit_0;
 	int tx_desc_limit_1;
@@ -1539,3 +1541,22 @@ uint8_t wlan_cfg_radio2_default_reo_get(struct wlan_cfg_dp_soc_ctxt *cfg);
  * Return: .
  */
 void wlan_cfg_set_rxdma1_enable(struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx);
+
+/**
+ * wlan_cfg_set_delay_mon_replenish() - Set delayed monitor replenish
+ * @cfg: soc configuration context
+ * @val: value to set
+ *
+ * Return: None
+ */
+void
+wlan_cfg_set_delay_mon_replenish(struct wlan_cfg_dp_soc_ctxt *cfg, bool val);
+
+/**
+ * wlan_cfg_is_delay_mon_replenish() - Check if delayed monitor replenish
+ * is enabled
+ * @cfg: soc configuration context
+ *
+ * Return: true if delayed monitor replenish is enabled
+ */
+bool
+wlan_cfg_is_delay_mon_replenish(struct wlan_cfg_dp_soc_ctxt *cfg);