
qcacmn: Allocate buffers from WBUFF for fastpath CEs

Currently, buffers attached to fastpath CEs are not allocated
from WBUFF. Since these buffers are reusable by nature,
allocating them from WBUFF brings added benefits, such as
allocation from a custom page-frag cache, which reduces the
memory used by the RX buffers.

Therefore, allocate RX buffers for fastpath CEs from WBUFF module.

Change-Id: Iaa913042473d60b184a767ece934bb9063ff4a7e
CRs-Fixed: 3581533
Manikanta Pubbisetty, 1 year ago
Commit 8993389ed1
2 files changed, 137 insertions(+), 137 deletions(-)
  1. hif/src/ce/ce_main.c (+134, -137)
  2. wbuff/src/wbuff.c (+3, -0)
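
For context before the diffs: WBUFF maintains per-module, per-pool buffer pools of pre-allocated network buffers, registered with wbuff_module_register() and drawn from with wbuff_buff_get()/wbuff_buff_put(), so RX buffers are recycled rather than freed and re-allocated on every ring refill. Below is a standalone C sketch of the get-with-fallback and put-to-recycle pattern that the new hif_ce_rx_nbuf_alloc()/hif_ce_rx_nbuf_free() wrappers follow; it uses a hypothetical freelist for illustration, not the real WBUFF internals.

/*
 * Hypothetical, self-contained model of the reuse pattern; the real
 * WBUFF pools live in wbuff/src/wbuff.c and hold qdf_nbuf_t buffers.
 */
#include <stdlib.h>

#define POOL_SIZE 4
#define BUF_SZ    2048

struct buf {
	struct buf *next;
	char data[BUF_SZ];
};

static struct buf *freelist;

/* Pre-fill the pool, as wbuff_module_register() does at init time. */
static void pool_init(void)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		struct buf *b = malloc(sizeof(*b));

		if (!b)
			return;
		b->next = freelist;
		freelist = b;
	}
}

/* Like hif_ce_rx_nbuf_alloc(): try the pool first, then fall back
 * to a fresh allocation (qdf_nbuf_alloc() in the real code). */
static struct buf *buf_get(void)
{
	struct buf *b = freelist;

	if (b) {
		freelist = b->next;
		return b;
	}
	return malloc(sizeof(*b));
}

/* Like hif_ce_rx_nbuf_free(): recycle into the pool for reuse
 * instead of freeing (simplified: always recycles). */
static void buf_put(struct buf *b)
{
	b->next = freelist;
	freelist = b;
}

int main(void)
{
	pool_init();

	struct buf *b = buf_get();	/* served from the pool */

	buf_put(b);			/* returned for the next fill */
	return 0;
}

The real wrappers, with the qdf_nbuf_t types and the actual WBUFF calls, appear in the first ce_main.c hunk below.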

+ 134 - 137
hif/src/ce/ce_main.c

@@ -2900,6 +2900,137 @@ static int hif_get_pktlog_ce_num(struct hif_softc *scn)
 	return -EINVAL;
 }
 
+#ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
+
+#define HIF_CE_RX_NBUF_WMI_POOL_SIZE 32
+
+static qdf_nbuf_t hif_ce_rx_nbuf_alloc(struct hif_softc *scn, uint8_t ce_id)
+{
+	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
+	struct HIF_CE_pipe_info *pipe_info = &hif_state->pipe_info[ce_id];
+	qdf_nbuf_t nbuf;
+
+	nbuf = wbuff_buff_get(scn->wbuff_handle, ce_id, 0, __func__,
+			      __LINE__);
+	if (!nbuf)
+		nbuf = qdf_nbuf_alloc(scn->qdf_dev, pipe_info->buf_sz,
+				      0, 4, false);
+
+	if (!nbuf)
+		return NULL;
+
+	return nbuf;
+}
+
+static void hif_ce_rx_nbuf_free(qdf_nbuf_t nbuf)
+{
+	nbuf = wbuff_buff_put(nbuf);
+	if (nbuf)
+		qdf_nbuf_free(nbuf);
+}
+
+static int
+hif_calc_wbuff_pool_size(struct hif_softc *scn, struct CE_state *ce_state)
+{
+	int ul_is_polled, dl_is_polled;
+	bool is_wmi_svc, wmi_diag_svc;
+	uint8_t ul_pipe, dl_pipe;
+	int pool_size;
+	int status;
+	int ce_id;
+
+	if (!ce_state)
+		return 0;
+
+	ce_id = ce_state->id;
+
+	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
+					 &ul_pipe, &dl_pipe,
+					 &ul_is_polled, &dl_is_polled);
+	is_wmi_svc = !status && (dl_pipe == ce_id);
+
+	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
+					 WMI_CONTROL_DIAG_SVC,
+					 &ul_pipe, &dl_pipe,
+					 &ul_is_polled, &dl_is_polled);
+	wmi_diag_svc = !status;
+
+	if (is_wmi_svc && !wmi_diag_svc)
+		pool_size = ce_state->dest_ring->nentries +
+			HIF_CE_RX_NBUF_WMI_POOL_SIZE;
+	else if (is_wmi_svc && wmi_diag_svc)
+		pool_size = ce_state->dest_ring->nentries +
+			HIF_CE_RX_NBUF_WMI_POOL_SIZE / 2;
+	else if (!is_wmi_svc && wmi_diag_svc && ce_id == dl_pipe)
+		pool_size = ce_state->dest_ring->nentries +
+			HIF_CE_RX_NBUF_WMI_POOL_SIZE / 2;
+	else
+		pool_size = ce_state->dest_ring->nentries;
+
+	return pool_size;
+}
+
+static void hif_ce_rx_wbuff_register(struct hif_softc *scn)
+{
+	struct wbuff_alloc_request wbuff_alloc[CE_COUNT_MAX] = {0};
+	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
+	struct HIF_CE_pipe_info *pipe_info;
+	struct CE_state *ce_state;
+	int ce_id;
+
+	for (ce_id = 0; ce_id <  scn->ce_count; ce_id++) {
+		pipe_info = &hif_state->pipe_info[ce_id];
+		ce_state = scn->ce_id_to_state[ce_id];
+
+		if (!pipe_info->buf_sz)
+			continue;
+
+		/* Only RX CEs need WBUFF registration. recv_bufs_needed
+		 * contains valid count for RX CEs during init time.
+		 */
+		if (!atomic_read(&pipe_info->recv_bufs_needed))
+			continue;
+
+		wbuff_alloc[ce_id].pool_id = ce_id;
+		wbuff_alloc[ce_id].buffer_size = pipe_info->buf_sz;
+		wbuff_alloc[ce_id].pool_size =
+				hif_calc_wbuff_pool_size(scn, ce_state);
+	}
+
+	scn->wbuff_handle =
+		wbuff_module_register(wbuff_alloc, CE_COUNT_MAX, 0, 4,
+				      WBUFF_MODULE_CE_RX);
+}
+
+static void hif_ce_rx_wbuff_deregister(struct hif_softc *scn)
+{
+	wbuff_module_deregister(scn->wbuff_handle);
+	scn->wbuff_handle = NULL;
+}
+#else
+static inline qdf_nbuf_t
+hif_ce_rx_nbuf_alloc(struct hif_softc *scn, uint8_t ce_id)
+{
+	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
+	struct HIF_CE_pipe_info *pipe_info = &hif_state->pipe_info[ce_id];
+
+	return qdf_nbuf_alloc(scn->qdf_dev, pipe_info->buf_sz, 0, 4, false);
+}
+
+static inline void hif_ce_rx_nbuf_free(qdf_nbuf_t nbuf)
+{
+	return qdf_nbuf_free(nbuf);
+}
+
+static inline void hif_ce_rx_wbuff_register(struct hif_softc *scn)
+{
+}
+
+static inline void hif_ce_rx_wbuff_deregister(struct hif_softc *scn)
+{
+}
+#endif /* WLAN_FEATURE_CE_RX_BUFFER_REUSE */
+
 #ifdef WLAN_FEATURE_FASTPATH
 /**
  * hif_enable_fastpath() - Update that we have enabled fastpath mode
@@ -3035,7 +3166,7 @@ void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
 		if (nbuf) {
 			qdf_nbuf_unmap_single(ce_state->scn->qdf_dev, nbuf,
 					      QDF_DMA_FROM_DEVICE);
-			qdf_nbuf_free(nbuf);
+			hif_ce_rx_nbuf_free(nbuf);
 		}
 	}
 }
@@ -3440,141 +3571,6 @@ hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
 			&toeplitz_hash_result) == QDF_STATUS_SUCCESS);
 }
 
-#ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
-
-#define HIF_CE_RX_NBUF_WMI_POOL_SIZE 32
-
-static qdf_nbuf_t hif_ce_rx_nbuf_alloc(struct hif_softc *scn, uint8_t ce_id)
-{
-	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
-	struct HIF_CE_pipe_info *pipe_info = &hif_state->pipe_info[ce_id];
-	qdf_nbuf_t nbuf;
-
-	nbuf = wbuff_buff_get(scn->wbuff_handle, ce_id, 0, __func__,
-			      __LINE__);
-	if (!nbuf)
-		nbuf = qdf_nbuf_alloc(scn->qdf_dev, pipe_info->buf_sz,
-				      0, 4, false);
-
-	if (!nbuf)
-		return NULL;
-
-	return nbuf;
-}
-
-static void hif_ce_rx_nbuf_free(qdf_nbuf_t nbuf)
-{
-	nbuf = wbuff_buff_put(nbuf);
-	if (nbuf)
-		qdf_nbuf_free(nbuf);
-}
-
-static int
-hif_calc_wbuff_pool_size(struct hif_softc *scn, struct CE_state *ce_state)
-{
-	int ul_is_polled, dl_is_polled;
-	bool is_wmi_svc, wmi_diag_svc;
-	uint8_t ul_pipe, dl_pipe;
-	int pool_size;
-	int status;
-	int ce_id;
-
-	if (!ce_state)
-		return 0;
-
-	ce_id = ce_state->id;
-
-	status = hif_map_service_to_pipe(&scn->osc, WMI_CONTROL_SVC,
-					 &ul_pipe, &dl_pipe,
-					 &ul_is_polled, &dl_is_polled);
-	is_wmi_svc = !status && (dl_pipe == ce_id);
-
-	status = hif_map_service_to_pipe(GET_HIF_OPAQUE_HDL(scn),
-					 WMI_CONTROL_DIAG_SVC,
-					 &ul_pipe, &dl_pipe,
-					 &ul_is_polled, &dl_is_polled);
-	wmi_diag_svc = !status;
-
-	if (is_wmi_svc && !wmi_diag_svc)
-		pool_size = ce_state->dest_ring->nentries +
-			HIF_CE_RX_NBUF_WMI_POOL_SIZE;
-	else if (is_wmi_svc && wmi_diag_svc)
-		pool_size = ce_state->dest_ring->nentries +
-			HIF_CE_RX_NBUF_WMI_POOL_SIZE / 2;
-	else if (!is_wmi_svc && wmi_diag_svc && ce_id == dl_pipe)
-		pool_size = ce_state->dest_ring->nentries +
-			HIF_CE_RX_NBUF_WMI_POOL_SIZE / 2;
-	else
-		pool_size = ce_state->dest_ring->nentries;
-
-	return pool_size;
-}
-
-static void hif_ce_rx_wbuff_register(struct hif_softc *scn)
-{
-	struct wbuff_alloc_request wbuff_alloc[CE_COUNT_MAX] = {0};
-	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
-	struct HIF_CE_pipe_info *pipe_info;
-	struct CE_state *ce_state;
-	int ce_id;
-
-	for (ce_id = 0; ce_id <  scn->ce_count; ce_id++) {
-		pipe_info = &hif_state->pipe_info[ce_id];
-		ce_state = scn->ce_id_to_state[ce_id];
-
-		if (!pipe_info->buf_sz)
-			continue;
-
-		/* Only RX CEs need WBUFF registration. recv_bufs_needed
-		 * contains valid count for RX CEs during init time.
-		 */
-		if (!atomic_read(&pipe_info->recv_bufs_needed))
-			continue;
-
-		if (ce_is_fastpath_enabled(scn) &&
-		    ce_state->htt_rx_data)
-			continue;
-
-		wbuff_alloc[ce_id].pool_id = ce_id;
-		wbuff_alloc[ce_id].buffer_size = pipe_info->buf_sz;
-		wbuff_alloc[ce_id].pool_size =
-				hif_calc_wbuff_pool_size(scn, ce_state);
-	}
-
-	scn->wbuff_handle =
-		wbuff_module_register(wbuff_alloc, CE_COUNT_MAX, 0, 4,
-				      WBUFF_MODULE_CE_RX);
-}
-
-static void hif_ce_rx_wbuff_deregister(struct hif_softc *scn)
-{
-	wbuff_module_deregister(scn->wbuff_handle);
-	scn->wbuff_handle = NULL;
-}
-#else
-static inline qdf_nbuf_t
-hif_ce_rx_nbuf_alloc(struct hif_softc *scn, uint8_t ce_id)
-{
-	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
-	struct HIF_CE_pipe_info *pipe_info = &hif_state->pipe_info[ce_id];
-
-	return qdf_nbuf_alloc(scn->qdf_dev, pipe_info->buf_sz, 0, 4, false);
-}
-
-static inline void hif_ce_rx_nbuf_free(qdf_nbuf_t nbuf)
-{
-	return qdf_nbuf_free(nbuf);
-}
-
-static inline void hif_ce_rx_wbuff_register(struct hif_softc *scn)
-{
-}
-
-static inline void hif_ce_rx_wbuff_deregister(struct hif_softc *scn)
-{
-}
-#endif /* WLAN_FEATURE_CE_RX_BUFFER_REUSE */
-
 /**
  * hif_ce_do_recv(): send message from copy engine to upper layers
  * @msg_callbacks: structure containing callback and callback context
@@ -4284,7 +4280,6 @@ void hif_ce_stop(struct hif_softc *scn)
 	}
 
 	hif_buffer_cleanup(hif_state);
-	hif_ce_rx_wbuff_deregister(scn);
 
 	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
 		struct HIF_CE_pipe_info *pipe_info;
@@ -4307,6 +4302,8 @@ void hif_ce_stop(struct hif_softc *scn)
 		}
 	}
 
+	hif_ce_rx_wbuff_deregister(scn);
+
 	if (hif_state->sleep_timer_init) {
 		qdf_timer_stop(&hif_state->sleep_timer);
 		qdf_timer_free(&hif_state->sleep_timer);
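
Taken together, the ce_main.c hunks move the WBUFF helper block rather than rewrite it, and three functional changes ride along with the move: (1) ce_t2h_msg_ce_cleanup(), the fastpath T2H cleanup path, now releases RX buffers through hif_ce_rx_nbuf_free() so they are recycled into WBUFF instead of freed outright, which also explains why the helpers move above that function in the file; (2) the ce_is_fastpath_enabled(scn) && ce_state->htt_rx_data skip is dropped from hif_ce_rx_wbuff_register(), so fastpath CEs now get WBUFF pools of their own; and (3) wbuff_module_deregister() in hif_ce_stop() moves after the per-pipe cleanup loop, so buffers released during that cleanup can still be returned to their pools before the module deregisters. For sizing, hif_calc_wbuff_pool_size() pads the WMI pipes: with a hypothetical 64-entry destination ring, the WMI control pipe gets 64 + 32 = 96 buffers when no separate WMI_CONTROL_DIAG_SVC pipe exists, while the WMI and DIAG pipes each get 64 + 16 = 80 when it does; every other RX CE is sized exactly to its ring.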

+ 3 - 0
wbuff/src/wbuff.c

@@ -463,6 +463,9 @@ qdf_nbuf_t wbuff_buff_put(qdf_nbuf_t buf)
 	uint8_t module_id = 0, pool_id = 0;
 	struct wbuff_pool *wbuff_pool;
 
+	if (qdf_nbuf_get_users(buffer) > 1)
+		return buffer;
+
 	if (!wbuff.initialized)
 		return buffer;
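
The wbuff.c hunk guards recycling against shared references: wbuff_buff_put() now declines to take back an nbuf whose user count is above one and hands it back to the caller, which then releases only its own reference through the normal qdf_nbuf_free() path. Recycling a buffer that another context still holds would allow it to be overwritten while in use. A hypothetical, self-contained C model of that guard (not the WBUFF implementation):

#include <stdatomic.h>
#include <stdio.h>

struct rbuf {
	atomic_int users;		/* qdf_nbuf_get_users() analogue */
	struct rbuf *next;
};

static struct rbuf *freelist;

/* Mirrors the new check in wbuff_buff_put(): return the buffer to
 * the caller when others still hold it; recycle it into the pool
 * (and return NULL) only when this is the last reference. */
static struct rbuf *rbuf_put(struct rbuf *b)
{
	if (atomic_load(&b->users) > 1)
		return b;		/* caller drops its own reference */

	b->next = freelist;		/* safe to reuse: last reference */
	freelist = b;
	return NULL;
}

int main(void)
{
	struct rbuf shared, exclusive;

	atomic_init(&shared.users, 2);
	atomic_init(&exclusive.users, 1);

	printf("shared recycled: %s\n",
	       rbuf_put(&shared) ? "no" : "yes");	/* no  */
	printf("exclusive recycled: %s\n",
	       rbuf_put(&exclusive) ? "no" : "yes");	/* yes */
	return 0;
}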