
qcacmn: Use prealloc multi pages for direct link CE dest buffers

The IOVAs of the buffers attached to the direct
link receive copy engine need to be contiguous for
optimal memory mapping on the ADSP.

Fix is to use the multi-page prealloc pool when posting
buffers for the direct link receive pipe.

Change-Id: Ieb253bd3c1b6550e4c1c63cd587993891ac817f2
CRs-Fixed: 3502633
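
Why contiguity matters here, in a minimal standalone sketch (the pool
base, buffer count, and buffer size are invented for illustration):
when the receive buffers come from one contiguous preallocated pool,
every buffer's IOVA is a base plus a fixed stride, so the ADSP can map
the whole pool as a single region instead of one mapping per nbuf.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_BUFS 16      /* invented value */
#define BUF_SZ   2048u   /* invented value */

int main(void)
{
	/* Invented IOVA of one contiguous pool allocation */
	uint64_t pool_base = 0x88000000;
	unsigned int i;

	/*
	 * One base plus a stride addresses every buffer, so a single
	 * mapping of NUM_BUFS * BUF_SZ bytes covers the whole ring.
	 */
	for (i = 0; i < NUM_BUFS; i++)
		printf("buf %2u -> IOVA 0x%" PRIx64 "\n", i,
		       pool_base + (uint64_t)i * BUF_SZ);

	return 0;
}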
Author: Yeshwanth Sriram Guntuka
Commit: d2b4b226d4
5 changed files with 222 additions and 14 deletions
  1. hif/inc/hif.h (+9, -0)
  2. hif/src/ce/ce_main.c (+97, -1)
  3. hif/src/ce/ce_service_srng.c (+10, -13)
  4. hif/src/hif_main.c (+54, -0)
  5. hif/src/hif_main.h (+52, -0)

+ 9 - 0
hif/inc/hif.h

@@ -878,6 +878,8 @@ struct htc_callbacks {
  * @get_bandwidth_level: Query current bandwidth level for the driver
  * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent mem
  * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to pool
+ * @prealloc_get_multi_pages: get prealloc multi pages memory
+ * @prealloc_put_multi_pages: put prealloc multi pages memory back to pool
  * This Structure provides callback pointer for HIF to query hdd for driver
  * states.
  */
@@ -893,6 +895,13 @@ struct hif_driver_state_callbacks {
 						       qdf_dma_addr_t *paddr,
 						       uint32_t ring_type);
 	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
+	void (*prealloc_get_multi_pages)(uint32_t desc_type,
+					 qdf_size_t elem_size,
+					 uint16_t elem_num,
+					 struct qdf_mem_multi_page_t *pages,
+					 bool cacheable);
+	void (*prealloc_put_multi_pages)(uint32_t desc_type,
+					 struct qdf_mem_multi_page_t *pages);
 };
 
 /* This API detaches the HTC layer from the HIF device */
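
A hedged sketch of the driver-side wiring these two new members enable
(the dp_prealloc_* helpers are hypothetical stand-ins, not functions
added by this change; the other callback members are elided):

/* Hypothetical driver-side helpers; names are invented. */
static void dp_prealloc_get_multi_pages(uint32_t desc_type,
					qdf_size_t elem_size,
					uint16_t elem_num,
					struct qdf_mem_multi_page_t *pages,
					bool cacheable);
static void dp_prealloc_put_multi_pages(uint32_t desc_type,
					struct qdf_mem_multi_page_t *pages);

static struct hif_driver_state_callbacks driver_cbks = {
	/* ... existing members elided ... */
	.prealloc_get_multi_pages = dp_prealloc_get_multi_pages,
	.prealloc_put_multi_pages = dp_prealloc_put_multi_pages,
};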

+ 97 - 1
hif/src/ce/ce_main.c

@@ -3585,6 +3585,89 @@ QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
 	return QDF_STATUS_SUCCESS;
 }
 
+#ifdef FEATURE_DIRECT_LINK
+static QDF_STATUS
+hif_alloc_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
+					  int pipe_num)
+{
+	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
+	struct service_to_pipe *tgt_svc_cfg;
+	struct HIF_CE_pipe_info *pipe_info;
+	int32_t recv_bufs_needed;
+	qdf_dma_addr_t dma_addr;
+	uint16_t num_elem_per_page;
+	uint16_t i;
+	bool is_found = false;
+
+	tgt_svc_cfg = hif_ce_state->tgt_svc_map;
+
+	for (i = 0; i < hif_ce_state->sz_tgt_svc_map; i++) {
+		if (tgt_svc_cfg[i].service_id != LPASS_DATA_MSG_SVC ||
+		    tgt_svc_cfg[i].pipedir != PIPEDIR_IN ||
+		    tgt_svc_cfg[i].pipenum != pipe_num)
+			continue;
+
+		pipe_info = &hif_ce_state->pipe_info[pipe_num];
+		recv_bufs_needed = atomic_read(&pipe_info->recv_bufs_needed);
+
+		if (!pipe_info->buf_sz || !recv_bufs_needed)
+			continue;
+
+		is_found = true;
+		break;
+	}
+
+	if (!is_found)
+		return QDF_STATUS_E_NOSUPPORT;
+
+	scn->dl_recv_pipe_num = pipe_num;
+
+	hif_prealloc_get_multi_pages(scn, QDF_DP_RX_DIRECT_LINK_CE_BUF_TYPE,
+				     pipe_info->buf_sz, recv_bufs_needed,
+				     &scn->dl_recv_pages, false);
+	if (!scn->dl_recv_pages.num_pages)
+		return QDF_STATUS_E_NOMEM;
+
+	num_elem_per_page = scn->dl_recv_pages.num_element_per_page;
+	for (i = 0; i < recv_bufs_needed; i++) {
+		dma_addr = scn->dl_recv_pages.dma_pages[i / num_elem_per_page].page_p_addr;
+		dma_addr += (i % num_elem_per_page) * pipe_info->buf_sz;
+		ce_recv_buf_enqueue(pipe_info->ce_hdl, NULL, dma_addr);
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+static QDF_STATUS
+hif_free_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
+					 int pipe_num)
+{
+	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
+
+	if (pipe_num != scn->dl_recv_pipe_num)
+		return QDF_STATUS_E_NOSUPPORT;
+
+	hif_prealloc_put_multi_pages(scn, QDF_DP_RX_DIRECT_LINK_CE_BUF_TYPE,
+				     &scn->dl_recv_pages, false);
+
+	return QDF_STATUS_SUCCESS;
+}
+#else
+static inline QDF_STATUS
+hif_alloc_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
+					  int pipe_num)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+
+static inline QDF_STATUS
+hif_free_pages_for_direct_link_recv_pipe(struct HIF_CE_state *hif_ce_state,
+					 int pipe_num)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+#endif
+
 /*
  * Try to post all desired receive buffers for all pipes.
  * Returns 0 for non fastpath rx copy engine as
@@ -3617,6 +3700,12 @@ static QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
 		    ce_state && (ce_state->htt_rx_data))
 			continue;
 
+		qdf_status =
+			hif_alloc_pages_for_direct_link_recv_pipe(hif_state,
+								  pipe_num);
+		if (QDF_IS_STATUS_SUCCESS(qdf_status))
+			continue;
+
 		qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
 		if (!QDF_IS_STATUS_SUCCESS(qdf_status) && ce_state &&
 			ce_state->htt_rx_data &&
@@ -3667,6 +3756,7 @@ static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
 	qdf_nbuf_t netbuf;
 	qdf_dma_addr_t CE_data;
 	void *per_CE_context;
+	QDF_STATUS status;
 
 	buf_sz = pipe_info->buf_sz;
 	/* Unused Copy Engine */
@@ -3683,6 +3773,12 @@ static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
 
 	if (!scn->qdf_dev)
 		return;
+
+	status = hif_free_pages_for_direct_link_recv_pipe(hif_state,
+							  pipe_info->pipe_num);
+	if (QDF_IS_STATUS_SUCCESS(status))
+		return;
+
 	while (ce_revoke_recv_next
 		       (ce_hdl, &per_CE_context, (void **)&netbuf,
 			&CE_data) == QDF_STATUS_SUCCESS) {
@@ -5635,6 +5731,7 @@ void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
 	qdf_mem_copy(data + *offset, &info, size);
 	*offset = *offset + size;
 }
+#endif
 
 #ifdef FEATURE_DIRECT_LINK
 QDF_STATUS
@@ -5687,4 +5784,3 @@ hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
 	return QDF_STATUS_E_NOSUPPORT;
 }
 #endif
-#endif
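
The posting loop in hif_alloc_pages_for_direct_link_recv_pipe() hides
a small piece of arithmetic worth spelling out: buffer i lives in page
i / num_element_per_page, at byte offset (i % num_element_per_page) *
buf_sz within that page. A standalone sketch with a reduced mock of
the page table (all values invented):

#include <stdint.h>
#include <stdio.h>

/*
 * Reduced mock of the dma_pages array: only the field the posting
 * loop reads. Two pages with two buffers each, values invented.
 */
struct mock_dma_page {
	uint64_t page_p_addr;
};

int main(void)
{
	struct mock_dma_page dma_pages[] = {
		{ 0x90000000 }, { 0x90400000 }
	};
	unsigned int num_elem_per_page = 2;
	unsigned int buf_sz = 2048;
	uint64_t dma_addr;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		/*
		 * Same arithmetic as the enqueue loop above:
		 * pick the page, then step within it.
		 */
		dma_addr = dma_pages[i / num_elem_per_page].page_p_addr;
		dma_addr += (i % num_elem_per_page) * buf_sz;
		printf("buf %u -> dma_addr 0x%llx\n", i,
		       (unsigned long long)dma_addr);
	}
	return 0;
}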

+ 10 - 13
hif/src/ce/ce_service_srng.c

@@ -1163,8 +1163,7 @@ uint16_t ce_get_direct_link_dest_srng_buffers(struct hif_softc *scn,
 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
 	struct CE_state *ce_state;
 	struct service_to_pipe *tgt_svc_cfg;
-	qdf_nbuf_t nbuf;
-	uint64_t *nbuf_dmaaddr = NULL;
+	uint64_t *dma_addr_arr = NULL;
 	uint32_t i;
 	uint32_t j = 0;
 
@@ -1182,25 +1181,23 @@ uint16_t ce_get_direct_link_dest_srng_buffers(struct hif_softc *scn,
 			return QDF_STATUS_E_FAILURE;
 		}
 
-		nbuf_dmaaddr = qdf_mem_malloc(sizeof(*nbuf_dmaaddr) *
-					      ce_state->dest_ring->nentries);
-		if (!nbuf_dmaaddr)
-			return 0;
+		QDF_ASSERT(scn->dl_recv_pages.dma_pages);
 
-		for (j = 0; j < ce_state->dest_ring->nentries; j++) {
-			nbuf = ce_state->dest_ring->per_transfer_context[j];
-			if (!nbuf)
-				break;
+		dma_addr_arr = qdf_mem_malloc(sizeof(*dma_addr_arr) *
+					      scn->dl_recv_pages.num_pages);
+		if (!dma_addr_arr)
+			return 0;
 
-			nbuf_dmaaddr[j] = QDF_NBUF_CB_PADDR(nbuf);
-		}
+		for (j = 0; j < scn->dl_recv_pages.num_pages; j++)
+			dma_addr_arr[j] =
+				scn->dl_recv_pages.dma_pages[j].page_p_addr;
 
 		*buf_size = ce_state->src_sz_max;
 
 		break;
 	}
 
-	*dma_addr = nbuf_dmaaddr;
+	*dma_addr = dma_addr_arr;
 
 	return j;
 }
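
Net effect of this hunk: the function now reports one DMA address per
preallocated page instead of walking per_transfer_context for one
address per nbuf, and it still hands the caller a freshly allocated
array. A hedged caller sketch (the argument order and the type of
buf_size are assumptions inferred from the visible hunks):

	uint64_t *dma_addrs = NULL;
	uint32_t buf_size = 0;
	uint16_t num_pages;

	num_pages = ce_get_direct_link_dest_srng_buffers(scn, &dma_addrs,
							 &buf_size);
	if (num_pages && dma_addrs) {
		/*
		 * Each entry is a page base address covering several
		 * buf_size buffers; the array is owned by the caller.
		 */
		/* ... share the addresses with the ADSP ... */
		qdf_mem_free(dma_addrs);
	}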

+ 54 - 0
hif/src/hif_main.c

@@ -1168,6 +1168,26 @@ hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
 }
 #endif
 
+#ifdef FEATURE_DIRECT_LINK
+/**
+ * hif_init_direct_link_rcv_pipe_num(): Initialize the direct link receive
+ *  pipe number
+ * @scn: hif context
+ *
+ * Return: None
+ */
+static inline
+void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
+{
+	scn->dl_recv_pipe_num = INVALID_PIPE_NO;
+}
+#else
+static inline
+void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
+{
+}
+#endif
+
 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
 				  uint32_t mode,
 				  enum qdf_bus_type bus_type,
@@ -1217,6 +1237,7 @@ struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
 	hif_cpuhp_register(scn);
 	hif_latency_detect_init(scn);
 	hif_affinity_mgr_init(scn, psoc);
+	hif_init_direct_link_rcv_pipe_num(scn);
 
 out:
 	return GET_HIF_OPAQUE_HDL(scn);
@@ -2309,6 +2330,39 @@ void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
 					size, vaddr, paddr, memctx);
 	}
 }
+
+void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
+				  qdf_size_t elem_size, uint16_t elem_num,
+				  struct qdf_mem_multi_page_t *pages,
+				  bool cacheable)
+{
+	struct hif_driver_state_callbacks *cbk =
+			hif_get_callbacks_handle(scn);
+
+	if (cbk && cbk->prealloc_get_multi_pages)
+		cbk->prealloc_get_multi_pages(desc_type, elem_size, elem_num,
+					      pages, cacheable);
+
+	if (!pages->num_pages)
+		qdf_mem_multi_pages_alloc(scn->qdf_dev, pages,
+					  elem_size, elem_num, 0, cacheable);
+}
+
+void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
+				  struct qdf_mem_multi_page_t *pages,
+				  bool cacheable)
+{
+	struct hif_driver_state_callbacks *cbk =
+			hif_get_callbacks_handle(scn);
+
+	if (cbk && cbk->prealloc_put_multi_pages &&
+	    pages->is_mem_prealloc)
+		cbk->prealloc_put_multi_pages(desc_type, pages);
+
+	if (!pages->is_mem_prealloc)
+		qdf_mem_multi_pages_free(scn->qdf_dev, pages, 0,
+					 cacheable);
+}
 #endif
 
 /**
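
The get/put pair above follows a try-the-pool-first pattern: the
callback may satisfy the request from preallocated memory, and only if
it did not (pages->num_pages stays zero) does HIF fall back to a fresh
qdf_mem_multi_pages_alloc(); on release, is_mem_prealloc decides
whether the pages go back to the pool or are really freed. The same
shape in a self-contained plain-C sketch (pool_get()/pool_put() are
invented stubs):

#include <stdbool.h>
#include <stdlib.h>

/* Invented stand-in pool: always empty here, forcing the fallback. */
static void *pool_get(size_t sz) { (void)sz; return NULL; }
static void pool_put(void *mem) { (void)mem; }

struct buf {
	void *mem;
	bool from_pool;  /* mirrors pages->is_mem_prealloc */
};

static void buf_get(struct buf *b, size_t sz)
{
	b->mem = pool_get(sz);        /* preferred: pooled memory   */
	b->from_pool = (b->mem != NULL);
	if (!b->mem)
		b->mem = malloc(sz);  /* fallback: fresh allocation */
}

static void buf_put(struct buf *b)
{
	if (b->from_pool)
		pool_put(b->mem);     /* pooled: return to the pool */
	else
		free(b->mem);         /* fallback: really free      */
	b->mem = NULL;
}

int main(void)
{
	struct buf b;

	buf_get(&b, 2048);
	buf_put(&b);
	return 0;
}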

+ 52 - 0
hif/src/hif_main.h

@@ -429,6 +429,10 @@ struct hif_softc {
 	struct hif_cpu_affinity irq_cpu_mask[HIF_MAX_GROUP][HIF_MAX_GRP_IRQ];
 	qdf_cpu_mask allowed_mask;
 #endif
+#ifdef FEATURE_DIRECT_LINK
+	struct qdf_mem_multi_page_t dl_recv_pages;
+	int dl_recv_pipe_num;
+#endif
 };
 
 #if defined(NUM_SOC_PERF_CLUSTER) && (NUM_SOC_PERF_CLUSTER > 1)
@@ -610,6 +614,35 @@ void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
 				       qdf_dma_addr_t paddr,
 				       qdf_dma_context_t memctx,
 				       uint8_t is_mem_prealloc);
+
+/**
+ * hif_prealloc_get_multi_pages() - gets pre-alloc DP multi-pages memory
+ * @scn: HIF context
+ * @desc_type: descriptor type
+ * @elem_size: single element size
+ * @elem_num: total number of elements to be allocated
+ * @pages: multi page information storage
+ * @cacheable: true for cacheable memory, false for coherent memory
+ *
+ * Return: None
+ */
+void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
+				  qdf_size_t elem_size, uint16_t elem_num,
+				  struct qdf_mem_multi_page_t *pages,
+				  bool cacheable);
+
+/**
+ * hif_prealloc_put_multi_pages() - puts back pre-alloc DP multi-pages memory
+ * @scn: HIF context
+ * @desc_type: descriptor type
+ * @pages: multi page information storage
+ * @cacheable: true for cacheable memory, false for coherent memory
+ *
+ * Return: None
+ */
+void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
+				  struct qdf_mem_multi_page_t *pages,
+				  bool cacheable);
 #else
 static inline
 void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
@@ -635,6 +668,25 @@ void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
 	return qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
 				       size, vaddr, paddr, memctx);
 }
+
+static inline
+void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
+				  qdf_size_t elem_size, uint16_t elem_num,
+				  struct qdf_mem_multi_page_t *pages,
+				  bool cacheable)
+{
+	qdf_mem_multi_pages_alloc(scn->qdf_dev, pages,
+				  elem_size, elem_num, 0, cacheable);
+}
+
+static inline
+void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
+				  struct qdf_mem_multi_page_t *pages,
+				  bool cacheable)
+{
+	qdf_mem_multi_pages_free(scn->qdf_dev, pages, 0,
+				 cacheable);
+}
 #endif
 
 /**
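
Putting the two declarations to work, a hedged end-to-end sketch that
mirrors the calls made from ce_main.c above (the wrapper function is
invented; the buffer type, field names, and failure check come from
the hunks in this change):

/* Invented wrapper around the calls shown in ce_main.c. */
static QDF_STATUS direct_link_bufs_example(struct hif_softc *scn,
					   qdf_size_t buf_sz,
					   uint16_t nbufs)
{
	hif_prealloc_get_multi_pages(scn, QDF_DP_RX_DIRECT_LINK_CE_BUF_TYPE,
				     buf_sz, nbufs,
				     &scn->dl_recv_pages, false);
	/*
	 * The prealloc path and the qdf_mem_multi_pages_alloc()
	 * fallback signal failure the same way: zero pages.
	 */
	if (!scn->dl_recv_pages.num_pages)
		return QDF_STATUS_E_NOMEM;

	/* ... post the buffers to the CE destination ring here ... */

	/*
	 * On teardown, hand the pages back; put decides by itself
	 * whether to return them to the pool or free them.
	 */
	hif_prealloc_put_multi_pages(scn, QDF_DP_RX_DIRECT_LINK_CE_BUF_TYPE,
				     &scn->dl_recv_pages, false);
	return QDF_STATUS_SUCCESS;
}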