qcacmn: CE services separation for legacy and lithium architecture

The CE service file includes APIs for both the legacy and lithium
architectures.

Today these APIs are compiled for both targets. Separate the CE
services into legacy and SRNG variants and compile them selectively
based on the targets present in the SoC.

Note: Generic APIs implemented for both the legacy and SRNG services
      are separated out by this change. The fast path implementation
      will be de-coupled from the common service and moved to the
      legacy source in a follow-up change.
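
For reference, a minimal sketch of how a per-target service source is
expected to hook into the new registration path (the body of
ce_service_legacy_init() is not shown in this diff, so the wiring below
is illustrative; the callback names are taken from the existing legacy
service code):

    /* ce_service_legacy.c (sketch) */
    static struct ce_ops ce_service_legacy = {
            .ce_get_desc_size = ce_get_desc_size_legacy,
            /* ... remaining legacy callbacks ... */
    };

    static struct ce_ops *ce_services_legacy(void)
    {
            return &ce_service_legacy;
    }

    void ce_service_legacy_init(void)
    {
            ce_service_register_module(CE_SVC_LEGACY, ce_services_legacy);
    }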

CRs-Fixed: 2258640
Change-Id: If86c21c0bf560a360474c9efcdbcd4841a09828d
Sathish Kumar, 6 years ago
parent commit 86876490f2

+ 6 - 0
hal/wifi3.0/hal_srng.c

@@ -137,6 +137,8 @@ QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
 	return QDF_STATUS_SUCCESS;
 }
 
+qdf_export_symbol(hal_set_one_shadow_config);
+
 QDF_STATUS hal_construct_shadow_config(void *hal_soc)
 {
 	int ring_type, ring_num;
@@ -162,6 +164,8 @@ QDF_STATUS hal_construct_shadow_config(void *hal_soc)
 	return QDF_STATUS_SUCCESS;
 }
 
+qdf_export_symbol(hal_construct_shadow_config);
+
 void hal_get_shadow_config(void *hal_soc,
 	struct pld_shadow_reg_v2_cfg **shadow_config,
 	int *num_shadow_registers_configured)
@@ -176,6 +180,8 @@ void hal_get_shadow_config(void *hal_soc,
 			"%s", __func__);
 }
 
+qdf_export_symbol(hal_get_shadow_config);
+
 
 static void hal_validate_shadow_register(struct hal_soc *hal,
 				  uint32_t *destination,

+ 24 - 1
hif/src/ce/ce_api.h

@@ -102,12 +102,28 @@ typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
 typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
 				void *per_CE_wm_context, unsigned int flags);
 
+
 #define CE_WM_FLAG_SEND_HIGH   1
 #define CE_WM_FLAG_SEND_LOW    2
 #define CE_WM_FLAG_RECV_HIGH   4
 #define CE_WM_FLAG_RECV_LOW    8
 #define CE_HTT_TX_CE           4
 
+
+/**
+ * ce_service_srng_init() - Initialization routine for CE services
+ *                          in SRNG based targets
+ * Return: None
+ */
+void ce_service_srng_init(void);
+
+/**
+ * ce_service_legacy_init() - Initialization routine for CE services
+ *                            in legacy targets
+ * Return: None
+ */
+void ce_service_legacy_init(void);
+
 /* A list of buffers to be gathered and sent */
 struct ce_sendlist;
 
@@ -385,6 +401,14 @@ void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);
  */
 bool ce_get_rx_pending(struct hif_softc *scn);
 
+/**
+ * war_ce_src_ring_write_idx_set() - Set write index for CE source ring
+ * @scn: HIF context handle
+ * @ctrl_addr: control address of the copy engine
+ * @write_index: source ring write index value to program
+ *
+ * Return: None
+ */
+void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
+				   u32 ctrl_addr, unsigned int write_index);
+
 /* CE_attr.flags values */
 #define CE_ATTR_NO_SNOOP             0x01 /* Use NonSnooping PCIe accesses? */
 #define CE_ATTR_BYTE_SWAP_DATA       0x02 /* Byte swap data words */
@@ -536,7 +560,6 @@ struct ce_ops {
 	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
 			    struct pld_shadow_reg_v2_cfg **shadow_config,
 			    int *num_shadow_registers_configured);
-
 };
 
 int hif_ce_bus_early_suspend(struct hif_softc *scn);

+ 23 - 0
hif/src/ce/ce_internal.h

@@ -549,4 +549,27 @@ QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
 void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
 #endif /*HIF_CE_DEBUG_DATA_BUF*/
 #endif /* #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || HIF_CE_DEBUG_DATA_BUF */
+
+#ifdef HIF_CONFIG_SLUB_DEBUG_ON
+/**
+ * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
+ * @nbytes: nbytes value being written into a send descriptor
+ * @ce_state: context of the copy engine
+
+ * nbytes should be non-zero and less than max configured for the copy engine
+ *
+ * Return: none
+ */
+static inline void ce_validate_nbytes(uint32_t nbytes,
+				      struct CE_state *ce_state)
+{
+	if (nbytes <= 0 || nbytes > ce_state->src_sz_max)
+		QDF_BUG(0);
+}
+#else
+static inline void ce_validate_nbytes(uint32_t nbytes,
+				      struct CE_state *ce_state)
+{
+}
+#endif
 #endif /* __COPY_ENGINE_INTERNAL_H__ */

+ 48 - 4
hif/src/ce/ce_main.c

@@ -885,6 +885,21 @@ static void ce_free_desc_ring(struct hif_softc *scn, unsigned int CE_id,
 }
 #endif /* IPA_OFFLOAD */
 
+/*
+ * TODO: Need to explore the possibility of having this as part of a
+ * target context instead of a global array.
+ */
+static struct ce_ops* (*ce_attach_register[CE_MAX_TARGET_TYPE])(void);
+
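+/**
+ * ce_service_register_module() - register a CE service attach routine
+ *                                for a target type
+ * @target_type: CE_SVC_LEGACY or CE_SVC_SRNG
+ * @ce_attach: callback returning the ce_ops table for that service
+ *
+ * Return: None
+ */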
+void ce_service_register_module(enum ce_target_type target_type,
+				struct ce_ops* (*ce_attach)(void))
+{
+	if (target_type < CE_MAX_TARGET_TYPE)
+		ce_attach_register[target_type] = ce_attach;
+}
+
+qdf_export_symbol(ce_service_register_module);
+
 /**
  * ce_srng_based() - Does this target use srng
  * @ce_state : pointer to the state context of the CE
@@ -917,17 +932,26 @@ qdf_export_symbol(ce_srng_based);
 #ifdef QCA_WIFI_SUPPORT_SRNG
 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
 {
-	if (ce_srng_based(scn))
-		return ce_services_srng();
+	struct ce_ops *ops = NULL;
+
+	if (ce_srng_based(scn)) {
+		if (ce_attach_register[CE_SVC_SRNG])
+			ops = ce_attach_register[CE_SVC_SRNG]();
+	} else if (ce_attach_register[CE_SVC_LEGACY]) {
+		ops = ce_attach_register[CE_SVC_LEGACY]();
+	}
 
-	return ce_services_legacy();
+	return ops;
 }
 
 
 #else	/* QCA_LITHIUM */
 static struct ce_ops *ce_services_attach(struct hif_softc *scn)
 {
-	return ce_services_legacy();
+	if (ce_attach_register[CE_SVC_LEGACY])
+		return ce_attach_register[CE_SVC_LEGACY]();
+
+	return NULL;
 }
 #endif /* QCA_LITHIUM */
 
@@ -2740,6 +2764,25 @@ void hif_set_ce_config_qcn7605(struct hif_softc *scn,
 }
 #endif
 
+#ifdef CE_SVC_CMN_INIT
+#ifdef QCA_WIFI_SUPPORT_SRNG
+static inline void hif_ce_service_init(void)
+{
+	ce_service_srng_init();
+}
+#else
+static inline void hif_ce_service_init(void)
+{
+	ce_service_legacy_init();
+}
+#endif
+#else
+static inline void hif_ce_service_init(void)
+{
+}
+#endif
+
+
 /**
  * hif_ce_prepare_config() - load the correct static tables.
  * @scn: hif context
@@ -2753,6 +2796,7 @@ void hif_ce_prepare_config(struct hif_softc *scn)
 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
 
+	hif_ce_service_init();
 	hif_state->ce_services = ce_services_attach(scn);
 
 	scn->ce_count = HOST_CE_COUNT;
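
Since ce_services_attach() can now return NULL when no matching service has
been registered (for example, when the per-target service source is not
compiled in), callers may want a defensive check. A minimal sketch, assuming
the existing HIF_ERROR logging macro; this guard is illustrative and not part
of this change:

    hif_state->ce_services = ce_services_attach(scn);
    if (!hif_state->ce_services) {
            HIF_ERROR("%s: no CE service registered for this target",
                      __func__);
            return;
    }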

+ 9 - 0
hif/src/ce/ce_main.h

@@ -53,6 +53,12 @@ enum ce_id_type {
 	CE_ID_MAX
 };
 
+enum ce_target_type {
+	CE_SVC_LEGACY,
+	CE_SVC_SRNG,
+	CE_MAX_TARGET_TYPE
+};
+
 #ifdef CONFIG_WIN
 #define QWLAN_VERSIONSTR "WIN"
 #endif
@@ -228,4 +234,7 @@ void hif_select_epping_service_to_pipe_map(struct service_to_pipe
 { }
 #endif
 
+void ce_service_register_module(enum ce_target_type target_type,
+				struct ce_ops* (*ce_attach)(void));
+
 #endif /* __CE_H__ */

+ 16 - 849
hif/src/ce/ce_service.c

@@ -293,52 +293,12 @@ bool hif_ce_service_should_yield(struct hif_softc *scn,
 	return yield;
 }
 #endif
-/*
- * Support for Copy Engine hardware, which is mainly used for
- * communication between Host and Target over a PCIe interconnect.
- */
-
-/*
- * A single CopyEngine (CE) comprises two "rings":
- *   a source ring
- *   a destination ring
- *
- * Each ring consists of a number of descriptors which specify
- * an address, length, and meta-data.
- *
- * Typically, one side of the PCIe interconnect (Host or Target)
- * controls one ring and the other side controls the other ring.
- * The source side chooses when to initiate a transfer and it
- * chooses what to send (buffer address, length). The destination
- * side keeps a supply of "anonymous receive buffers" available and
- * it handles incoming data as it arrives (when the destination
- * receives an interrupt).
- *
- * The sender may send a simple buffer (address/length) or it may
- * send a small list of buffers.  When a small list is sent, hardware
- * "gathers" these and they end up in a single destination buffer
- * with a single interrupt.
- *
- * There are several "contexts" managed by this layer -- more, it
- * may seem -- than should be needed. These are provided mainly for
- * maximum flexibility and especially to facilitate a simpler HIF
- * implementation. There are per-CopyEngine recv, send, and watermark
- * contexts. These are supplied by the caller when a recv, send,
- * or watermark handler is established and they are echoed back to
- * the caller when the respective callbacks are invoked. There is
- * also a per-transfer context supplied by the caller when a buffer
- * (or sendlist) is sent and when a buffer is enqueued for recv.
- * These per-transfer contexts are echoed back to the caller when
- * the buffer is sent/received.
- * Target TX harsh result toeplitz_hash_result
- */
 
 /*
  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
  * The caller takes responsibility for any needed locking.
  */
 
-static
 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
 				   u32 ctrl_addr, unsigned int write_index)
 {
@@ -376,126 +336,7 @@ void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
 	}
 }
 
-#ifdef HIF_CONFIG_SLUB_DEBUG_ON
-/**
- * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
- * @nbytes: nbytes value being written into a send descriptor
- * @ce_state: context of the copy engine
-
- * nbytes should be non-zero and less than max configured for the copy engine
- *
- * Return: none
- */
-static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
-{
-	if (nbytes <= 0 || nbytes > ce_state->src_sz_max)
-		QDF_BUG(0);
-}
-#else
-static void ce_validate_nbytes(uint32_t nbytes, struct CE_state *ce_state)
-{
-}
-#endif
-
-static int
-ce_send_nolock_legacy(struct CE_handle *copyeng,
-			   void *per_transfer_context,
-			   qdf_dma_addr_t buffer,
-			   uint32_t nbytes,
-			   uint32_t transfer_id,
-			   uint32_t flags,
-			   uint32_t user_flags)
-{
-	int status;
-	struct CE_state *CE_state = (struct CE_state *)copyeng;
-	struct CE_ring_state *src_ring = CE_state->src_ring;
-	uint32_t ctrl_addr = CE_state->ctrl_addr;
-	unsigned int nentries_mask = src_ring->nentries_mask;
-	unsigned int sw_index = src_ring->sw_index;
-	unsigned int write_index = src_ring->write_index;
-	uint64_t dma_addr = buffer;
-	struct hif_softc *scn = CE_state->scn;
-
-	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
-		return QDF_STATUS_E_FAILURE;
-	if (unlikely(CE_RING_DELTA(nentries_mask,
-				write_index, sw_index - 1) <= 0)) {
-		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
-		Q_TARGET_ACCESS_END(scn);
-		return QDF_STATUS_E_FAILURE;
-	}
-	{
-		enum hif_ce_event_type event_type;
-		struct CE_src_desc *src_ring_base =
-			(struct CE_src_desc *)src_ring->base_addr_owner_space;
-		struct CE_src_desc *shadow_base =
-			(struct CE_src_desc *)src_ring->shadow_base;
-		struct CE_src_desc *src_desc =
-			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
-		struct CE_src_desc *shadow_src_desc =
-			CE_SRC_RING_TO_DESC(shadow_base, write_index);
-
-		/* Update low 32 bits source descriptor address */
-		shadow_src_desc->buffer_addr =
-			(uint32_t)(dma_addr & 0xFFFFFFFF);
-#ifdef QCA_WIFI_3_0
-		shadow_src_desc->buffer_addr_hi =
-			(uint32_t)((dma_addr >> 32) & 0x1F);
-		user_flags |= shadow_src_desc->buffer_addr_hi;
-		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
-			   sizeof(uint32_t));
-#endif
-		shadow_src_desc->target_int_disable = 0;
-		shadow_src_desc->host_int_disable = 0;
-
-		shadow_src_desc->meta_data = transfer_id;
-
-		/*
-		 * Set the swap bit if:
-		 * typical sends on this CE are swapped (host is big-endian)
-		 * and this send doesn't disable the swapping
-		 * (data is not bytestream)
-		 */
-		shadow_src_desc->byte_swap =
-			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
-			 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
-		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
-		shadow_src_desc->nbytes = nbytes;
-		ce_validate_nbytes(nbytes, CE_state);
-
-		*src_desc = *shadow_src_desc;
-
-		src_ring->per_transfer_context[write_index] =
-			per_transfer_context;
-
-		/* Update Source Ring Write Index */
-		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
-
-		/* WORKAROUND */
-		if (shadow_src_desc->gather) {
-			event_type = HIF_TX_GATHER_DESC_POST;
-		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
-			event_type = HIF_TX_DESC_SOFTWARE_POST;
-			CE_state->state = CE_PENDING;
-		} else {
-			event_type = HIF_TX_DESC_POST;
-			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
-						      write_index);
-		}
-
-		/* src_ring->write index hasn't been updated event though
-		 * the register has allready been written to.
-		 */
-		hif_record_ce_desc_event(scn, CE_state->id, event_type,
-			(union ce_desc *) shadow_src_desc, per_transfer_context,
-			src_ring->write_index, nbytes);
-
-		src_ring->write_index = write_index;
-		status = QDF_STATUS_SUCCESS;
-	}
-	Q_TARGET_ACCESS_END(scn);
-	return status;
-}
+qdf_export_symbol(war_ce_src_ring_write_idx_set);
 
 int
 ce_send(struct CE_handle *copyeng,
@@ -569,84 +410,12 @@ ce_sendlist_send(struct CE_handle *copyeng,
 			per_transfer_context, sendlist, transfer_id);
 }
 
-static int
-ce_sendlist_send_legacy(struct CE_handle *copyeng,
-		 void *per_transfer_context,
-		 struct ce_sendlist *sendlist, unsigned int transfer_id)
-{
-	int status = -ENOMEM;
-	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
-	struct CE_state *CE_state = (struct CE_state *)copyeng;
-	struct CE_ring_state *src_ring = CE_state->src_ring;
-	unsigned int nentries_mask = src_ring->nentries_mask;
-	unsigned int num_items = sl->num_items;
-	unsigned int sw_index;
-	unsigned int write_index;
-	struct hif_softc *scn = CE_state->scn;
-
-	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
-
-	qdf_spin_lock_bh(&CE_state->ce_index_lock);
-
-	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
-	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
-		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
-					       scn, CE_state->ctrl_addr);
-		Q_TARGET_ACCESS_END(scn);
-	}
-
-	sw_index = src_ring->sw_index;
-	write_index = src_ring->write_index;
-
-	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
-	    num_items) {
-		struct ce_sendlist_item *item;
-		int i;
-
-		/* handle all but the last item uniformly */
-		for (i = 0; i < num_items - 1; i++) {
-			item = &sl->item[i];
-			/* TBDXXX: Support extensible sendlist_types? */
-			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
-			status = ce_send_nolock_legacy(copyeng,
-				CE_SENDLIST_ITEM_CTXT,
-				(qdf_dma_addr_t) item->data,
-				item->u.nbytes, transfer_id,
-				item->flags | CE_SEND_FLAG_GATHER,
-				item->user_flags);
-			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
-		}
-		/* provide valid context pointer for final item */
-		item = &sl->item[i];
-		/* TBDXXX: Support extensible sendlist_types? */
-		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
-		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
-					(qdf_dma_addr_t) item->data,
-					item->u.nbytes,
-					transfer_id, item->flags,
-					item->user_flags);
-		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
-		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
-					QDF_NBUF_TX_PKT_CE);
-		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
-			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
-			QDF_TRACE_DEFAULT_PDEV_ID,
-			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
-			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
-			QDF_TX));
-	} else {
-		/*
-		 * Probably not worth the additional complexity to support
-		 * partial sends with continuation or notification.  We expect
-		 * to use large rings and small sendlists. If we can't handle
-		 * the entire request at once, punt it back to the caller.
-		 */
-	}
-	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
-
-	return status;
-}
-
+/*
+ * TODO: The fast path implementation must be de-coupled from the generic
+ * service APIs shared between the SRNG and legacy CE implementations and
+ * moved to ce_service_legacy.c.
+ * CR-2315620
+ */
 #ifdef WLAN_FEATURE_FASTPATH
 #ifdef QCA_WIFI_3_0
 static inline void
@@ -861,17 +630,6 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
 	return 1;
 }
 
-/**
- * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
- * @scn: Handle to HIF context
- *
- * Return: true if fastpath is enabled else false.
- */
-static bool ce_is_fastpath_enabled(struct hif_softc *scn)
-{
-	return scn->fastpath_mode_on;
-}
-
 /**
  * ce_is_fastpath_handler_registered() - return true for datapath CEs and if
  * fastpath is enabled.
@@ -887,13 +645,7 @@ static bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
 		return false;
 }
 
-
 #else
-static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
-{
-	return false;
-}
-
 static inline bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
 {
 	return false;
@@ -1136,74 +888,6 @@ ce_recv_buf_enqueue(struct CE_handle *copyeng,
 			per_recv_context, buffer);
 }
 
-/**
- * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
- * @coyeng: copy engine handle
- * @per_recv_context: virtual address of the nbuf
- * @buffer: physical address of the nbuf
- *
- * Return: 0 if the buffer is enqueued
- */
-static int
-ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
-		    void *per_recv_context, qdf_dma_addr_t buffer)
-{
-	int status;
-	struct CE_state *CE_state = (struct CE_state *)copyeng;
-	struct CE_ring_state *dest_ring = CE_state->dest_ring;
-	uint32_t ctrl_addr = CE_state->ctrl_addr;
-	unsigned int nentries_mask = dest_ring->nentries_mask;
-	unsigned int write_index;
-	unsigned int sw_index;
-	uint64_t dma_addr = buffer;
-	struct hif_softc *scn = CE_state->scn;
-
-	qdf_spin_lock_bh(&CE_state->ce_index_lock);
-	write_index = dest_ring->write_index;
-	sw_index = dest_ring->sw_index;
-
-	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
-		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
-		return -EIO;
-	}
-
-	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
-	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
-		struct CE_dest_desc *dest_ring_base =
-			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
-		struct CE_dest_desc *dest_desc =
-			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
-
-		/* Update low 32 bit destination descriptor */
-		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
-#ifdef QCA_WIFI_3_0
-		dest_desc->buffer_addr_hi =
-			(uint32_t)((dma_addr >> 32) & 0x1F);
-#endif
-		dest_desc->nbytes = 0;
-
-		dest_ring->per_transfer_context[write_index] =
-			per_recv_context;
-
-		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
-				(union ce_desc *) dest_desc, per_recv_context,
-				write_index, 0);
-
-		/* Update Destination Ring Write Index */
-		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
-		if (write_index != sw_index) {
-			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
-			dest_ring->write_index = write_index;
-		}
-		status = QDF_STATUS_SUCCESS;
-	} else
-		status = QDF_STATUS_E_FAILURE;
-
-	Q_TARGET_ACCESS_END(scn);
-	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
-	return status;
-}
-
 void
 ce_send_watermarks_set(struct CE_handle *copyeng,
 		       unsigned int low_alert_nentries,
@@ -1268,22 +952,6 @@ unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
  * Guts of ce_send_entries_done.
  * The caller takes responsibility for any necessary locking.
  */
-static unsigned int
-ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
-			    struct CE_state *CE_state)
-{
-	struct CE_ring_state *src_ring = CE_state->src_ring;
-	uint32_t ctrl_addr = CE_state->ctrl_addr;
-	unsigned int nentries_mask = src_ring->nentries_mask;
-	unsigned int sw_index;
-	unsigned int read_index;
-
-	sw_index = src_ring->sw_index;
-	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
-
-	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
-}
-
 unsigned int ce_send_entries_done(struct CE_handle *copyeng)
 {
 	struct CE_state *CE_state = (struct CE_state *)copyeng;
@@ -1303,22 +971,6 @@ unsigned int ce_send_entries_done(struct CE_handle *copyeng)
  * Guts of ce_recv_entries_done.
  * The caller takes responsibility for any necessary locking.
  */
-static unsigned int
-ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
-			    struct CE_state *CE_state)
-{
-	struct CE_ring_state *dest_ring = CE_state->dest_ring;
-	uint32_t ctrl_addr = CE_state->ctrl_addr;
-	unsigned int nentries_mask = dest_ring->nentries_mask;
-	unsigned int sw_index;
-	unsigned int read_index;
-
-	sw_index = dest_ring->sw_index;
-	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
-
-	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
-}
-
 unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
 {
 	struct CE_state *CE_state = (struct CE_state *)copyeng;
@@ -1338,74 +990,6 @@ unsigned int ce_recv_entries_done(struct CE_handle *copyeng)
  * Guts of ce_completed_recv_next.
  * The caller takes responsibility for any necessary locking.
  */
-static int
-ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
-			      void **per_CE_contextp,
-			      void **per_transfer_contextp,
-			      qdf_dma_addr_t *bufferp,
-			      unsigned int *nbytesp,
-			      unsigned int *transfer_idp,
-			      unsigned int *flagsp)
-{
-	int status;
-	struct CE_ring_state *dest_ring = CE_state->dest_ring;
-	unsigned int nentries_mask = dest_ring->nentries_mask;
-	unsigned int sw_index = dest_ring->sw_index;
-	struct hif_softc *scn = CE_state->scn;
-	struct CE_dest_desc *dest_ring_base =
-		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
-	struct CE_dest_desc *dest_desc =
-		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
-	int nbytes;
-	struct CE_dest_desc dest_desc_info;
-	/*
-	 * By copying the dest_desc_info element to local memory, we could
-	 * avoid extra memory read from non-cachable memory.
-	 */
-	dest_desc_info =  *dest_desc;
-	nbytes = dest_desc_info.nbytes;
-	if (nbytes == 0) {
-		/*
-		 * This closes a relatively unusual race where the Host
-		 * sees the updated DRRI before the update to the
-		 * corresponding descriptor has completed. We treat this
-		 * as a descriptor that is not yet done.
-		 */
-		status = QDF_STATUS_E_FAILURE;
-		goto done;
-	}
-
-	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
-			(union ce_desc *) dest_desc,
-			dest_ring->per_transfer_context[sw_index],
-			sw_index, 0);
-
-	dest_desc->nbytes = 0;
-
-	/* Return data from completed destination descriptor */
-	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
-	*nbytesp = nbytes;
-	*transfer_idp = dest_desc_info.meta_data;
-	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
-
-	if (per_CE_contextp)
-		*per_CE_contextp = CE_state->recv_context;
-
-	if (per_transfer_contextp) {
-		*per_transfer_contextp =
-			dest_ring->per_transfer_context[sw_index];
-	}
-	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
-
-	/* Update sw_index */
-	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
-	dest_ring->sw_index = sw_index;
-	status = QDF_STATUS_SUCCESS;
-
-done:
-	return status;
-}
-
 int
 ce_completed_recv_next(struct CE_handle *copyeng,
 		       void **per_CE_contextp,
@@ -1442,150 +1026,6 @@ ce_revoke_recv_next(struct CE_handle *copyeng,
 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
 			per_CE_contextp, per_transfer_contextp, bufferp);
 }
-/* NB: Modeled after ce_completed_recv_next_nolock */
-static QDF_STATUS
-ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
-		    void **per_CE_contextp,
-		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
-{
-	struct CE_state *CE_state;
-	struct CE_ring_state *dest_ring;
-	unsigned int nentries_mask;
-	unsigned int sw_index;
-	unsigned int write_index;
-	QDF_STATUS status;
-	struct hif_softc *scn;
-
-	CE_state = (struct CE_state *)copyeng;
-	dest_ring = CE_state->dest_ring;
-	if (!dest_ring)
-		return QDF_STATUS_E_FAILURE;
-
-	scn = CE_state->scn;
-	qdf_spin_lock(&CE_state->ce_index_lock);
-	nentries_mask = dest_ring->nentries_mask;
-	sw_index = dest_ring->sw_index;
-	write_index = dest_ring->write_index;
-	if (write_index != sw_index) {
-		struct CE_dest_desc *dest_ring_base =
-			(struct CE_dest_desc *)dest_ring->
-			    base_addr_owner_space;
-		struct CE_dest_desc *dest_desc =
-			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
-
-		/* Return data from completed destination descriptor */
-		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
-
-		if (per_CE_contextp)
-			*per_CE_contextp = CE_state->recv_context;
-
-		if (per_transfer_contextp) {
-			*per_transfer_contextp =
-				dest_ring->per_transfer_context[sw_index];
-		}
-		dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
-
-		/* Update sw_index */
-		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
-		dest_ring->sw_index = sw_index;
-		status = QDF_STATUS_SUCCESS;
-	} else {
-		status = QDF_STATUS_E_FAILURE;
-	}
-	qdf_spin_unlock(&CE_state->ce_index_lock);
-
-	return status;
-}
-
-/*
- * Guts of ce_completed_send_next.
- * The caller takes responsibility for any necessary locking.
- */
-static int
-ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
-			      void **per_CE_contextp,
-			      void **per_transfer_contextp,
-			      qdf_dma_addr_t *bufferp,
-			      unsigned int *nbytesp,
-			      unsigned int *transfer_idp,
-			      unsigned int *sw_idx,
-			      unsigned int *hw_idx,
-			      uint32_t *toeplitz_hash_result)
-{
-	int status = QDF_STATUS_E_FAILURE;
-	struct CE_ring_state *src_ring = CE_state->src_ring;
-	uint32_t ctrl_addr = CE_state->ctrl_addr;
-	unsigned int nentries_mask = src_ring->nentries_mask;
-	unsigned int sw_index = src_ring->sw_index;
-	unsigned int read_index;
-	struct hif_softc *scn = CE_state->scn;
-
-	if (src_ring->hw_index == sw_index) {
-		/*
-		 * The SW completion index has caught up with the cached
-		 * version of the HW completion index.
-		 * Update the cached HW completion index to see whether
-		 * the SW has really caught up to the HW, or if the cached
-		 * value of the HW index has become stale.
-		 */
-		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
-			return QDF_STATUS_E_FAILURE;
-		src_ring->hw_index =
-			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
-		if (Q_TARGET_ACCESS_END(scn) < 0)
-			return QDF_STATUS_E_FAILURE;
-	}
-	read_index = src_ring->hw_index;
-
-	if (sw_idx)
-		*sw_idx = sw_index;
-
-	if (hw_idx)
-		*hw_idx = read_index;
-
-	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
-		struct CE_src_desc *shadow_base =
-			(struct CE_src_desc *)src_ring->shadow_base;
-		struct CE_src_desc *shadow_src_desc =
-			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
-#ifdef QCA_WIFI_3_0
-		struct CE_src_desc *src_ring_base =
-			(struct CE_src_desc *)src_ring->base_addr_owner_space;
-		struct CE_src_desc *src_desc =
-			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
-#endif
-		hif_record_ce_desc_event(scn, CE_state->id,
-				HIF_TX_DESC_COMPLETION,
-				(union ce_desc *) shadow_src_desc,
-				src_ring->per_transfer_context[sw_index],
-				sw_index, shadow_src_desc->nbytes);
-
-		/* Return data from completed source descriptor */
-		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
-		*nbytesp = shadow_src_desc->nbytes;
-		*transfer_idp = shadow_src_desc->meta_data;
-#ifdef QCA_WIFI_3_0
-		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
-#else
-		*toeplitz_hash_result = 0;
-#endif
-		if (per_CE_contextp)
-			*per_CE_contextp = CE_state->send_context;
-
-		if (per_transfer_contextp) {
-			*per_transfer_contextp =
-				src_ring->per_transfer_context[sw_index];
-		}
-		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
-
-		/* Update sw_index */
-		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
-		src_ring->sw_index = sw_index;
-		status = QDF_STATUS_SUCCESS;
-	}
-
-	return status;
-}
 
 QDF_STATUS
 ce_cancel_send_next(struct CE_handle *copyeng,
@@ -1604,75 +1044,6 @@ ce_cancel_send_next(struct CE_handle *copyeng,
 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
 }
 
-/* NB: Modeled after ce_completed_send_next */
-static QDF_STATUS
-ce_cancel_send_next_legacy(struct CE_handle *copyeng,
-		void **per_CE_contextp,
-		void **per_transfer_contextp,
-		qdf_dma_addr_t *bufferp,
-		unsigned int *nbytesp,
-		unsigned int *transfer_idp,
-		uint32_t *toeplitz_hash_result)
-{
-	struct CE_state *CE_state;
-	struct CE_ring_state *src_ring;
-	unsigned int nentries_mask;
-	unsigned int sw_index;
-	unsigned int write_index;
-	QDF_STATUS status;
-	struct hif_softc *scn;
-
-	CE_state = (struct CE_state *)copyeng;
-	src_ring = CE_state->src_ring;
-	if (!src_ring)
-		return QDF_STATUS_E_FAILURE;
-
-	scn = CE_state->scn;
-	qdf_spin_lock(&CE_state->ce_index_lock);
-	nentries_mask = src_ring->nentries_mask;
-	sw_index = src_ring->sw_index;
-	write_index = src_ring->write_index;
-
-	if (write_index != sw_index) {
-		struct CE_src_desc *src_ring_base =
-			(struct CE_src_desc *)src_ring->base_addr_owner_space;
-		struct CE_src_desc *src_desc =
-			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
-
-		/* Return data from completed source descriptor */
-		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
-		*nbytesp = src_desc->nbytes;
-		*transfer_idp = src_desc->meta_data;
-#ifdef QCA_WIFI_3_0
-		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
-#else
-		*toeplitz_hash_result = 0;
-#endif
-
-		if (per_CE_contextp)
-			*per_CE_contextp = CE_state->send_context;
-
-		if (per_transfer_contextp) {
-			*per_transfer_contextp =
-				src_ring->per_transfer_context[sw_index];
-		}
-		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
-
-		/* Update sw_index */
-		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
-		src_ring->sw_index = sw_index;
-		status = QDF_STATUS_SUCCESS;
-	} else {
-		status = QDF_STATUS_E_FAILURE;
-	}
-	qdf_spin_unlock(&CE_state->ce_index_lock);
-
-	return status;
-}
-
-/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
-#define CE_WM_SHFT 1
-
 int
 ce_completed_send_next(struct CE_handle *copyeng,
 		       void **per_CE_contextp,
@@ -1802,6 +1173,12 @@ void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
  */
 #define CE_TXRX_COMP_CHECK_THRESHOLD 20
 
+/*
+ * TODO: The fast path implementation must be de-coupled from the generic
+ * service APIs shared between the SRNG and legacy CE implementations and
+ * moved to ce_service_legacy.c.
+ * CR-2315620
+ */
 #ifdef WLAN_FEATURE_FASTPATH
 /**
  * ce_fastpath_rx_handle() - Updates write_index and calls fastpath msg handler
@@ -1867,6 +1244,8 @@ static void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
 	uint32_t ctrl_addr = ce_state->ctrl_addr;
 	uint32_t nbuf_cmpl_idx = 0;
 	unsigned int more_comp_cnt = 0;
+	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
+	struct ce_ops *ce_services = hif_state->ce_services;
 
 more_data:
 	for (;;) {
@@ -1992,7 +1371,7 @@ more_data:
 		return;
 	}
 
-	if (ce_recv_entries_done_nolock_legacy(scn, ce_state)) {
+	if (ce_services->ce_recv_entries_done_nolock(scn, ce_state)) {
 		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
 			goto more_data;
 		} else {
@@ -2297,43 +1676,6 @@ void ce_per_engine_service_any(int irq, struct hif_softc *scn)
 	Q_TARGET_ACCESS_END(scn);
 }
 
-/*
- * Adjust interrupts for the copy complete handler.
- * If it's needed for either send or recv, then unmask
- * this interrupt; otherwise, mask it.
- *
- * Called with target_lock held.
- */
-static void
-ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
-			     int disable_copy_compl_intr)
-{
-	uint32_t ctrl_addr = CE_state->ctrl_addr;
-	struct hif_softc *scn = CE_state->scn;
-
-	CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
-
-	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
-		return;
-
-	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
-		hif_err_rl("%s: target access is not allowed", __func__);
-		return;
-	}
-
-	if ((!disable_copy_compl_intr) &&
-	    (CE_state->send_cb || CE_state->recv_cb))
-		CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
-	else
-		CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
-
-	if (CE_state->watermark_cb)
-		CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
-	 else
-		CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
-	Q_TARGET_ACCESS_END(scn);
-}
-
 /*Iterate the CE_state list and disable the compl interrupt
  * if it has been registered already.
  */
@@ -2569,181 +1911,6 @@ void ce_ipa_get_resource(struct CE_handle *ce,
 }
 #endif /* IPA_OFFLOAD */
 
-static bool ce_check_int_watermark(struct CE_state *CE_state,
-				   unsigned int *flags)
-{
-	uint32_t ce_int_status;
-	uint32_t ctrl_addr = CE_state->ctrl_addr;
-	struct hif_softc *scn = CE_state->scn;
-
-	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
-	if (ce_int_status & CE_WATERMARK_MASK) {
-		/* Convert HW IS bits to software flags */
-		*flags =
-			(ce_int_status & CE_WATERMARK_MASK) >>
-			CE_WM_SHFT;
-		return true;
-	}
-
-	return false;
-}
-
-static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
-			struct CE_ring_state *src_ring,
-			struct CE_attr *attr)
-{
-	uint32_t ctrl_addr;
-	uint64_t dma_addr;
-
-	QDF_ASSERT(ce_id < scn->ce_count);
-	ctrl_addr = CE_BASE_ADDRESS(ce_id);
-
-	src_ring->hw_index =
-		CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
-	src_ring->sw_index = src_ring->hw_index;
-	src_ring->write_index =
-		CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
-	dma_addr = src_ring->base_addr_CE_space;
-	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
-			(uint32_t)(dma_addr & 0xFFFFFFFF));
-
-	/* if SR_BA_ADDRESS_HIGH register exists */
-	if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
-		uint32_t tmp;
-
-		tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
-				scn, ctrl_addr);
-		tmp &= ~0x1F;
-		dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
-		CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
-				ctrl_addr, (uint32_t)dma_addr);
-	}
-	CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
-	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
-#ifdef BIG_ENDIAN_HOST
-	/* Enable source ring byte swap for big endian host */
-	CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
-#endif
-	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
-	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);
-
-}
-
-static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
-				struct CE_ring_state *dest_ring,
-				struct CE_attr *attr)
-{
-	uint32_t ctrl_addr;
-	uint64_t dma_addr;
-
-	QDF_ASSERT(ce_id < scn->ce_count);
-	ctrl_addr = CE_BASE_ADDRESS(ce_id);
-	dest_ring->sw_index =
-		CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
-	dest_ring->write_index =
-		CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
-	dma_addr = dest_ring->base_addr_CE_space;
-	CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
-			(uint32_t)(dma_addr & 0xFFFFFFFF));
-
-	/* if DR_BA_ADDRESS_HIGH exists */
-	if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
-		uint32_t tmp;
-
-		tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
-				ctrl_addr);
-		tmp &= ~0x1F;
-		dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
-		CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
-				ctrl_addr, (uint32_t)dma_addr);
-	}
-
-	CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
-#ifdef BIG_ENDIAN_HOST
-	/* Enable Dest ring byte swap for big endian host */
-	CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
-#endif
-	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
-	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);
-}
-
-static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
-{
-	switch (ring_type) {
-	case CE_RING_SRC:
-		return sizeof(struct CE_src_desc);
-	case CE_RING_DEST:
-		return sizeof(struct CE_dest_desc);
-	case CE_RING_STATUS:
-		qdf_assert(0);
-		return 0;
-	default:
-		return 0;
-	}
-
-	return 0;
-}
-
-static int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
-		uint32_t ce_id, struct CE_ring_state *ring,
-		struct CE_attr *attr)
-{
-	int status = Q_TARGET_ACCESS_BEGIN(scn);
-
-	if (status < 0)
-		goto out;
-
-
-	switch (ring_type) {
-	case CE_RING_SRC:
-		ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
-		break;
-	case CE_RING_DEST:
-		ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
-		break;
-	case CE_RING_STATUS:
-	default:
-		qdf_assert(0);
-		break;
-	}
-
-	Q_TARGET_ACCESS_END(scn);
-out:
-	return status;
-}
-
-static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn,
-			    struct pld_shadow_reg_v2_cfg **shadow_config,
-			    int *num_shadow_registers_configured)
-{
-	*num_shadow_registers_configured = 0;
-	*shadow_config = NULL;
-}
-
-struct ce_ops ce_service_legacy = {
-	.ce_get_desc_size = ce_get_desc_size_legacy,
-	.ce_ring_setup = ce_ring_setup_legacy,
-	.ce_sendlist_send = ce_sendlist_send_legacy,
-	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
-	.ce_revoke_recv_next = ce_revoke_recv_next_legacy,
-	.ce_cancel_send_next = ce_cancel_send_next_legacy,
-	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
-	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
-	.ce_send_nolock = ce_send_nolock_legacy,
-	.watermark_int = ce_check_int_watermark,
-	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
-	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
-	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
-	.ce_prepare_shadow_register_v2_cfg =
-		ce_prepare_shadow_register_v2_cfg_legacy,
-};
-
-
-struct ce_ops *ce_services_legacy()
-{
-	return &ce_service_legacy;
-}
-
 #if HIF_CE_DEBUG_DATA_BUF
 /**
  * hif_dump_desc_data_buf() - record ce descriptor events

+ 865 - 0
hif/src/ce/ce_service_legacy.c

@@ -0,0 +1,865 @@
+/*
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ce_api.h"
+#include "ce_internal.h"
+#include "ce_main.h"
+#include "ce_reg.h"
+#include "hif.h"
+#include "hif_debug.h"
+#include "hif_io32.h"
+#include "qdf_lock.h"
+#include "hif_main.h"
+#include "hif_napi.h"
+#include "qdf_module.h"
+#include "regtable.h"
+
+/*
+ * Support for Copy Engine hardware, which is mainly used for
+ * communication between Host and Target over a PCIe interconnect.
+ */
+
+/*
+ * A single CopyEngine (CE) comprises two "rings":
+ *   a source ring
+ *   a destination ring
+ *
+ * Each ring consists of a number of descriptors which specify
+ * an address, length, and meta-data.
+ *
+ * Typically, one side of the PCIe interconnect (Host or Target)
+ * controls one ring and the other side controls the other ring.
+ * The source side chooses when to initiate a transfer and it
+ * chooses what to send (buffer address, length). The destination
+ * side keeps a supply of "anonymous receive buffers" available and
+ * it handles incoming data as it arrives (when the destination
+ * receives an interrupt).
+ *
+ * The sender may send a simple buffer (address/length) or it may
+ * send a small list of buffers.  When a small list is sent, hardware
+ * "gathers" these and they end up in a single destination buffer
+ * with a single interrupt.
+ *
+ * There are several "contexts" managed by this layer -- more, it
+ * may seem -- than should be needed. These are provided mainly for
+ * maximum flexibility and especially to facilitate a simpler HIF
+ * implementation. There are per-CopyEngine recv, send, and watermark
+ * contexts. These are supplied by the caller when a recv, send,
+ * or watermark handler is established and they are echoed back to
+ * the caller when the respective callbacks are invoked. There is
+ * also a per-transfer context supplied by the caller when a buffer
+ * (or sendlist) is sent and when a buffer is enqueued for recv.
+ * These per-transfer contexts are echoed back to the caller when
+ * the buffer is sent/received.
+ * Target TX hash result toeplitz_hash_result
+ */
+
+/* NB: Modeled after ce_completed_send_next */
+/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
+#define CE_WM_SHFT 1
+
+#ifdef WLAN_FEATURE_FASTPATH
+/**
+ * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
+ * @scn: Handle to HIF context
+ *
+ * Return: true if fastpath is enabled else false.
+ */
+static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
+{
+	return scn->fastpath_mode_on;
+}
+#else
+static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
+{
+	return false;
+}
+#endif /* WLAN_FEATURE_FASTPATH */
+
+static int
+ce_send_nolock_legacy(struct CE_handle *copyeng,
+		      void *per_transfer_context,
+		      qdf_dma_addr_t buffer,
+		      uint32_t nbytes,
+		      uint32_t transfer_id,
+		      uint32_t flags,
+		      uint32_t user_flags)
+{
+	int status;
+	struct CE_state *CE_state = (struct CE_state *)copyeng;
+	struct CE_ring_state *src_ring = CE_state->src_ring;
+	uint32_t ctrl_addr = CE_state->ctrl_addr;
+	unsigned int nentries_mask = src_ring->nentries_mask;
+	unsigned int sw_index = src_ring->sw_index;
+	unsigned int write_index = src_ring->write_index;
+	uint64_t dma_addr = buffer;
+	struct hif_softc *scn = CE_state->scn;
+
+	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
+		return QDF_STATUS_E_FAILURE;
+	if (unlikely(CE_RING_DELTA(nentries_mask,
+				   write_index, sw_index - 1) <= 0)) {
+		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
+		Q_TARGET_ACCESS_END(scn);
+		return QDF_STATUS_E_FAILURE;
+	}
+	{
+		enum hif_ce_event_type event_type;
+		struct CE_src_desc *src_ring_base =
+			(struct CE_src_desc *)src_ring->base_addr_owner_space;
+		struct CE_src_desc *shadow_base =
+			(struct CE_src_desc *)src_ring->shadow_base;
+		struct CE_src_desc *src_desc =
+			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
+		struct CE_src_desc *shadow_src_desc =
+			CE_SRC_RING_TO_DESC(shadow_base, write_index);
+
+		/* Update low 32 bits source descriptor address */
+		shadow_src_desc->buffer_addr =
+			(uint32_t)(dma_addr & 0xFFFFFFFF);
+#ifdef QCA_WIFI_3_0
+		shadow_src_desc->buffer_addr_hi =
+			(uint32_t)((dma_addr >> 32) & 0x1F);
+		user_flags |= shadow_src_desc->buffer_addr_hi;
+		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
+		       sizeof(uint32_t));
+#endif
+		shadow_src_desc->target_int_disable = 0;
+		shadow_src_desc->host_int_disable = 0;
+
+		shadow_src_desc->meta_data = transfer_id;
+
+		/*
+		 * Set the swap bit if:
+		 * typical sends on this CE are swapped (host is big-endian)
+		 * and this send doesn't disable the swapping
+		 * (data is not bytestream)
+		 */
+		shadow_src_desc->byte_swap =
+			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
+			 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
+		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
+		shadow_src_desc->nbytes = nbytes;
+		ce_validate_nbytes(nbytes, CE_state);
+
+		*src_desc = *shadow_src_desc;
+
+		src_ring->per_transfer_context[write_index] =
+			per_transfer_context;
+
+		/* Update Source Ring Write Index */
+		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+		/* WORKAROUND */
+		if (shadow_src_desc->gather) {
+			event_type = HIF_TX_GATHER_DESC_POST;
+		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
+			event_type = HIF_TX_DESC_SOFTWARE_POST;
+			CE_state->state = CE_PENDING;
+		} else {
+			event_type = HIF_TX_DESC_POST;
+			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
+						      write_index);
+		}
+
+		/* src_ring->write_index hasn't been updated even though
+		 * the register has already been written to.
+		 */
+		hif_record_ce_desc_event(scn, CE_state->id, event_type,
+			(union ce_desc *)shadow_src_desc, per_transfer_context,
+			src_ring->write_index, nbytes);
+
+		src_ring->write_index = write_index;
+		status = QDF_STATUS_SUCCESS;
+	}
+	Q_TARGET_ACCESS_END(scn);
+	return status;
+}
+
+static int
+ce_sendlist_send_legacy(struct CE_handle *copyeng,
+			void *per_transfer_context,
+			struct ce_sendlist *sendlist, unsigned int transfer_id)
+{
+	int status = -ENOMEM;
+	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
+	struct CE_state *CE_state = (struct CE_state *)copyeng;
+	struct CE_ring_state *src_ring = CE_state->src_ring;
+	unsigned int nentries_mask = src_ring->nentries_mask;
+	unsigned int num_items = sl->num_items;
+	unsigned int sw_index;
+	unsigned int write_index;
+	struct hif_softc *scn = CE_state->scn;
+
+	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
+
+	qdf_spin_lock_bh(&CE_state->ce_index_lock);
+
+	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
+	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
+		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
+					       scn, CE_state->ctrl_addr);
+		Q_TARGET_ACCESS_END(scn);
+	}
+
+	sw_index = src_ring->sw_index;
+	write_index = src_ring->write_index;
+
+	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
+	    num_items) {
+		struct ce_sendlist_item *item;
+		int i;
+
+		/* handle all but the last item uniformly */
+		for (i = 0; i < num_items - 1; i++) {
+			item = &sl->item[i];
+			/* TBDXXX: Support extensible sendlist_types? */
+			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
+			status = ce_send_nolock_legacy(copyeng,
+				CE_SENDLIST_ITEM_CTXT,
+				(qdf_dma_addr_t)item->data,
+				item->u.nbytes, transfer_id,
+				item->flags | CE_SEND_FLAG_GATHER,
+				item->user_flags);
+			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
+		}
+		/* provide valid context pointer for final item */
+		item = &sl->item[i];
+		/* TBDXXX: Support extensible sendlist_types? */
+		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
+		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
+					       (qdf_dma_addr_t) item->data,
+					       item->u.nbytes,
+					       transfer_id, item->flags,
+					       item->user_flags);
+		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
+		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
+					     QDF_NBUF_TX_PKT_CE);
+		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
+			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
+			QDF_TRACE_DEFAULT_PDEV_ID,
+			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
+			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
+			QDF_TX));
+	} else {
+		/*
+		 * Probably not worth the additional complexity to support
+		 * partial sends with continuation or notification.  We expect
+		 * to use large rings and small sendlists. If we can't handle
+		 * the entire request at once, punt it back to the caller.
+		 */
+	}
+	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
+
+	return status;
+}
+
+/**
+ * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
+ * @copyeng: copy engine handle
+ * @per_recv_context: virtual address of the nbuf
+ * @buffer: physical address of the nbuf
+ *
+ * Return: 0 if the buffer is enqueued
+ */
+static int
+ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
+			   void *per_recv_context, qdf_dma_addr_t buffer)
+{
+	int status;
+	struct CE_state *CE_state = (struct CE_state *)copyeng;
+	struct CE_ring_state *dest_ring = CE_state->dest_ring;
+	uint32_t ctrl_addr = CE_state->ctrl_addr;
+	unsigned int nentries_mask = dest_ring->nentries_mask;
+	unsigned int write_index;
+	unsigned int sw_index;
+	uint64_t dma_addr = buffer;
+	struct hif_softc *scn = CE_state->scn;
+
+	qdf_spin_lock_bh(&CE_state->ce_index_lock);
+	write_index = dest_ring->write_index;
+	sw_index = dest_ring->sw_index;
+
+	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
+		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
+		return -EIO;
+	}
+
+	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
+	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
+		struct CE_dest_desc *dest_ring_base =
+			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
+		struct CE_dest_desc *dest_desc =
+			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);
+
+		/* Update low 32 bit destination descriptor */
+		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
+#ifdef QCA_WIFI_3_0
+		dest_desc->buffer_addr_hi =
+			(uint32_t)((dma_addr >> 32) & 0x1F);
+#endif
+		dest_desc->nbytes = 0;
+
+		dest_ring->per_transfer_context[write_index] =
+			per_recv_context;
+
+		hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_POST,
+				(union ce_desc *)dest_desc, per_recv_context,
+				write_index, 0);
+
+		/* Update Destination Ring Write Index */
+		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+		if (write_index != sw_index) {
+			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
+			dest_ring->write_index = write_index;
+		}
+		status = QDF_STATUS_SUCCESS;
+	} else
+		status = QDF_STATUS_E_FAILURE;
+
+	Q_TARGET_ACCESS_END(scn);
+	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
+	return status;
+}
+
+static unsigned int
+ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
+				   struct CE_state *CE_state)
+{
+	struct CE_ring_state *src_ring = CE_state->src_ring;
+	uint32_t ctrl_addr = CE_state->ctrl_addr;
+	unsigned int nentries_mask = src_ring->nentries_mask;
+	unsigned int sw_index;
+	unsigned int read_index;
+
+	sw_index = src_ring->sw_index;
+	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
+
+	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
+}
+
+static unsigned int
+ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
+				   struct CE_state *CE_state)
+{
+	struct CE_ring_state *dest_ring = CE_state->dest_ring;
+	uint32_t ctrl_addr = CE_state->ctrl_addr;
+	unsigned int nentries_mask = dest_ring->nentries_mask;
+	unsigned int sw_index;
+	unsigned int read_index;
+
+	sw_index = dest_ring->sw_index;
+	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
+
+	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
+}
+
+static int
+ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
+				     void **per_CE_contextp,
+				     void **per_transfer_contextp,
+				     qdf_dma_addr_t *bufferp,
+				     unsigned int *nbytesp,
+				     unsigned int *transfer_idp,
+				     unsigned int *flagsp)
+{
+	int status;
+	struct CE_ring_state *dest_ring = CE_state->dest_ring;
+	unsigned int nentries_mask = dest_ring->nentries_mask;
+	unsigned int sw_index = dest_ring->sw_index;
+	struct hif_softc *scn = CE_state->scn;
+	struct CE_dest_desc *dest_ring_base =
+		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
+	struct CE_dest_desc *dest_desc =
+		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
+	int nbytes;
+	struct CE_dest_desc dest_desc_info;
+	/*
+	 * By copying the dest_desc_info element to local memory, we could
+	 * avoid extra memory read from non-cachable memory.
+	 */
+	dest_desc_info =  *dest_desc;
+	nbytes = dest_desc_info.nbytes;
+	if (nbytes == 0) {
+		/*
+		 * This closes a relatively unusual race where the Host
+		 * sees the updated DRRI before the update to the
+		 * corresponding descriptor has completed. We treat this
+		 * as a descriptor that is not yet done.
+		 */
+		status = QDF_STATUS_E_FAILURE;
+		goto done;
+	}
+
+	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
+				 (union ce_desc *)dest_desc,
+				 dest_ring->per_transfer_context[sw_index],
+				 sw_index, 0);
+
+	dest_desc->nbytes = 0;
+
+	/* Return data from completed destination descriptor */
+	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
+	*nbytesp = nbytes;
+	*transfer_idp = dest_desc_info.meta_data;
+	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
+
+	if (per_CE_contextp)
+		*per_CE_contextp = CE_state->recv_context;
+
+	if (per_transfer_contextp) {
+		*per_transfer_contextp =
+			dest_ring->per_transfer_context[sw_index];
+	}
+	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
+
+	/* Update sw_index */
+	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+	dest_ring->sw_index = sw_index;
+	status = QDF_STATUS_SUCCESS;
+
+done:
+	return status;
+}
+
+/* NB: Modeled after ce_completed_recv_next_nolock */
+static QDF_STATUS
+ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
+			   void **per_CE_contextp,
+			   void **per_transfer_contextp,
+			   qdf_dma_addr_t *bufferp)
+{
+	struct CE_state *CE_state;
+	struct CE_ring_state *dest_ring;
+	unsigned int nentries_mask;
+	unsigned int sw_index;
+	unsigned int write_index;
+	QDF_STATUS status;
+	struct hif_softc *scn;
+
+	CE_state = (struct CE_state *)copyeng;
+	dest_ring = CE_state->dest_ring;
+	if (!dest_ring)
+		return QDF_STATUS_E_FAILURE;
+
+	scn = CE_state->scn;
+	qdf_spin_lock(&CE_state->ce_index_lock);
+	nentries_mask = dest_ring->nentries_mask;
+	sw_index = dest_ring->sw_index;
+	write_index = dest_ring->write_index;
+	if (write_index != sw_index) {
+		struct CE_dest_desc *dest_ring_base =
+			(struct CE_dest_desc *)dest_ring->
+			    base_addr_owner_space;
+		struct CE_dest_desc *dest_desc =
+			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
+
+		/* Return data from completed destination descriptor */
+		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);
+
+		if (per_CE_contextp)
+			*per_CE_contextp = CE_state->recv_context;
+
+		if (per_transfer_contextp) {
+			*per_transfer_contextp =
+				dest_ring->per_transfer_context[sw_index];
+		}
+		dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
+
+		/* Update sw_index */
+		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+		dest_ring->sw_index = sw_index;
+		status = QDF_STATUS_SUCCESS;
+	} else {
+		status = QDF_STATUS_E_FAILURE;
+	}
+	qdf_spin_unlock(&CE_state->ce_index_lock);
+
+	return status;
+}
+
+/*
+ * Guts of ce_completed_send_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+static int
+ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
+				     void **per_CE_contextp,
+				     void **per_transfer_contextp,
+				     qdf_dma_addr_t *bufferp,
+				     unsigned int *nbytesp,
+				     unsigned int *transfer_idp,
+				     unsigned int *sw_idx,
+				     unsigned int *hw_idx,
+				     uint32_t *toeplitz_hash_result)
+{
+	int status = QDF_STATUS_E_FAILURE;
+	struct CE_ring_state *src_ring = CE_state->src_ring;
+	uint32_t ctrl_addr = CE_state->ctrl_addr;
+	unsigned int nentries_mask = src_ring->nentries_mask;
+	unsigned int sw_index = src_ring->sw_index;
+	unsigned int read_index;
+	struct hif_softc *scn = CE_state->scn;
+
+	if (src_ring->hw_index == sw_index) {
+		/*
+		 * The SW completion index has caught up with the cached
+		 * version of the HW completion index.
+		 * Update the cached HW completion index to see whether
+		 * the SW has really caught up to the HW, or if the cached
+		 * value of the HW index has become stale.
+		 */
+		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
+			return QDF_STATUS_E_FAILURE;
+		src_ring->hw_index =
+			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
+		if (Q_TARGET_ACCESS_END(scn) < 0)
+			return QDF_STATUS_E_FAILURE;
+	}
+	read_index = src_ring->hw_index;
+
+	if (sw_idx)
+		*sw_idx = sw_index;
+
+	if (hw_idx)
+		*hw_idx = read_index;
+
+	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
+		struct CE_src_desc *shadow_base =
+			(struct CE_src_desc *)src_ring->shadow_base;
+		struct CE_src_desc *shadow_src_desc =
+			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
+#ifdef QCA_WIFI_3_0
+		struct CE_src_desc *src_ring_base =
+			(struct CE_src_desc *)src_ring->base_addr_owner_space;
+		struct CE_src_desc *src_desc =
+			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
+#endif
+		hif_record_ce_desc_event(scn, CE_state->id,
+				HIF_TX_DESC_COMPLETION,
+				(union ce_desc *)shadow_src_desc,
+				src_ring->per_transfer_context[sw_index],
+				sw_index, shadow_src_desc->nbytes);
+
+		/* Return data from completed source descriptor */
+		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
+		*nbytesp = shadow_src_desc->nbytes;
+		*transfer_idp = shadow_src_desc->meta_data;
+#ifdef QCA_WIFI_3_0
+		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
+#else
+		*toeplitz_hash_result = 0;
+#endif
+		if (per_CE_contextp)
+			*per_CE_contextp = CE_state->send_context;
+
+		if (per_transfer_contextp) {
+			*per_transfer_contextp =
+				src_ring->per_transfer_context[sw_index];
+		}
+		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
+
+		/* Update sw_index */
+		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+		src_ring->sw_index = sw_index;
+		status = QDF_STATUS_SUCCESS;
+	}
+
+	return status;
+}
+
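+/*
+ * Revoke the next pending transfer on the source ring and return its
+ * buffer and metadata to the caller. Takes ce_index_lock internally.
+ */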
+static QDF_STATUS
+ce_cancel_send_next_legacy(struct CE_handle *copyeng,
+			   void **per_CE_contextp,
+			   void **per_transfer_contextp,
+			   qdf_dma_addr_t *bufferp,
+			   unsigned int *nbytesp,
+			   unsigned int *transfer_idp,
+			   uint32_t *toeplitz_hash_result)
+{
+	struct CE_state *CE_state;
+	struct CE_ring_state *src_ring;
+	unsigned int nentries_mask;
+	unsigned int sw_index;
+	unsigned int write_index;
+	QDF_STATUS status;
+	struct hif_softc *scn;
+
+	CE_state = (struct CE_state *)copyeng;
+	src_ring = CE_state->src_ring;
+	if (!src_ring)
+		return QDF_STATUS_E_FAILURE;
+
+	scn = CE_state->scn;
+	qdf_spin_lock(&CE_state->ce_index_lock);
+	nentries_mask = src_ring->nentries_mask;
+	sw_index = src_ring->sw_index;
+	write_index = src_ring->write_index;
+
+	if (write_index != sw_index) {
+		struct CE_src_desc *src_ring_base =
+			(struct CE_src_desc *)src_ring->base_addr_owner_space;
+		struct CE_src_desc *src_desc =
+			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
+
+		/* Return data from completed source descriptor */
+		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
+		*nbytesp = src_desc->nbytes;
+		*transfer_idp = src_desc->meta_data;
+#ifdef QCA_WIFI_3_0
+		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
+#else
+		*toeplitz_hash_result = 0;
+#endif
+
+		if (per_CE_contextp)
+			*per_CE_contextp = CE_state->send_context;
+
+		if (per_transfer_contextp) {
+			*per_transfer_contextp =
+				src_ring->per_transfer_context[sw_index];
+		}
+		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
+
+		/* Update sw_index */
+		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+		src_ring->sw_index = sw_index;
+		status = QDF_STATUS_SUCCESS;
+	} else {
+		status = QDF_STATUS_E_FAILURE;
+	}
+	qdf_spin_unlock(&CE_state->ce_index_lock);
+
+	return status;
+}
+
+/*
+ * Adjust interrupts for the copy complete handler.
+ * If it's needed for either send or recv, then unmask
+ * this interrupt; otherwise, mask it.
+ *
+ * Called with target_lock held.
+ */
+static void
+ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
+				    int disable_copy_compl_intr)
+{
+	uint32_t ctrl_addr = CE_state->ctrl_addr;
+	struct hif_softc *scn = CE_state->scn;
+
+	CE_state->disable_copy_compl_intr = disable_copy_compl_intr;
+
+	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
+		return;
+
+	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
+		hif_err_rl("%s: target access is not allowed", __func__);
+		/* balance the Q_TARGET_ACCESS_BEGIN above before returning */
+		Q_TARGET_ACCESS_END(scn);
+		return;
+	}
+
+	if ((!disable_copy_compl_intr) &&
+	    (CE_state->send_cb || CE_state->recv_cb))
+		CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
+	else
+		CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
+
+	if (CE_state->watermark_cb)
+		CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
+	else
+		CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
+	Q_TARGET_ACCESS_END(scn);
+}
+
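+/*
+ * Program the source ring registers for a legacy copy engine: ring
+ * base address (low and, where supported, high), ring size, maximum
+ * per-transfer size, byte swap for big endian hosts and watermarks.
+ */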
+static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
+				     struct CE_ring_state *src_ring,
+				     struct CE_attr *attr)
+{
+	uint32_t ctrl_addr;
+	uint64_t dma_addr;
+
+	QDF_ASSERT(ce_id < scn->ce_count);
+	ctrl_addr = CE_BASE_ADDRESS(ce_id);
+
+	src_ring->hw_index =
+		CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
+	src_ring->sw_index = src_ring->hw_index;
+	src_ring->write_index =
+		CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
+	dma_addr = src_ring->base_addr_CE_space;
+	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
+				  (uint32_t)(dma_addr & 0xFFFFFFFF));
+
+	/* if SR_BA_ADDRESS_HIGH register exists */
+	if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
+		uint32_t tmp;
+
+		tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
+				scn, ctrl_addr);
+		tmp &= ~0x1F;
+		dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
+		CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
+					ctrl_addr, (uint32_t)dma_addr);
+	}
+	CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
+	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
+#ifdef BIG_ENDIAN_HOST
+	/* Enable source ring byte swap for big endian host */
+	CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
+#endif
+	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
+	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);
+}
+
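+/*
+ * Program the destination ring registers for a legacy copy engine:
+ * ring base address (low and, where supported, high), ring size,
+ * byte swap for big endian hosts and watermarks.
+ */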
+static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
+				struct CE_ring_state *dest_ring,
+				struct CE_attr *attr)
+{
+	uint32_t ctrl_addr;
+	uint64_t dma_addr;
+
+	QDF_ASSERT(ce_id < scn->ce_count);
+	ctrl_addr = CE_BASE_ADDRESS(ce_id);
+	dest_ring->sw_index =
+		CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
+	dest_ring->write_index =
+		CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
+	dma_addr = dest_ring->base_addr_CE_space;
+	CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
+				   (uint32_t)(dma_addr & 0xFFFFFFFF));
+
+	/* if DR_BA_ADDRESS_HIGH exists */
+	if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
+		uint32_t tmp;
+
+		tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
+						      ctrl_addr);
+		tmp &= ~0x1F;
+		dma_addr = ((dma_addr >> 32) & 0x1F) | tmp;
+		CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
+				ctrl_addr, (uint32_t)dma_addr);
+	}
+
+	CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
+#ifdef BIG_ENDIAN_HOST
+	/* Enable Dest ring byte swap for big endian host */
+	CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
+#endif
+	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
+	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);
+}
+
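+/*
+ * Return the HW descriptor size for the given ring type. Legacy copy
+ * engines have no separate status ring, so that case asserts.
+ */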
+static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
+{
+	switch (ring_type) {
+	case CE_RING_SRC:
+		return sizeof(struct CE_src_desc);
+	case CE_RING_DEST:
+		return sizeof(struct CE_dest_desc);
+	case CE_RING_STATUS:
+		qdf_assert(0);
+		return 0;
+	default:
+		return 0;
+	}
+}
+
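+/*
+ * Dispatch ring setup by ring type, holding target access around the
+ * register writes.
+ */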
+static int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
+				uint32_t ce_id, struct CE_ring_state *ring,
+				struct CE_attr *attr)
+{
+	int status = Q_TARGET_ACCESS_BEGIN(scn);
+
+	if (status < 0)
+		goto out;
+
+	switch (ring_type) {
+	case CE_RING_SRC:
+		ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
+		break;
+	case CE_RING_DEST:
+		ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
+		break;
+	case CE_RING_STATUS:
+	default:
+		qdf_assert(0);
+		break;
+	}
+
+	Q_TARGET_ACCESS_END(scn);
+out:
+	return status;
+}
+
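+/*
+ * Legacy targets do not use the shadow register v2 configuration, so
+ * report an empty configuration.
+ */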
+static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn,
+			    struct pld_shadow_reg_v2_cfg **shadow_config,
+			    int *num_shadow_registers_configured)
+{
+	*num_shadow_registers_configured = 0;
+	*shadow_config = NULL;
+}
+
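+/*
+ * Check whether a watermark interrupt is pending on this copy engine
+ * and, if so, convert the HW status bits into CE_WM_FLAG_* flags.
+ */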
+static bool ce_check_int_watermark(struct CE_state *CE_state,
+				   unsigned int *flags)
+{
+	uint32_t ce_int_status;
+	uint32_t ctrl_addr = CE_state->ctrl_addr;
+	struct hif_softc *scn = CE_state->scn;
+
+	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
+	if (ce_int_status & CE_WATERMARK_MASK) {
+		/* Convert HW IS bits to software flags */
+		*flags =
+			(ce_int_status & CE_WATERMARK_MASK) >>
+			CE_WM_SHFT;
+		return true;
+	}
+
+	return false;
+}
+
+struct ce_ops ce_service_legacy = {
+	.ce_get_desc_size = ce_get_desc_size_legacy,
+	.ce_ring_setup = ce_ring_setup_legacy,
+	.ce_sendlist_send = ce_sendlist_send_legacy,
+	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
+	.ce_revoke_recv_next = ce_revoke_recv_next_legacy,
+	.ce_cancel_send_next = ce_cancel_send_next_legacy,
+	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
+	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
+	.ce_send_nolock = ce_send_nolock_legacy,
+	.watermark_int = ce_check_int_watermark,
+	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
+	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
+	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
+	.ce_prepare_shadow_register_v2_cfg =
+		ce_prepare_shadow_register_v2_cfg_legacy,
+};
+
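+/*
+ * Return the ce_ops table implementing CE services for legacy targets.
+ */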
+struct ce_ops *ce_services_legacy()
+{
+	return &ce_service_legacy;
+}
+
+qdf_export_symbol(ce_services_legacy);
+
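+/*
+ * Register the legacy ce_ops provider with the CE service layer.
+ */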
+void ce_service_legacy_init(void)
+{
+	ce_service_register_module(CE_SVC_LEGACY, &ce_services_legacy);
+}

+ 5 - 13
hif/src/ce/ce_service_srng.c

@@ -581,19 +581,6 @@ ce_cancel_send_next_srng(struct CE_handle *copyeng,
 	return status;
 }
 
-/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
-#define CE_WM_SHFT 1
-
-/*
- * Number of times to check for any pending tx/rx completion on
- * a copy engine, this count should be big enough. Once we hit
- * this threashold we'll not check for any Tx/Rx comlpetion in same
- * interrupt handling. Note that this threashold is only used for
- * Rx interrupt processing, this can be used tor Tx as well if we
- * suspect any infinite loop in checking for pending Tx completion.
- */
-#define CE_TXRX_COMP_CHECK_THRESHOLD 20
-
 /*
  * Adjust interrupts for the copy complete handler.
  * If it's needed for either send or recv, then unmask
@@ -866,3 +853,8 @@ struct ce_ops *ce_services_srng()
 	return &ce_service_srng;
 }
 qdf_export_symbol(ce_services_srng);
+
+void ce_service_srng_init(void)
+{
+	ce_service_register_module(CE_SVC_SRNG, &ce_services_srng);
+}