Browse Source

qcacmn: Correct the return types of CE functions

A few functions in the CE files return a QDF status value but are declared
with a non-QDF_STATUS return type. For such functions, update the return
type to QDF_STATUS.

Change-Id: I401c0a1e1c21ac0865cf4da018eb2d2fb6d86900
CRs-Fixed: 2734818
Shashikala Prabhu, 5 years ago
parent
commit
a143607a26

+ 61 - 34
hif/src/ce/ce_api.h

@@ -230,19 +230,21 @@ QDF_STATUS ce_sendlist_send(struct CE_handle *copyeng,
 
 /*==================Recv=====================================================*/
 
-/*
- * Make a buffer available to receive. The buffer must be at least of a
- * minimal size appropriate for this copy engine (src_sz_max attribute).
- *   copyeng                    - which copy engine to use
- *   per_transfer_recv_context  - context passed back to caller's recv_cb
- *   buffer                     - address of buffer in CE space
- * Returns 0 on success; otherwise an error status.
+/**
+ * ce_recv_buf_enqueue() -  Make a buffer available to receive. The buffer must
+ * be at least of a minimal size appropriate for this copy engine (src_sz_max
+ * attribute).
+ * @copyeng: which copy engine to use
+ * @per_transfer_recv_context: context passed back to caller's recv_cb
+ * @buffer: address of buffer in CE space
  *
  * Implementation note: Pushes a buffer to Dest ring.
+ *
+ * Return: QDF_STATUS.
  */
-int ce_recv_buf_enqueue(struct CE_handle *copyeng,
-			void *per_transfer_recv_context,
-			qdf_dma_addr_t buffer);
+QDF_STATUS ce_recv_buf_enqueue(struct CE_handle *copyeng,
+			       void *per_transfer_recv_context,
+			       qdf_dma_addr_t buffer);
 
 /*
  * Register a Receive Callback function.
@@ -313,8 +315,16 @@ unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);
 /* Data is byte-swapped */
 #define CE_RECV_FLAG_SWAPPED            1
 
-/*
- * Supply data for the next completed unprocessed receive descriptor.
+/**
+ * ce_completed_recv_next() - Supply data for the next completed unprocessed
+ * receive descriptor.
+ * @copyeng: which copy engine to use
+ * @per_CE_contextp: CE context
+ * @per_transfer_contextp: Transfer context
+ * @bufferp: buffer pointer
+ * @nbytesp: number of bytes
+ * @transfer_idp: Transfer idp
+ * @flagsp: flags
  *
  * For use
  *    with CE Watermark callback,
@@ -322,33 +332,47 @@ unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);
  *    in a recv_cb function in order to mitigate recv_cb's.
  *
  * Implementation note: Pops buffer from Dest ring.
+ *
+ * Return: QDF_STATUS
  */
-int ce_completed_recv_next(struct CE_handle *copyeng,
-			   void **per_CE_contextp,
-			   void **per_transfer_contextp,
-			   qdf_dma_addr_t *bufferp,
-			   unsigned int *nbytesp,
-			   unsigned int *transfer_idp,
-			   unsigned int *flagsp);
+QDF_STATUS ce_completed_recv_next(struct CE_handle *copyeng,
+				  void **per_CE_contextp,
+				  void **per_transfer_contextp,
+				  qdf_dma_addr_t *bufferp,
+				  unsigned int *nbytesp,
+				  unsigned int *transfer_idp,
+				  unsigned int *flagsp);
 
-/*
- * Supply data for the next completed unprocessed send descriptor.
+/**
+ * ce_completed_send_next() - Supply data for the next completed unprocessed
+ * send descriptor.
+ * @copyeng: which copy engine to use
+ * @per_CE_contextp: CE context
+ * @per_transfer_contextp: Transfer context
+ * @bufferp: buffer pointer
+ * @nbytesp: number of bytes
+ * @transfer_idp: Transfer idp
+ * @sw_idx: SW index
+ * @hw_idx: HW index
+ * @toeplitz_hash_result: toeplitz hash result
  *
  * For use
  *    with CE Watermark callback
  *    in a send_cb function in order to mitigate send_cb's.
  *
  * Implementation note: Pops 1 completed send buffer from Source ring
+ *
+ * Return: QDF_STATUS
  */
-int ce_completed_send_next(struct CE_handle *copyeng,
-			   void **per_CE_contextp,
-			   void **per_transfer_contextp,
-			   qdf_dma_addr_t *bufferp,
-			   unsigned int *nbytesp,
-			   unsigned int *transfer_idp,
-			   unsigned int *sw_idx,
-			   unsigned int *hw_idx,
-			   uint32_t *toeplitz_hash_result);
+QDF_STATUS ce_completed_send_next(struct CE_handle *copyeng,
+				  void **per_CE_contextp,
+				  void **per_transfer_contextp,
+				  qdf_dma_addr_t *bufferp,
+				  unsigned int *nbytesp,
+				  unsigned int *transfer_idp,
+				  unsigned int *sw_idx,
+				  unsigned int *hw_idx,
+				  uint32_t *toeplitz_hash_result);
 
 /*==================CE Engine Initialization=================================*/
 
@@ -530,17 +554,20 @@ struct ce_ops {
 			qdf_dma_addr_t *bufferp, unsigned int *nbytesp,
 			unsigned int *transfer_idp,
 			uint32_t *toeplitz_hash_result);
-	int (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
-			void *per_recv_context, qdf_dma_addr_t buffer);
+	QDF_STATUS (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
+					  void *per_recv_context,
+					  qdf_dma_addr_t buffer);
 	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
-	int (*ce_completed_recv_next_nolock)(struct CE_state *CE_state,
+	QDF_STATUS (*ce_completed_recv_next_nolock)(
+			struct CE_state *CE_state,
 			void **per_CE_contextp,
 			void **per_transfer_contextp,
 			qdf_dma_addr_t *bufferp,
 			unsigned int *nbytesp,
 			unsigned int *transfer_idp,
 			unsigned int *flagsp);
-	int (*ce_completed_send_next_nolock)(struct CE_state *CE_state,
+	QDF_STATUS (*ce_completed_send_next_nolock)(
+			struct CE_state *CE_state,
 			void **per_CE_contextp,
 			void **per_transfer_contextp,
 			qdf_dma_addr_t *bufferp,

+ 6 - 6
hif/src/ce/ce_service.c

@@ -671,9 +671,9 @@ QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
  * @per_recv_context: virtual address of the nbuf
  * @buffer: physical address of the nbuf
  *
- * Return: 0 if the buffer is enqueued
+ * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
  */
-int
+QDF_STATUS
 ce_recv_buf_enqueue(struct CE_handle *copyeng,
 		    void *per_recv_context, qdf_dma_addr_t buffer)
 {
@@ -749,7 +749,7 @@ unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
  * Guts of ce_completed_recv_next.
  * The caller takes responsibility for any necessary locking.
  */
-int
+QDF_STATUS
 ce_completed_recv_next(struct CE_handle *copyeng,
 		       void **per_CE_contextp,
 		       void **per_transfer_contextp,
@@ -758,7 +758,7 @@ ce_completed_recv_next(struct CE_handle *copyeng,
 		       unsigned int *transfer_idp, unsigned int *flagsp)
 {
 	struct CE_state *CE_state = (struct CE_state *)copyeng;
-	int status;
+	QDF_STATUS status;
 	struct hif_softc *scn = CE_state->scn;
 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
 	struct ce_ops *ce_services;
@@ -804,7 +804,7 @@ ce_cancel_send_next(struct CE_handle *copyeng,
 }
 qdf_export_symbol(ce_cancel_send_next);
 
-int
+QDF_STATUS
 ce_completed_send_next(struct CE_handle *copyeng,
 		       void **per_CE_contextp,
 		       void **per_transfer_contextp,
@@ -819,7 +819,7 @@ ce_completed_send_next(struct CE_handle *copyeng,
 	struct hif_softc *scn = CE_state->scn;
 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
 	struct ce_ops *ce_services;
-	int status;
+	QDF_STATUS status;
 
 	ce_services = hif_state->ce_services;
 	qdf_spin_lock_bh(&CE_state->ce_index_lock);

+ 8 - 8
hif/src/ce/ce_service_legacy.c

@@ -706,13 +706,13 @@ ce_sendlist_send_legacy(struct CE_handle *copyeng,
  * @per_recv_context: virtual address of the nbuf
  * @buffer: physical address of the nbuf
  *
- * Return: 0 if the buffer is enqueued
+ * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
  */
-static int
+static QDF_STATUS
 ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
 			   void *per_recv_context, qdf_dma_addr_t buffer)
 {
-	int status;
+	QDF_STATUS status;
 	struct CE_state *CE_state = (struct CE_state *)copyeng;
 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
 	uint32_t ctrl_addr = CE_state->ctrl_addr;
@@ -728,7 +728,7 @@ ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
 
 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
-		return -EIO;
+		return QDF_STATUS_E_IO;
 	}
 
 	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
@@ -802,7 +802,7 @@ ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
 	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
 }
 
-static int
+static QDF_STATUS
 ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
 				     void **per_CE_contextp,
 				     void **per_transfer_contextp,
@@ -811,7 +811,7 @@ ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
 				     unsigned int *transfer_idp,
 				     unsigned int *flagsp)
 {
-	int status;
+	QDF_STATUS status;
 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
 	unsigned int nentries_mask = dest_ring->nentries_mask;
 	unsigned int sw_index = dest_ring->sw_index;
@@ -930,7 +930,7 @@ ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
  * Guts of ce_completed_send_next.
  * The caller takes responsibility for any necessary locking.
  */
-static int
+static QDF_STATUS
 ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
 				     void **per_CE_contextp,
 				     void **per_transfer_contextp,
@@ -941,7 +941,7 @@ ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
 				     unsigned int *hw_idx,
 				     uint32_t *toeplitz_hash_result)
 {
-	int status = QDF_STATUS_E_FAILURE;
+	QDF_STATUS status = QDF_STATUS_E_FAILURE;
 	struct CE_ring_state *src_ring = CE_state->src_ring;
 	uint32_t ctrl_addr = CE_state->ctrl_addr;
 	unsigned int nentries_mask = src_ring->nentries_mask;

+ 8 - 8
hif/src/ce/ce_service_srng.c

@@ -294,13 +294,13 @@ ce_sendlist_send_srng(struct CE_handle *copyeng,
  * @per_recv_context: virtual address of the nbuf
  * @buffer: physical address of the nbuf
  *
- * Return: 0 if the buffer is enqueued
+ * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
  */
-static int
+static QDF_STATUS
 ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
 		    void *per_recv_context, qdf_dma_addr_t buffer)
 {
-	int status;
+	QDF_STATUS status;
 	struct CE_state *CE_state = (struct CE_state *)copyeng;
 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
 	unsigned int nentries_mask = dest_ring->nentries_mask;
@@ -316,7 +316,7 @@ ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
 
 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
-		return -EIO;
+		return QDF_STATUS_E_IO;
 	}
 
 	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
@@ -402,7 +402,7 @@ ce_send_entries_done_nolock_srng(struct hif_softc *scn,
  * Guts of ce_completed_recv_next.
  * The caller takes responsibility for any necessary locking.
  */
-static int
+static QDF_STATUS
 ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
 			      void **per_CE_contextp,
 			      void **per_transfer_contextp,
@@ -411,7 +411,7 @@ ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
 			      unsigned int *transfer_idp,
 			      unsigned int *flagsp)
 {
-	int status;
+	QDF_STATUS status;
 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
 	struct CE_ring_state *status_ring = CE_state->status_ring;
 	unsigned int nentries_mask = dest_ring->nentries_mask;
@@ -547,7 +547,7 @@ ce_revoke_recv_next_srng(struct CE_handle *copyeng,
  * Guts of ce_completed_send_next.
  * The caller takes responsibility for any necessary locking.
  */
-static int
+static QDF_STATUS
 ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
 			      void **per_CE_contextp,
 			      void **per_transfer_contextp,
@@ -558,7 +558,7 @@ ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
 			      unsigned int *hw_idx,
 			      uint32_t *toeplitz_hash_result)
 {
-	int status = QDF_STATUS_E_FAILURE;
+	QDF_STATUS status = QDF_STATUS_E_FAILURE;
 	struct CE_ring_state *src_ring = CE_state->src_ring;
 	unsigned int nentries_mask = src_ring->nentries_mask;
 	unsigned int sw_index = src_ring->sw_index;