qcacld-3.0: TSO MAP-UNMAP individual segments one by one

Add support to unmap a tso segment when the host receives
ol_tx_completion_handler for that particular tso segment.
The host unmaps all fragments of the corresponding tso
segment except the 0th fragment.
The 0th fragment of each tso segment now points to the EIT
header, which is common across all segments, so the 0th
fragment cannot be unmapped until the host receives the tx
completion for the last tso segment.
Also, with the introduction of tso, skb map-unmap is no
longer required for tcp packets, as the host already does a
map-unmap of each tso segment before sending it.

CRs-Fixed: 1106688
Change-Id: I572c7dcd2d29cb19b398e13e0fe7ce6f88ee1641
Poddar, Siddarth, 8 years ago
parent commit 3f1fb13bc6
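
The unmap scheme works as follows: one shared counter element is allocated per jumbo skb, every per-segment tx descriptor points at it, and each tx completion decrements it; only the final decrement releases the shared 0th fragment. A minimal self-contained sketch of that idea (hypothetical names, not the driver's actual API):

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-ins for the driver's TSO bookkeeping types. */
	struct tso_seg { int id; };
	struct tso_common { int segs_left; };	/* one per jumbo skb */

	static void unmap_segment(struct tso_seg *seg, bool last_seg)
	{
		/* In the driver this is qdf_nbuf_unmap_tso_segment(); here we
		 * just log whether the shared 0th fragment is released. */
		printf("seg %d unmapped%s\n", seg->id,
		       last_seg ? " (including shared fragment 0)" : "");
	}

	/* Called once per segment when its tx completion arrives. */
	static void on_tso_seg_completion(struct tso_common *cmn,
					  struct tso_seg *seg)
	{
		cmn->segs_left--;
		/* Fragment 0 points at the EIT header shared by all segments,
		 * so it may only be unmapped when the final segment completes. */
		unmap_segment(seg, cmn->segs_left == 0);
		if (cmn->segs_left == 0)
			free(cmn);
		free(seg);
	}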

+ 105 - 0
core/dp/txrx/ol_tx.c

@@ -98,6 +98,9 @@ static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
 	msdu_info->tso_info.curr_seg = NULL;
 	if (qdf_nbuf_is_tso(msdu)) {
 		int num_seg = qdf_nbuf_get_tso_num_seg(msdu);
+		struct qdf_tso_num_seg_elem_t *tso_num_seg;
+
+		msdu_info->tso_info.tso_num_seg_list = NULL;
 		msdu_info->tso_info.tso_seg_list = NULL;
 		msdu_info->tso_info.num_segs = num_seg;
 		while (num_seg) {
@@ -123,6 +126,25 @@ static inline uint8_t ol_tx_prepare_tso(ol_txrx_vdev_handle vdev,
 				return 1;
 			}
 		}
+		tso_num_seg = ol_tso_num_seg_alloc(vdev->pdev);
+		if (tso_num_seg) {
+			tso_num_seg->next = msdu_info->tso_info.
+						tso_num_seg_list;
+			msdu_info->tso_info.tso_num_seg_list = tso_num_seg;
+		} else {
+			/* Free the already allocated num of segments */
+			struct qdf_tso_seg_elem_t *next_seg;
+			struct qdf_tso_seg_elem_t *free_seg =
+					msdu_info->tso_info.tso_seg_list;
+			qdf_print("TSO num of seg alloc for one jumbo skb failed!\n");
+			while (free_seg) {
+				next_seg = free_seg->next;
+				ol_tso_free_segment(vdev->pdev,
+					 free_seg);
+				free_seg = next_seg;
+			}
+			return 1;
+		}
 		qdf_nbuf_get_tso_info(vdev->pdev->osdev,
 			msdu, &(msdu_info->tso_info));
 		msdu_info->tso_info.curr_seg =
@@ -387,6 +409,7 @@ ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
 	tx_desc->netbuf = msdu;
 	if (msdu_info->tso_info.is_tso) {
 		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
+		tx_desc->tso_num_desc = msdu_info->tso_info.tso_num_seg_list;
 		tx_desc->pkt_type = OL_TX_FRM_TSO;
 		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, msdu);
 	} else {
@@ -1794,6 +1817,13 @@ qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
 }
 
 #if defined(FEATURE_TSO)
+/**
+ * ol_tso_seg_list_init() - function to initialise the tso seg freelist
+ * @pdev: the data physical device sending the data
+ * @num_seg: number of segments to be initialised
+ *
+ * Return: none
+ */
 void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
 {
 	int i;
@@ -1825,6 +1855,12 @@ void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
 	qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
 }
 
+/**
+ * ol_tso_seg_list_deinit() - function to de-initialise the tso seg freelist
+ * @pdev: the data physical device sending the data
+ *
+ * Return: none
+ */
 void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
 {
 	int i;
@@ -1859,4 +1895,73 @@ void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
 		c_element = temp;
 	}
 }
+
+/**
+ * ol_tso_num_seg_list_init() - initialise the freelist of elements used to
+ *				count the number of tso segments in a jumbo
+ *				skb packet
+ * @pdev: the data physical device sending the data
+ * @num_seg: number of elements to be initialised
+ *
+ * Return: none
+ */
+void ol_tso_num_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
+{
+	int i;
+	struct qdf_tso_num_seg_elem_t *c_element;
+
+	c_element = qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
+	pdev->tso_num_seg_pool.freelist = c_element;
+	for (i = 0; i < (num_seg - 1); i++) {
+		if (qdf_unlikely(!c_element)) {
+			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+				"%s: ERROR: c_element NULL for num of seg %d",
+				__func__, i);
+			QDF_BUG(0);
+			pdev->tso_num_seg_pool.num_seg_pool_size = i;
+			qdf_spinlock_create(&pdev->tso_num_seg_pool.
+							tso_num_seg_mutex);
+			return;
+		}
+
+		c_element->next =
+			qdf_mem_malloc(sizeof(struct qdf_tso_num_seg_elem_t));
+		c_element = c_element->next;
+		c_element->next = NULL;
+	}
+	pdev->tso_num_seg_pool.num_seg_pool_size = num_seg;
+	qdf_spinlock_create(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
+}
+
+/**
+ * ol_tso_num_seg_list_deinit() - de-initialise the freelist of elements
+ *				  used to count the number of tso segments
+ *				  in a jumbo skb packet
+ * @pdev: the data physical device sending the data
+ *
+ * Return: none
+ */
+void ol_tso_num_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
+{
+	int i;
+	struct qdf_tso_num_seg_elem_t *c_element;
+	struct qdf_tso_num_seg_elem_t *temp;
+
+	qdf_spin_lock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
+	c_element = pdev->tso_num_seg_pool.freelist;
+	i = pdev->tso_num_seg_pool.num_seg_pool_size;
+
+	pdev->tso_num_seg_pool.freelist = NULL;
+	pdev->tso_num_seg_pool.num_free = 0;
+	pdev->tso_num_seg_pool.num_seg_pool_size = 0;
+
+	qdf_spin_unlock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
+	qdf_spinlock_destroy(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
+
+	while (i-- > 0 && c_element) {
+		temp = c_element->next;
+		qdf_mem_free(c_element);
+		c_element = temp;
+	}
+}
 #endif /* FEATURE_TSO */
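
The num-seg pool added above follows the same preallocated singly-linked freelist pattern as the existing tso_seg_pool. A self-contained sketch of that pattern, with a pthread mutex standing in for the qdf spinlock and hypothetical names throughout:

	#include <pthread.h>
	#include <stdint.h>
	#include <stdlib.h>

	/* Hypothetical element standing in for qdf_tso_num_seg_elem_t. */
	struct num_seg_elem {
		struct num_seg_elem *next;
		uint32_t tso_cmn_num_seg;
	};

	struct num_seg_pool {
		struct num_seg_elem *freelist;
		uint16_t num_free;
		pthread_mutex_t lock;	/* stands in for the qdf spinlock */
	};

	/* Preallocate 'count' elements and chain them onto the freelist. */
	static int pool_init(struct num_seg_pool *pool, uint32_t count)
	{
		pool->freelist = NULL;
		pool->num_free = 0;
		pthread_mutex_init(&pool->lock, NULL);
		while (count--) {
			struct num_seg_elem *e = calloc(1, sizeof(*e));

			if (!e)
				return -1;	/* caller deinits on failure */
			e->next = pool->freelist;
			pool->freelist = e;
			pool->num_free++;
		}
		return 0;
	}

	static struct num_seg_elem *pool_alloc(struct num_seg_pool *pool)
	{
		struct num_seg_elem *e;

		pthread_mutex_lock(&pool->lock);
		e = pool->freelist;
		if (e) {
			pool->freelist = e->next;
			pool->num_free--;
		}
		pthread_mutex_unlock(&pool->lock);
		return e;		/* NULL when the pool is exhausted */
	}

	static void pool_free(struct num_seg_pool *pool, struct num_seg_elem *e)
	{
		pthread_mutex_lock(&pool->lock);
		e->next = pool->freelist;
		pool->freelist = e;
		pool->num_free++;
		pthread_mutex_unlock(&pool->lock);
	}

Note that allocation returns NULL on exhaustion rather than falling back to the heap, which is why ol_tx_prepare_tso() above must unwind the already-allocated tso segments when ol_tso_num_seg_alloc() fails.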

+ 9 - 0
core/dp/txrx/ol_tx.h

@@ -175,6 +175,8 @@ ol_tx_reinject(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu, uint16_t peer_id);
 #if defined(FEATURE_TSO)
 void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg);
 void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev);
+void ol_tso_num_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg);
+void ol_tso_num_seg_list_deinit(struct ol_txrx_pdev_t *pdev);
 #else
 static inline void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev,
 	uint32_t num_seg)
@@ -185,6 +187,13 @@ static inline void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
 {
 	return;
 }
+static inline void ol_tso_num_seg_list_init(struct ol_txrx_pdev_t *pdev,
+	uint32_t num_seg)
+{
+}
+static inline void ol_tso_num_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
+{
+}
 #endif
 
 #if defined(HELIUMPLUS_PADDR64)

+ 101 - 29
core/dp/txrx/ol_tx_desc.c

@@ -302,6 +302,48 @@ ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
 }
 #endif
 
+#ifdef FEATURE_TSO
+static void ol_tx_tso_desc_free(struct ol_txrx_pdev_t *pdev,
+				struct ol_tx_desc_t *tx_desc)
+{
+	if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "%s %d TSO desc is NULL!",
+			  __func__, __LINE__);
+		qdf_assert(0);
+	} else if (qdf_unlikely(tx_desc->tso_num_desc == NULL)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "%s %d TSO common info is NULL!",
+			  __func__, __LINE__);
+		qdf_assert(0);
+	} else {
+		struct qdf_tso_num_seg_elem_t *tso_num_desc =
+			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
+		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1) {
+			tso_num_desc->num_seg.tso_cmn_num_seg--;
+			qdf_nbuf_unmap_tso_segment(pdev->osdev,
+						   tx_desc->tso_desc, false);
+		} else {
+			tso_num_desc->num_seg.tso_cmn_num_seg--;
+			qdf_assert(tso_num_desc->num_seg.tso_cmn_num_seg == 0);
+			qdf_nbuf_unmap_tso_segment(pdev->osdev,
+						   tx_desc->tso_desc, true);
+			ol_tso_num_seg_free(pdev, tx_desc->tso_num_desc);
+			tx_desc->tso_num_desc = NULL;
+		}
+		ol_tso_free_segment(pdev, tx_desc->tso_desc);
+		tx_desc->tso_desc = NULL;
+	}
+}
+
+#else
+static inline void ol_tx_tso_desc_free(struct ol_txrx_pdev_t *pdev,
+				       struct ol_tx_desc_t *tx_desc)
+{
+}
+
+#endif
+
 #ifndef QCA_LL_TX_FLOW_CONTROL_V2
 /**
  * ol_tx_desc_free() - put descriptor to freelist
@@ -314,16 +356,9 @@ void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 {
 	qdf_spin_lock_bh(&pdev->tx_mutex);
 
-	if (tx_desc->pkt_type == OL_TX_FRM_TSO) {
-		if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
-			qdf_print("%s %d TSO desc is NULL!\n",
-				 __func__, __LINE__);
-			qdf_assert(0);
-		} else {
-			ol_tso_free_segment(pdev, tx_desc->tso_desc);
-			tx_desc->tso_desc = NULL;
-		}
-	}
+	if (tx_desc->pkt_type == OL_TX_FRM_TSO)
+		ol_tx_tso_desc_free(pdev, tx_desc);
+
 	ol_tx_desc_dup_detect_reset(pdev, tx_desc);
 	ol_tx_desc_reset_pkt_type(tx_desc);
 	ol_tx_desc_reset_timestamp(tx_desc);
@@ -346,17 +381,9 @@ void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 {
 	struct ol_tx_flow_pool_t *pool = tx_desc->pool;
 
-#if defined(FEATURE_TSO)
-	if (tx_desc->pkt_type == OL_TX_FRM_TSO) {
-		if (qdf_unlikely(tx_desc->tso_desc == NULL))
-			qdf_print("%s %d TSO desc is NULL!\n",
-				 __func__, __LINE__);
-		else {
-			ol_tso_free_segment(pdev, tx_desc->tso_desc);
-			tx_desc->tso_desc = NULL;
-		}
-	}
-#endif
+	if (tx_desc->pkt_type == OL_TX_FRM_TSO)
+		ol_tx_tso_desc_free(pdev, tx_desc);
+
 	ol_tx_desc_reset_pkt_type(tx_desc);
 	ol_tx_desc_reset_timestamp(tx_desc);
 
@@ -492,6 +519,7 @@ struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
 
 	if (msdu_info->tso_info.is_tso) {
 		tx_desc->tso_desc = msdu_info->tso_info.curr_seg;
+		tx_desc->tso_num_desc = msdu_info->tso_info.tso_num_seg_list;
 		tx_desc->pkt_type = OL_TX_FRM_TSO;
 		TXRX_STATS_MSDU_INCR(pdev, tx.tso.tso_pkts, netbuf);
 	} else {
@@ -713,15 +741,14 @@ void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
 
 #if defined(FEATURE_TSO)
 /**
- * htt_tso_alloc_segment() - function to allocate a TSO segment
+ * ol_tso_alloc_segment() - function to allocate a TSO segment
  * element
- * @pdev:   HTT pdev
- * @tso_seg:    This is the output. The TSO segment element.
+ * @pdev: the data physical device sending the data
  *
  * Allocates a TSO segment element from the free list held in
- * the HTT pdev
+ * the pdev
  *
- * Return: none
+ * Return: pointer to the allocated TSO segment element, or NULL
  */
 struct qdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
 {
@@ -754,15 +781,14 @@ struct qdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
 /**
  * ol_tso_free_segment() - function to free a TSO segment
  * element
- * @pdev:   HTT pdev
+ * @pdev: the data physical device sending the data
  * @tso_seg: The TSO segment element to be freed
  *
  * Returns a TSO segment element to the free list held in the
- * HTT pdev
+ * pdev
  *
  * Return: none
  */
-
 void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
 	 struct qdf_tso_seg_elem_t *tso_seg)
 {
@@ -785,4 +811,50 @@ void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
 	pdev->tso_seg_pool.num_free++;
 	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
 }
+
+/**
+ * ol_tso_num_seg_alloc() - allocate an element to count the TSO segments
+ *			    in a jumbo skb packet
+ * @pdev: the data physical device sending the data
+ *
+ * Allocates an element, used to count the TSO segments in a jumbo skb
+ * packet, from the free list held in the pdev
+ *
+ * Return: pointer to the allocated element, or NULL if the pool is empty
+ */
+struct qdf_tso_num_seg_elem_t *ol_tso_num_seg_alloc(struct ol_txrx_pdev_t *pdev)
+{
+	struct qdf_tso_num_seg_elem_t *tso_num_seg = NULL;
+
+	qdf_spin_lock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
+	if (pdev->tso_num_seg_pool.freelist) {
+		pdev->tso_num_seg_pool.num_free--;
+		tso_num_seg = pdev->tso_num_seg_pool.freelist;
+		pdev->tso_num_seg_pool.freelist =
+				pdev->tso_num_seg_pool.freelist->next;
+	}
+	qdf_spin_unlock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
+
+	return tso_num_seg;
+}
+
+/**
+ * ol_tso_num_seg_free() - free an element used to count the TSO segments
+ *			    in a jumbo skb packet
+ * @pdev: the data physical device sending the data
+ * @tso_num_seg: the element to be freed
+ *
+ * Returns an element to the free list held in the pdev
+ *
+ * Return: none
+ */
+void ol_tso_num_seg_free(struct ol_txrx_pdev_t *pdev,
+	 struct qdf_tso_num_seg_elem_t *tso_num_seg)
+{
+	qdf_spin_lock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
+	tso_num_seg->next = pdev->tso_num_seg_pool.freelist;
+	pdev->tso_num_seg_pool.freelist = tso_num_seg;
+	pdev->tso_num_seg_pool.num_free++;
+	qdf_spin_unlock_bh(&pdev->tso_num_seg_pool.tso_num_seg_mutex);
+}
 #endif
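
On the send side, ol_tx_prepare_tso() allocates one such element per jumbo skb, and both ol_tx_prepare_ll_fast() and ol_tx_desc_ll() attach it to every per-segment descriptor, so that ol_tx_tso_desc_free() can count tso_cmn_num_seg down to zero. A sketch of that pairing (hypothetical helper, simplified types):

	#include <stdint.h>

	/* Simplified stand-ins for the driver's descriptor types. */
	struct num_seg_elem {
		struct num_seg_elem *next;
		uint32_t tso_cmn_num_seg;
	};
	struct tso_seg;			/* opaque per-segment descriptor */
	struct tx_desc {
		struct tso_seg *tso_desc;
		struct num_seg_elem *tso_num_desc;
	};

	/*
	 * One counter element per jumbo skb: every per-segment tx descriptor
	 * carries a pointer to the same element, and the free path decrements
	 * tso_cmn_num_seg once per completion until it reaches zero.
	 */
	static void attach_tso_descs(struct tx_desc *descs,
				     struct tso_seg **segs,
				     struct num_seg_elem *cmn, uint32_t nsegs)
	{
		uint32_t i;

		cmn->tso_cmn_num_seg = nsegs;
		for (i = 0; i < nsegs; i++) {
			descs[i].tso_desc = segs[i];
			descs[i].tso_num_desc = cmn;	/* shared by all segs */
		}
	}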

+ 9 - 1
core/dp/txrx/ol_tx_desc.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2014-2017 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -228,9 +228,17 @@ struct qdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev);
 
 void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
 	 struct qdf_tso_seg_elem_t *tso_seg);
+struct qdf_tso_num_seg_elem_t *ol_tso_num_seg_alloc(
+				struct ol_txrx_pdev_t *pdev);
+void ol_tso_num_seg_free(struct ol_txrx_pdev_t *pdev,
+	 struct qdf_tso_num_seg_elem_t *tso_num_seg);
+
 #else
 #define ol_tso_alloc_segment(pdev) /*no-op*/
 #define ol_tso_free_segment(pdev, tso_seg) /*no-op*/
+#define ol_tso_num_seg_alloc(pdev) /*no-op*/
+#define ol_tso_num_seg_free(pdev, tso_num_seg) /*no-op*/
+
 #endif
 
 /**

+ 3 - 0
core/dp/txrx/ol_txrx.c

@@ -1549,6 +1549,8 @@ ol_txrx_pdev_post_attach(void *ppdev)
 
 	ol_tso_seg_list_init(pdev, desc_pool_size);
 
+	ol_tso_num_seg_list_init(pdev, desc_pool_size);
+
 	ol_tx_register_flow_control(pdev);
 
 	return 0;            /* success */
@@ -1647,6 +1649,7 @@ static void ol_txrx_pdev_detach(void *ppdev, int force)
 #endif
 #endif
 	ol_tso_seg_list_deinit(pdev);
+	ol_tso_num_seg_list_deinit(pdev);
 
 	if (force) {
 		/*

+ 9 - 1
core/dp/txrx/ol_txrx_types.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -199,6 +199,7 @@ struct ol_tx_desc_t {
 	struct ol_tx_flow_pool_t *pool;
 #endif
 	void *tso_desc;
+	void *tso_num_desc;
 };
 
 typedef TAILQ_HEAD(some_struct_name, ol_tx_desc_t) ol_tx_desc_list;
@@ -895,6 +896,13 @@ struct ol_txrx_pdev_t {
 		/* tso mutex */
 		OL_TX_MUTEX_TYPE tso_mutex;
 	} tso_seg_pool;
+	struct {
+		uint16_t num_seg_pool_size;
+		uint16_t num_free;
+		struct qdf_tso_num_seg_elem_t *freelist;
+		/* tso mutex */
+		OL_TX_MUTEX_TYPE tso_num_seg_mutex;
+	} tso_num_seg_pool;
 #endif
 
 #if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)