
qcacmn: TSO MAP-UNMAP individual segments one by one

Change the static allocation of the TSO common info to dynamic
allocation so that this structure can be used later on.
Introduce one more element in the TSO common info structure,
used to store the DMA address of the EIT header, which can be
repeatedly assigned as the DMA address of the 0th fragment
of all the TSO segments belonging to one jumbo skb.

CRs-Fixed: 1106688
Change-Id: I572c7dcd2d29cb19b398e13e0fe7ce6f88ee1641
Poddar, Siddarth, 8 years ago
parent commit d56844e3e5
4 files changed, 148 additions and 16 deletions
  1. qdf/inc/qdf_nbuf.h (+17 -1)
  2. qdf/inc/qdf_types.h (+23 -0)
  3. qdf/linux/src/i_qdf_nbuf.h (+5 -1)
  4. qdf/linux/src/qdf_nbuf.c (+103 -14)
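
Before the per-file diffs, here is a minimal sketch of the idea described in the
commit message: the EIT header is DMA-mapped once and that single mapped address
is reused as fragment 0 of every TSO segment of the jumbo skb. The helper name
and flow below are illustrative only, not the actual qcacmn code.

/*
 * Hypothetical helper (illustration only): map the shared EIT header once
 * and stamp the resulting DMA address into fragment 0 of every TSO segment
 * element, mirroring what __qdf_nbuf_get_tso_cmn_seg_info() and
 * __qdf_nbuf_fill_tso_cmn_seg_info() do in the diff below.
 */
static int example_map_shared_eit_hdr(qdf_device_t osdev, uint8_t *eit_hdr,
				      uint32_t eit_hdr_len,
				      struct qdf_tso_seg_elem_t *seg_list)
{
	struct qdf_tso_seg_elem_t *seg;
	qdf_dma_addr_t eit_dma;

	/* one mapping for the header shared by all segments */
	eit_dma = dma_map_single(osdev->dev, eit_hdr, eit_hdr_len,
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(osdev->dev, eit_dma)))
		return 1;

	/* every segment's 0th fragment points at the same mapping */
	for (seg = seg_list; seg; seg = seg->next) {
		seg->seg.tso_frags[0].vaddr = eit_hdr;
		seg->seg.tso_frags[0].length = eit_hdr_len;
		seg->seg.tso_frags[0].paddr = eit_dma;
	}

	return 0;
}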

+ 17 - 1
qdf/inc/qdf_nbuf.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -1834,6 +1834,22 @@ static inline uint32_t qdf_nbuf_get_tso_info(qdf_device_t osdev,
 	return __qdf_nbuf_get_tso_info(osdev, nbuf, tso_info);
 }
 
+/**
+ * qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
+ *
+ * @osdev: qdf device handle
+ * @tso_seg: TSO segment element to be unmapped
+ * @is_last_seg: whether this is the last TSO segment or not
+ *
+ * Return: none
+ */
+static inline void qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
+			  struct qdf_tso_seg_elem_t *tso_seg,
+			  bool is_last_seg)
+{
+	return __qdf_nbuf_unmap_tso_segment(osdev, tso_seg, is_last_seg);
+}
+
 /**
  * qdf_nbuf_get_tso_num_seg() - function to calculate the number
  * of TCP segments within the TSO jumbo packet
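
A minimal caller-side sketch (hypothetical helper, not part of this patch) of how
the new wrapper might be driven at TX-completion time: unmap each segment in turn
and pass is_last_seg only for the final element, so the shared EIT-header mapping
in fragment 0 is released exactly once.

/*
 * Hypothetical TX-completion helper: unmap every TSO segment of one jumbo
 * skb. is_last_seg is true only for the final list element, so fragment 0
 * (the shared EIT-header mapping) is unmapped exactly once.
 */
static void example_unmap_all_tso_segs(qdf_device_t osdev,
				       struct qdf_tso_seg_elem_t *seg_list)
{
	struct qdf_tso_seg_elem_t *seg;

	for (seg = seg_list; seg; seg = seg->next)
		qdf_nbuf_unmap_tso_segment(osdev, seg, seg->next == NULL);
}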

+ 23 - 0
qdf/inc/qdf_types.h

@@ -466,12 +466,34 @@ struct qdf_tso_seg_elem_t {
 	struct qdf_tso_seg_elem_t *next;
 };
 
+/**
+ * struct qdf_tso_num_seg_t - single element holding the number of segments
+ * @tso_cmn_num_seg: num of seg in a jumbo skb
+ *
+ * This structure holds the information of num of segments of a jumbo
+ * TSO network buffer.
+ */
+struct qdf_tso_num_seg_t {
+	uint32_t tso_cmn_num_seg;
+};
+
+/**
+ * struct qdf_tso_num_seg_elem_t - number-of-segments element for a jumbo skb
+ * @num_seg: instance of num of seg
+ * @next: pointer to the next segment
+ */
+struct qdf_tso_num_seg_elem_t {
+	struct qdf_tso_num_seg_t num_seg;
+	struct qdf_tso_num_seg_elem_t *next;
+};
+
 /**
  * struct qdf_tso_info_t - TSO information extracted
  * @is_tso: is this a TSO frame
  * @num_segs: number of segments
  * @tso_seg_list: list of TSO segments for this jumbo packet
  * @curr_seg: segment that is currently being processed
+ * @tso_num_seg_list: element tracking the number of TSO segments for this jumbo packet
  *
  * This structure holds the TSO information extracted after parsing the TSO
  * jumbo network buffer. It contains a chain of the TSO segments belonging to
@@ -482,6 +504,7 @@ struct qdf_tso_info_t {
 	uint32_t num_segs;
 	struct qdf_tso_seg_elem_t *tso_seg_list;
 	struct qdf_tso_seg_elem_t *curr_seg;
+	struct qdf_tso_num_seg_elem_t *tso_num_seg_list;
 };
 
 /**
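
The new tso_num_seg_list element is owned by the caller and linked into
qdf_tso_info_t before the segments are parsed. A minimal setup sketch follows;
the helper name and the assumption that the caller pre-allocates the segment
list and knows the expected segment count are illustrative, not taken from
this patch.

/*
 * Hypothetical setup helper: link a caller-owned num-seg element into
 * qdf_tso_info_t so the parser can record tso_cmn_num_seg for the whole
 * jumbo skb. A return value of 0 indicates failure (see the error path in
 * __qdf_nbuf_get_tso_info below).
 */
static uint32_t example_prepare_tso_info(qdf_device_t osdev, qdf_nbuf_t nbuf,
				struct qdf_tso_seg_elem_t *seg_list,
				uint32_t num_segs,
				struct qdf_tso_num_seg_elem_t *num_seg_elem,
				struct qdf_tso_info_t *tso_info)
{
	memset(tso_info, 0, sizeof(*tso_info));
	memset(num_seg_elem, 0, sizeof(*num_seg_elem));

	/* pre-allocated segment elements and the expected segment count */
	tso_info->tso_seg_list = seg_list;
	tso_info->num_segs = num_segs;
	/* the counter element introduced by this change */
	tso_info->tso_num_seg_list = num_seg_elem;

	return qdf_nbuf_get_tso_info(osdev, nbuf, tso_info);
}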

+ 5 - 1
qdf/linux/src/i_qdf_nbuf.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -1057,6 +1057,10 @@ void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
 uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
 	struct qdf_tso_info_t *tso_info);
 
+void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
+			  struct qdf_tso_seg_elem_t *tso_seg,
+			  bool is_last_seg);
+
 uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
 
 static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)

+ 103 - 14
qdf/linux/src/qdf_nbuf.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -1560,12 +1560,32 @@ EXPORT_SYMBOL(qdf_net_buf_debug_release_skb);
 #endif /*MEMORY_DEBUG */
 #if defined(FEATURE_TSO)
 
+/**
+ * struct qdf_tso_cmn_seg_info_t - TSO common info structure
+ *
+ * @ethproto: ethernet type of the msdu
+ * @ip_tcp_hdr_len: ip + tcp length for the msdu
+ * @l2_len: L2 length for the msdu
+ * @eit_hdr: pointer to EIT header
+ * @eit_hdr_len: EIT header length for the msdu
+ * @eit_hdr_dma_map_addr: dma addr for EIT header
+ * @tcphdr: pointer to tcp header
+ * @ipv4_csum_en: ipv4 checksum enable
+ * @tcp_ipv4_csum_en: TCP ipv4 checksum enable
+ * @tcp_ipv6_csum_en: TCP ipv6 checksum enable
+ * @ip_id: IP id
+ * @tcp_seq_num: TCP sequence number
+ *
+ * This structure holds the TSO common info that is common
+ * across all the TCP segments of the jumbo packet.
+ */
 struct qdf_tso_cmn_seg_info_t {
 	uint16_t ethproto;
 	uint16_t ip_tcp_hdr_len;
 	uint16_t l2_len;
-	unsigned char *eit_hdr;
-	unsigned int eit_hdr_len;
+	uint8_t *eit_hdr;
+	uint32_t eit_hdr_len;
+	qdf_dma_addr_t eit_hdr_dma_map_addr;
 	struct tcphdr *tcphdr;
 	uint16_t ipv4_csum_en;
 	uint16_t tcp_ipv4_csum_en;
@@ -1577,14 +1597,18 @@ struct qdf_tso_cmn_seg_info_t {
 /**
  * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common
  * information
+ * @osdev: qdf device handle
+ * @skb: skb buffer
+ * @tso_info: Parameters common to all segments
  *
  * Get the TSO information that is common across all the TCP
  * segments of the jumbo packet
  *
  * Return: 0 - success 1 - failure
  */
-static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(struct sk_buff *skb,
-	struct qdf_tso_cmn_seg_info_t *tso_info)
+static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(qdf_device_t osdev,
+			struct sk_buff *skb,
+			struct qdf_tso_cmn_seg_info_t *tso_info)
 {
 	/* Get ethernet type and ethernet header length */
 	tso_info->ethproto = vlan_get_protocol(skb);
@@ -1617,6 +1641,16 @@ static uint8_t __qdf_nbuf_get_tso_cmn_seg_info(struct sk_buff *skb,
 	tso_info->eit_hdr = skb->data;
 	tso_info->eit_hdr_len = (skb_transport_header(skb)
 		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
+	tso_info->eit_hdr_dma_map_addr = dma_map_single(osdev->dev,
+							tso_info->eit_hdr,
+							tso_info->eit_hdr_len,
+							DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(osdev->dev,
+				       tso_info->eit_hdr_dma_map_addr))) {
+		qdf_print("DMA mapping error!\n");
+		qdf_assert(0);
+		return 1;
+	}
 	tso_info->ip_tcp_hdr_len = tso_info->eit_hdr_len - tso_info->l2_len;
 	TSO_DEBUG("%s seq# %u eit hdr len %u l2 len %u  skb len %u\n", __func__,
 		tso_info->tcp_seq_num,
@@ -1649,18 +1683,15 @@ void __qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
 /**
  * __qdf_nbuf_fill_tso_cmn_seg_info() - Init function for each TSO nbuf segment
  *
- * @osdev: qdf device handle
  * @curr_seg: Segment whose contents are initialized
  * @tso_cmn_info: Parameters common to all segments
  *
  * Return: None
  */
-static inline void __qdf_nbuf_fill_tso_cmn_seg_info(qdf_device_t osdev,
+static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
 				struct qdf_tso_seg_elem_t *curr_seg,
 				struct qdf_tso_cmn_seg_info_t *tso_cmn_info)
 {
-	qdf_dma_addr_t mapped;
-
 	/* Initialize the flags to 0 */
 	memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
 
@@ -1698,9 +1729,7 @@ static inline void __qdf_nbuf_fill_tso_cmn_seg_info(qdf_device_t osdev,
 	curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info->eit_hdr;
 	curr_seg->seg.tso_frags[0].length = tso_cmn_info->eit_hdr_len;
 	curr_seg->seg.total_len = curr_seg->seg.tso_frags[0].length;
-	mapped = dma_map_single(osdev->dev, tso_cmn_info->eit_hdr,
-				tso_cmn_info->eit_hdr_len, DMA_TO_DEVICE);
-	curr_seg->seg.tso_frags[0].paddr = mapped;
+	curr_seg->seg.tso_frags[0].paddr = tso_cmn_info->eit_hdr_dma_map_addr;
 
 	TSO_DEBUG("%s %d eit hdr %p eit_hdr_len %d tcp_seq_num %u tso_info->total_len %u\n",
 		   __func__, __LINE__, tso_cmn_info->eit_hdr,
@@ -1734,6 +1763,7 @@ uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
 	qdf_dma_addr_t tso_frag_paddr = 0;
 	uint32_t num_seg = 0;
 	struct qdf_tso_seg_elem_t *curr_seg;
+	struct qdf_tso_num_seg_elem_t *total_num_seg;
 	struct skb_frag_struct *frag = NULL;
 	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
 	uint32_t skb_frag_len = 0; /* skb's fragment length (continuous memory)*/
@@ -1743,10 +1773,12 @@ uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
 
 	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
 
-	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(skb, &tso_cmn_info))) {
+	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(osdev,
+						skb, &tso_cmn_info))) {
 		qdf_print("TSO: error getting common segment info\n");
 		return 0;
 	}
+	total_num_seg = tso_info->tso_num_seg_list;
 	curr_seg = tso_info->tso_seg_list;
 
 	/* length of the first chunk of data in the skb */
@@ -1768,14 +1800,16 @@ uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
 	num_seg = tso_info->num_segs;
 	tso_info->num_segs = 0;
 	tso_info->is_tso = 1;
+	total_num_seg->num_seg.tso_cmn_num_seg = 0;
 
 	while (num_seg && curr_seg) {
 		int i = 1; /* tso fragment index */
 		uint8_t more_tso_frags = 1;
 
 		tso_info->num_segs++;
+		total_num_seg->num_seg.tso_cmn_num_seg++;
 
-		__qdf_nbuf_fill_tso_cmn_seg_info(osdev, curr_seg,
+		__qdf_nbuf_fill_tso_cmn_seg_info(curr_seg,
 						 &tso_cmn_info);
 
 		if (unlikely(skb_proc == 0))
@@ -1870,6 +1904,61 @@ uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(__qdf_nbuf_get_tso_info);
 
+/**
+ * __qdf_nbuf_unmap_tso_segment() - function to dma unmap TSO segment element
+ *
+ * @osdev: qdf device handle
+ * @tso_seg: TSO segment element to be unmapped
+ * @is_last_seg: whether this is the last TSO segment or not
+ *
+ * Return: none
+ */
+void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
+			  struct qdf_tso_seg_elem_t *tso_seg,
+			  bool is_last_seg)
+{
+	uint32_t num_frags = tso_seg->seg.num_frags - 1;
+
+	/*Num of frags in a tso seg cannot be less than 2 */
+	if (num_frags < 1) {
+		qdf_assert(0);
+		qdf_print("ERROR: num of frags in a tso segment is %d\n",
+				  (num_frags + 1));
+		return;
+	}
+
+	while (num_frags) {
+		/*Do dma unmap the tso seg except the 0th frag */
+		if (0 ==  tso_seg->seg.tso_frags[num_frags].paddr) {
+			qdf_print("ERROR: TSO seg frag %d mapped physical address is NULL\n",
+				  num_frags);
+			qdf_assert(0);
+			return;
+		}
+		dma_unmap_single(osdev->dev,
+				 tso_seg->seg.tso_frags[num_frags].paddr,
+				 tso_seg->seg.tso_frags[num_frags].length,
+				 QDF_DMA_TO_DEVICE);
+		tso_seg->seg.tso_frags[num_frags].paddr = 0;
+		num_frags--;
+	}
+
+	if (is_last_seg) {
+		/*Do dma unmap for the tso seg 0th frag */
+		if (0 ==  tso_seg->seg.tso_frags[0].paddr) {
+			qdf_print("ERROR: TSO seg frag 0 mapped physical address is NULL\n");
+			qdf_assert(0);
+			return;
+		}
+		dma_unmap_single(osdev->dev,
+				 tso_seg->seg.tso_frags[0].paddr,
+				 tso_seg->seg.tso_frags[0].length,
+				 QDF_DMA_TO_DEVICE);
+		tso_seg->seg.tso_frags[0].paddr = 0;
+	}
+}
+EXPORT_SYMBOL(__qdf_nbuf_unmap_tso_segment);
+
 /**
  * __qdf_nbuf_get_tso_num_seg() - function to divide a TSO nbuf
  * into segments