Browse Source

qcacld-3.0: change skb->cb to support 64 bit paddrs(1/2)

Propagation from qcacld-3.0 to qcacld-3.1.

Change skb->cb such that it is overlaid between tx
and rx and each one of them still fits within 48 bytes.
Note that this will break IPA and it will be fixed
subsequently by another change.

Change-Id: I96168aee99dbdbecbdbd4259597e179b02d29f5d
CRs-Fixed: 881090
Acked-by: Orhan K AKYILDIZ <[email protected]>
Houston Hoffman 9 năm trước cách đây
mục cha
commit
43d47fa659

+ 83 - 10
core/cdf/inc/cdf_nbuf.h

@@ -163,6 +163,7 @@ static inline int cdf_nbuf_get_num_frags(cdf_nbuf_t buf)
  */
 static inline int cdf_nbuf_get_frag_len(cdf_nbuf_t buf, int frag_num)
 {
+	BUG_ON(frag_num >= NBUF_CB_TX_MAX_EXTRA_FRAGS);
 	return __cdf_nbuf_get_frag_len(buf, frag_num);
 }
 
@@ -176,19 +177,21 @@ static inline int cdf_nbuf_get_frag_len(cdf_nbuf_t buf, int frag_num)
 static inline unsigned char *cdf_nbuf_get_frag_vaddr(cdf_nbuf_t buf,
 						     int frag_num)
 {
+	BUG_ON(frag_num >= NBUF_CB_TX_MAX_EXTRA_FRAGS);
 	return __cdf_nbuf_get_frag_vaddr(buf, frag_num);
 }
 
 /**
- * cdf_nbuf_get_frag_paddr_lo() - get fragment physical address low order bytes
+ * cdf_nbuf_get_frag_paddr() - get fragment physical address
  * @buf: Network buffer
  * @frag_num: Fragment number
  *
- * Return: Fragment physical address lo
+ * Return: Fragment physical address
  */
-static inline uint32_t cdf_nbuf_get_frag_paddr_lo(cdf_nbuf_t buf, int frag_num)
+static inline cdf_dma_addr_t cdf_nbuf_get_frag_paddr(cdf_nbuf_t buf, int frag_num)
 {
-	return __cdf_nbuf_get_frag_paddr_lo(buf, frag_num);
+	BUG_ON(frag_num >= NBUF_CB_TX_MAX_EXTRA_FRAGS);
+	return __cdf_nbuf_get_frag_paddr(buf, frag_num);
 }
 
 /**
@@ -200,6 +203,7 @@ static inline uint32_t cdf_nbuf_get_frag_paddr_lo(cdf_nbuf_t buf, int frag_num)
  */
 static inline int cdf_nbuf_get_frag_is_wordstream(cdf_nbuf_t buf, int frag_num)
 {
+	BUG_ON(frag_num >= NBUF_CB_TX_MAX_EXTRA_FRAGS);
 	return __cdf_nbuf_get_frag_is_wordstream(buf, frag_num);
 }
 
@@ -214,9 +218,79 @@ static inline int cdf_nbuf_get_frag_is_wordstream(cdf_nbuf_t buf, int frag_num)
 static inline void
 cdf_nbuf_set_frag_is_wordstream(cdf_nbuf_t buf, int frag_num, int is_wordstream)
 {
+	BUG_ON(frag_num >= NBUF_CB_TX_MAX_EXTRA_FRAGS);
 	__cdf_nbuf_set_frag_is_wordstream(buf, frag_num, is_wordstream);
 }
 
+/**
+ * cdf_nbuf_ipa_owned_get - gets the ipa_owned flag
+ * @buf: Network buffer
+ *
+ * Return: value of the ipa_owned flag
+ */
+static inline int cdf_nbuf_ipa_owned_get(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_ipa_owned_get(buf);
+}
+
+/**
+ * cdf_nbuf_ipa_owned_set - sets the ipa_owned flag
+ * @buf: Network buffer
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_ipa_owned_set(cdf_nbuf_t buf)
+{
+	__cdf_nbuf_ipa_owned_set(buf);
+}
+
+/**
+ * cdf_nbuf_ipa_priv_get - gets the ipa_priv field
+ * @buf: Network buffer
+ *
+ * Return: value of the ipa_priv field
+ */
+static inline int cdf_nbuf_ipa_priv_get(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_ipa_priv_get(buf);
+}
+
+/**
+ * cdf_nbuf_ipa_priv_set - sets the ipa_priv field
+ * @buf: Network buffer
+ *
+ * Return: none
+ */
+static inline void cdf_nbuf_ipa_priv_set(cdf_nbuf_t buf, uint32_t priv)
+{
+	BUG_ON(priv & 0x80000000); /* priv is 31 bits only */
+	__cdf_nbuf_ipa_priv_set(buf, priv);
+}
+
+/**
+ * cdf_nbuf_mapped_paddr_get - gets the paddr of nbuf->data
+ * @buf: Network buffer
+ *
+ * Return: DMA-mapped physical address of nbuf->data
+ */
+static inline cdf_dma_addr_t
+cdf_nbuf_mapped_paddr_get(cdf_nbuf_t buf)
+{
+	return __cdf_nbuf_mapped_paddr_get(buf);
+}
+
+/**
+ * cdf_nbuf_mapped_paddr_set - sets the paddr of nbuf->data
+ * @buf: Network buffer
+ *
+ * Return: none
+ */
+static inline void
+cdf_nbuf_mapped_paddr_set(cdf_nbuf_t buf, cdf_dma_addr_t paddr)
+{
+	__cdf_nbuf_mapped_paddr_set(buf, paddr);
+}
+
 /**
  * cdf_nbuf_frag_push_head() - push fragment head
  * @buf: Network buffer
@@ -231,10 +305,9 @@ static inline void
 cdf_nbuf_frag_push_head(cdf_nbuf_t buf,
 			int frag_len,
 			char *frag_vaddr,
-			uint32_t frag_paddr_lo, uint32_t frag_paddr_hi)
+			cdf_dma_addr_t frag_paddr)
 {
-	__cdf_nbuf_frag_push_head(buf, frag_len, frag_vaddr, frag_paddr_lo,
-				  frag_paddr_hi);
+	__cdf_nbuf_frag_push_head(buf, frag_len, frag_vaddr, frag_paddr);
 }
 
 #ifdef MEMORY_DEBUG
@@ -926,14 +999,14 @@ static inline void cdf_invalidate_range(void *start, void *end)
 
 #if defined(FEATURE_TSO)
 /**
- * cdf_nbuf_dec_num_frags() - decrement the number of fragments
+ * cdf_nbuf_reset_num_frags() - resets the number of frags to 0 (valid range: 0..1)
  * @buf: Network buffer
  *
  * Return: Number of fragments
  */
-static inline int cdf_nbuf_dec_num_frags(cdf_nbuf_t buf)
+static inline int cdf_nbuf_reset_num_frags(cdf_nbuf_t buf)
 {
-	return __cdf_nbuf_dec_num_frags(buf);
+	return __cdf_nbuf_reset_num_frags(buf);
 }
 
 /**

+ 1 - 1
core/cdf/inc/cdf_types.h

@@ -126,7 +126,7 @@ typedef __cdf_size_t cdf_size_t;
 typedef __cdf_dma_map_t cdf_dma_map_t;
 
 /**
- * tyepdef cdf_dma_addr_t - DMA address.
+ * typedef cdf_dma_addr_t - DMA address.
  */
 typedef __cdf_dma_addr_t cdf_dma_addr_t;
 

+ 2 - 0
core/cdf/src/cdf_memory.c

@@ -483,6 +483,8 @@ void cdf_mem_multi_pages_alloc(cdf_device_t osdev,
 	void **cacheable_pages = NULL;
 	uint16_t i;
 
+	CDF_BUG(PAGE_SIZE >= element_size);
+
 	pages->num_element_per_page = PAGE_SIZE / element_size;
 	if (!pages->num_element_per_page) {
 		cdf_print("Invalid page %d or element size %d",

+ 58 - 26
core/cdf/src/cdf_nbuf.c

@@ -136,18 +136,19 @@ void cdf_nbuf_set_state(cdf_nbuf_t nbuf, uint8_t current_state)
 	 * such as scan commands are not tracked
 	 */
 	uint8_t packet_type;
-	packet_type = NBUF_GET_PACKET_TRACK(nbuf);
+	packet_type = NBUF_CB_TX_PACKET_TRACK(nbuf);
 
 	if ((packet_type != NBUF_TX_PKT_DATA_TRACK) &&
 		(packet_type != NBUF_TX_PKT_MGMT_TRACK)) {
 		return;
 	}
-	NBUF_SET_PACKET_STATE(nbuf, current_state);
+	NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
 	cdf_nbuf_tx_desc_count_update(packet_type,
 					current_state);
 }
 
-cdf_nbuf_trace_update_t trace_update_cb = NULL;
+/* globals do not need to be initialized to NULL/0 */
+cdf_nbuf_trace_update_t trace_update_cb;
 
 /**
  * __cdf_nbuf_alloc() - Allocate nbuf
@@ -183,11 +184,9 @@ struct sk_buff *__cdf_nbuf_alloc(cdf_device_t osdev, size_t size, int reserve,
 	/*
 	 * The default is for netbuf fragments to be interpreted
 	 * as wordstreams rather than bytestreams.
-	 * Set the CVG_NBUF_MAX_EXTRA_FRAGS+1 wordstream_flags bits,
-	 * to provide this default.
 	 */
-	NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) =
-		(1 << (CVG_NBUF_MAX_EXTRA_FRAGS + 1)) - 1;
+	NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
+	NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
 
 	/*
 	 * XXX:how about we reserve first then align
@@ -217,8 +216,9 @@ struct sk_buff *__cdf_nbuf_alloc(cdf_device_t osdev, size_t size, int reserve,
  */
 void __cdf_nbuf_free(struct sk_buff *skb)
 {
-	if ((NBUF_OWNER_ID(skb) == IPA_NBUF_OWNER_ID) && NBUF_CALLBACK_FN(skb))
-		NBUF_CALLBACK_FN_EXEC(skb);
+	if (cdf_nbuf_ipa_owned_get(skb))
+		/* IPA cleanup function will need to be called here */
+		CDF_BUG(1);
 	else
 		dev_kfree_skb_any(skb);
 }
@@ -289,19 +289,20 @@ __cdf_nbuf_unmap(cdf_device_t osdev, struct sk_buff *skb, cdf_dma_dir_t dir)
 CDF_STATUS
 __cdf_nbuf_map_single(cdf_device_t osdev, cdf_nbuf_t buf, cdf_dma_dir_t dir)
 {
-	uint32_t paddr_lo;
+	cdf_dma_addr_t paddr;
 
 /* tempory hack for simulation */
 #ifdef A_SIMOS_DEVHOST
-	NBUF_MAPPED_PADDR_LO(buf) = paddr_lo = (uint32_t) buf->data;
+	NBUF_CB_PADDR(buf) = paddr = buf->data;
 	return CDF_STATUS_SUCCESS;
 #else
 	/* assume that the OS only provides a single fragment */
-	NBUF_MAPPED_PADDR_LO(buf) = paddr_lo =
-					dma_map_single(osdev->dev, buf->data,
-					skb_end_pointer(buf) - buf->data, dir);
-	return dma_mapping_error(osdev->dev, paddr_lo) ?
-	       CDF_STATUS_E_FAILURE : CDF_STATUS_SUCCESS;
+	NBUF_CB_PADDR(buf) = paddr =
+		dma_map_single(osdev->dev, buf->data,
+			       skb_end_pointer(buf) - buf->data, dir);
+	return dma_mapping_error(osdev->dev, paddr)
+		? CDF_STATUS_E_FAILURE
+		: CDF_STATUS_SUCCESS;
 #endif /* #ifdef A_SIMOS_DEVHOST */
 }
 
@@ -317,7 +318,7 @@ void
 __cdf_nbuf_unmap_single(cdf_device_t osdev, cdf_nbuf_t buf, cdf_dma_dir_t dir)
 {
 #if !defined(A_SIMOS_DEVHOST)
-	dma_unmap_single(osdev->dev, NBUF_MAPPED_PADDR_LO(buf),
+	dma_unmap_single(osdev->dev, NBUF_CB_PADDR(buf),
 			 skb_end_pointer(buf) - buf->data, dir);
 #endif /* #if !defined(A_SIMOS_DEVHOST) */
 }
@@ -794,6 +795,24 @@ uint8_t __cdf_nbuf_get_tso_cmn_seg_info(struct sk_buff *skb,
 	return 0;
 }
 
+/**
+ * cdf_dmaaddr_to_32s - return high and low parts of dma_addr
+ *
+ * Returns the high and low 32-bits of the DMA addr in the provided ptrs
+ *
+ * Return: N/A
+*/
+static inline void cdf_dmaaddr_to_32s(cdf_dma_addr_t dmaaddr,
+				      uint32_t *lo, uint32_t *hi)
+{
+	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
+		*lo = (uint32_t) (dmaaddr & 0x0ffffffff);
+		*hi = (uint32_t) (dmaaddr >> 32);
+	} else {
+		*lo = dmaaddr;
+		*hi = 0;
+	}
+}
 /**
  * __cdf_nbuf_get_tso_info() - function to divide a TSO nbuf
  * into segments
@@ -815,7 +834,8 @@ uint32_t __cdf_nbuf_get_tso_info(cdf_device_t osdev, struct sk_buff *skb,
 
 	/* segment specific */
 	char *tso_frag_vaddr;
-	uint32_t tso_frag_paddr_32 = 0;
+	cdf_dma_addr_t tso_frag_paddr = 0;
+	uint32_t       tso_frag_paddr_lo, tso_frag_paddr_hi;
 	uint32_t num_seg = 0;
 	struct cdf_tso_seg_elem_t *curr_seg;
 	const struct skb_frag_struct *frag = NULL;
@@ -845,8 +865,9 @@ uint32_t __cdf_nbuf_get_tso_info(cdf_device_t osdev, struct sk_buff *skb,
 	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
 	/* get the length of the next tso fragment */
 	tso_frag_len = min(skb_frag_len, tso_seg_size);
-	tso_frag_paddr_32 = dma_map_single(osdev->dev,
+	tso_frag_paddr = dma_map_single(osdev->dev,
 		 tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
+	cdf_dmaaddr_to_32s(tso_frag_paddr, &tso_frag_paddr_lo, &tso_frag_paddr_hi);
 
 	num_seg = tso_info->num_segs;
 	tso_info->num_segs = 0;
@@ -895,9 +916,16 @@ uint32_t __cdf_nbuf_get_tso_info(cdf_device_t osdev, struct sk_buff *skb,
 		curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info.eit_hdr;
 		curr_seg->seg.tso_frags[0].length = tso_cmn_info.eit_hdr_len;
 		tso_info->total_len = curr_seg->seg.tso_frags[0].length;
-		curr_seg->seg.tso_frags[0].paddr_low_32 =
-			 dma_map_single(osdev->dev, tso_cmn_info.eit_hdr,
+		{
+			cdf_dma_addr_t mapped;
+			uint32_t       lo, hi;
+
+			mapped = dma_map_single(osdev->dev, tso_cmn_info.eit_hdr,
 				tso_cmn_info.eit_hdr_len, DMA_TO_DEVICE);
+			cdf_dmaaddr_to_32s(mapped, &lo, &hi);
+			curr_seg->seg.tso_frags[0].paddr_low_32 = lo;
+			curr_seg->seg.tso_frags[0].paddr_upper_16 = (hi & 0xffff);
+		}
 		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
 		curr_seg->seg.num_frags++;
 
@@ -913,9 +941,10 @@ uint32_t __cdf_nbuf_get_tso_info(cdf_device_t osdev, struct sk_buff *skb,
 
 			/* increment the TCP sequence number */
 			tso_cmn_info.tcp_seq_num += tso_frag_len;
-			curr_seg->seg.tso_frags[i].paddr_upper_16 = 0;
+			curr_seg->seg.tso_frags[i].paddr_upper_16 =
+				(tso_frag_paddr_hi & 0xffff);
 			curr_seg->seg.tso_frags[i].paddr_low_32 =
-				 tso_frag_paddr_32;
+				 tso_frag_paddr_lo;
 
 			/* if there is no more data left in the skb */
 			if (!skb_proc)
@@ -939,17 +968,19 @@ uint32_t __cdf_nbuf_get_tso_info(cdf_device_t osdev, struct sk_buff *skb,
 				tso_frag_len = min(skb_frag_len, tso_seg_size);
 				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
 				if (from_frag_table) {
-					tso_frag_paddr_32 =
+					tso_frag_paddr =
 						 skb_frag_dma_map(osdev->dev,
 							 frag, foffset,
 							 tso_frag_len,
 							 DMA_TO_DEVICE);
+					cdf_dmaaddr_to_32s(tso_frag_paddr, &tso_frag_paddr_lo, &tso_frag_paddr_hi);
 				} else {
-					tso_frag_paddr_32 =
+					tso_frag_paddr =
 						 dma_map_single(osdev->dev,
 							 tso_frag_vaddr,
 							 tso_frag_len,
 							 DMA_TO_DEVICE);
+					cdf_dmaaddr_to_32s(tso_frag_paddr, &tso_frag_paddr_lo, &tso_frag_paddr_hi);
 				}
 			} else { /* the next fragment is not contiguous */
 				tso_frag_len = min(skb_frag_len, tso_seg_size);
@@ -957,9 +988,10 @@ uint32_t __cdf_nbuf_get_tso_info(cdf_device_t osdev, struct sk_buff *skb,
 				skb_frag_len = skb_frag_size(frag);
 
 				tso_frag_vaddr = skb_frag_address(frag);
-				tso_frag_paddr_32 = skb_frag_dma_map(osdev->dev,
+				tso_frag_paddr = skb_frag_dma_map(osdev->dev,
 					 frag, 0, tso_frag_len,
 					 DMA_TO_DEVICE);
+				cdf_dmaaddr_to_32s(tso_frag_paddr, &tso_frag_paddr_lo, &tso_frag_paddr_hi);
 				foffset += tso_frag_len;
 				from_frag_table = 1;
 				j++;

+ 7 - 7
core/cdf/src/cdf_trace.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -757,13 +757,13 @@ void cdf_dp_trace_set_track(cdf_nbuf_t nbuf)
 	if (g_cdf_dp_trace_data.proto_bitmap != 0) {
 		if (cds_pkt_get_proto_type(nbuf,
 			g_cdf_dp_trace_data.proto_bitmap, 0)) {
-			CDF_NBUF_SET_DP_TRACE(nbuf, 1);
+			NBUF_CB_TX_DP_TRACE(nbuf) = 1;
 		}
 	}
 	if ((g_cdf_dp_trace_data.no_of_record != 0) &&
 		(g_cdf_dp_trace_data.count %
 			g_cdf_dp_trace_data.no_of_record == 0)) {
-		CDF_NBUF_SET_DP_TRACE(nbuf, 1);
+		NBUF_CB_TX_DP_TRACE(nbuf) = 1;
 	}
 	spin_unlock_bh(&l_dp_trace_lock);
 	return;
@@ -793,7 +793,7 @@ static void dump_hex_trace(uint8_t *buf, uint8_t buf_len)
  *
  * Return: None
  */
-void cdf_dp_display_record(struct cdf_dp_trace_record_s *pRecord ,
+void cdf_dp_display_record(struct cdf_dp_trace_record_s *pRecord,
 				uint16_t recIndex)
 {
 	cdf_print("INDEX: %04d TIME: %012llu CODE: %02d\n", recIndex,
@@ -857,14 +857,14 @@ void cdf_dp_trace(cdf_nbuf_t nbuf, enum CDF_DP_TRACE_ID code,
 	}
 
 	/* Return when the packet is not a data packet */
-	if (NBUF_GET_PACKET_TRACK(nbuf) != NBUF_TX_PKT_DATA_TRACK)
+	if (NBUF_CB_TX_PACKET_TRACK(nbuf) != NBUF_TX_PKT_DATA_TRACK)
 		return;
 
 	/* Return when nbuf is not marked for dp tracing or
 	 * verbosity does not allow
 	 */
-	if (cdf_dp_trace_enable_track(code) == false ||
-			!CDF_NBUF_GET_DP_TRACE(nbuf))
+	if ((cdf_dp_trace_enable_track(code) == false) ||
+	    !NBUF_CB_TX_DP_TRACE(nbuf))
 		return;
 
 	/* Acquire the lock so that only one thread at a time can fill the ring

+ 429 - 441
core/cdf/src/i_cdf_nbuf.h

@@ -37,14 +37,9 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/dma-mapping.h>
-#include <linux/types.h>
-#include <linux/scatterlist.h>
 #include <cdf_types.h>
 #include <cdf_status.h>
 
-#define __CDF_NBUF_NULL   NULL
-
-
 /*
  * Use socket buffer as the underlying implentation as skbuf .
  * Linux use sk_buff to represent both packet and data,
@@ -52,316 +47,286 @@
  */
 typedef struct sk_buff *__cdf_nbuf_t;
 
-typedef void (*__cdf_nbuf_callback_fn)(struct sk_buff *skb);
-#define OSDEP_EAPOL_TID 6       /* send it on VO queue */
-
-/* CVG_NBUF_MAX_OS_FRAGS -
+/* NBUF_CB_TX_MAX_OS_FRAGS -
  * max tx fragments provided by the OS
  */
-#define CVG_NBUF_MAX_OS_FRAGS 1
+#define NBUF_CB_TX_MAX_OS_FRAGS 1
 
-/* CVG_NBUF_MAX_EXTRA_FRAGS -
+/* NBUF_CB_TX_MAX_EXTRA_FRAGS -
  * max tx fragments added by the driver
- * The driver will always add one tx fragment (the tx descriptor) and may
- * add a second tx fragment (e.g. a TSO segment's modified IP header).
+ * The driver will always add one tx fragment (the tx descriptor)
  */
-#define CVG_NBUF_MAX_EXTRA_FRAGS 2
+#define NBUF_CB_TX_MAX_EXTRA_FRAGS 2
 
-typedef void (*cdf_nbuf_trace_update_t)(char *);
+/*
+ * Make sure that cdf_dma_addr_t in the cb block is always 64 bit aligned
+ */
+typedef union {
+	uint64_t       u64;
+	cdf_dma_addr_t dma_addr;
+} cdf_paddr_t;
 
 /**
- * struct cvg_nbuf_cb - network buffer control block
- * @data_attr: Value that is programmed in CE descriptor, contains:
- *		1) CE classification enablement bit
- *		2) Pkt type (802.3 or Ethernet Type II)
- *		3) Pkt Offset (Usually the length of HTT/HTC desc.)
- * @trace: info for DP tracing
- * @mapped_paddr_lo: DMA mapping info
- * @extra_frags: Extra tx fragments
- * @owner_id: Owner id
- * @cdf_nbuf_callback_fn: Callback function
- * @priv_data: IPA specific priv data
- * @proto_type: Protocol type
- * @vdev_id: vdev id
- * @tx_htt2_frm: HTT 2 frame
- * @tx_htt2_reserved: HTT 2 reserved bits
+ * struct cdf_nbuf_cb - network buffer control block contents (skb->cb)
+ *                    - data passed between layers of the driver.
+ *
+ * Notes:
+ *   1. Hard limited to 48 bytes. Please count your bytes
+ *   2. The size of this structure has to be easily calculatable and
+ *      consistently so: do not use any conditional compile flags
+ *   3. Split into a common part followed by a tx/rx overlay
+ *   4. There is only one extra frag, which represents the HTC/HTT header
+ *
+ * @common.paddr   : physical addressed retrived by dma_map of nbuf->data
+ * @rx.lro_flags   : hardware assisted flags:
+ *   @rx.lro_eligible    : flag to indicate whether the MSDU is LRO eligible
+ *   @rx.tcp_proto       : L4 protocol is TCP
+ *   @rx.tcp_pure_ack    : A TCP ACK packet with no payload
+ *   @rx.ipv6_proto      : L3 protocol is IPV6
+ *   @rx.ip_offset       : offset to IP header
+ *   @rx.tcp_offset      : offset to TCP header
+ *   @rx.tcp_udp_chksum  : L4 payload checksum
+ *   @rx.tcp_seq_num     : TCP sequence number
+ *   @rx.tcp_ack_num     : TCP ACK number
+ *   @rx.flow_id_toeplitz: 32-bit 5-tuple Toeplitz hash
+ * @tx.extra_frag  : represent HTC/HTT header
+ * @tx.efrag.vaddr       : virtual address of ~
+ * @tx.efrag.paddr       : physical/DMA address of ~
+ * @tx.efrag.len         : length of efrag pointed by the above pointers
+ * @tx.efrag.num         : number of extra frags ( 0 or 1)
+ * @tx.efrag.flags.nbuf  : flag, nbuf payload to be swapped (wordstream)
+ * @tx.efrag.flags.efrag : flag, efrag payload to be swapped (wordstream)
+ * @tx.efrag.flags.chfrag_start: used by WIN
+ * @tx.efrag.flags.chfrag_end : used by WIN
+ * @tx.data_attr   : value that is programmed into CE descr, includes:
+ *                 + (1) CE classification enablement bit
+ *                 + (2) packet type (802.3 or Ethernet type II)
+ *                 + (3) packet offset (usually length of HTC/HTT descr)
+ * @tx.trace       : combined structure for DP and protocol trace
+ * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
+ *                       +               (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
+ * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
+ * @tx.trace.proto_type  : bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
+ *                       +                              (MGMT_ACTION)] - 4 bits
+ * @tx.trace.dp_trace    : flag (Datapath trace)
+ * @tx.trace.htt2_frm    : flag (high-latency path only)
+ * @tx.trace.vdev_id     : vdev (for protocol trace)
+ * @tx.ipa.owned   : packet owned by IPA
+ * @tx.ipa.priv    : private data, used by IPA
  */
-struct cvg_nbuf_cb {
-	uint32_t data_attr;
-	/*
-	 * Store info for data path tracing
-	 */
-	struct {
-		uint8_t packet_state;
-		uint8_t packet_track;
-		uint8_t dp_trace;
-	} trace;
-
-	/*
-	 * Store the DMA mapping info for the network buffer fragments
-	 * provided by the OS.
-	 */
-	uint32_t mapped_paddr_lo[CVG_NBUF_MAX_OS_FRAGS];
-#ifdef DEBUG_RX_RING_BUFFER
-	uint32_t map_index;
-#endif
-
-	/* store extra tx fragments provided by the driver */
-	struct {
-		/* vaddr -
-		 * CPU address (a.k.a. virtual address) of the tx fragments
-		 * added by the driver
-		 */
-		unsigned char *vaddr[CVG_NBUF_MAX_EXTRA_FRAGS];
-		/* paddr_lo -
-		 * bus address (a.k.a. physical address) of the tx fragments
-		 * added by the driver
-		 */
-		uint32_t paddr_lo[CVG_NBUF_MAX_EXTRA_FRAGS];
-		uint16_t len[CVG_NBUF_MAX_EXTRA_FRAGS];
-		uint8_t num;    /* how many extra frags has the driver added */
-		uint8_t
-		/*
-		 * Store a wordstream vs. bytestream flag for each extra
-		 * fragment, plus one more flag for the original fragment(s)
-		 * of the netbuf.
-		 */
-wordstream_flags:CVG_NBUF_MAX_EXTRA_FRAGS + 1;
-	} extra_frags;
-	uint32_t owner_id;
-	__cdf_nbuf_callback_fn cdf_nbuf_callback_fn;
-	unsigned long priv_data;
-#ifdef QCA_PKT_PROTO_TRACE
-	unsigned char proto_type;
-	unsigned char vdev_id;
-#endif /* QCA_PKT_PROTO_TRACE */
-#ifdef QCA_TX_HTT2_SUPPORT
-	unsigned char tx_htt2_frm:1;
-	unsigned char tx_htt2_reserved:7;
-#endif /* QCA_TX_HTT2_SUPPORT */
-};
-#ifdef DEBUG_RX_RING_BUFFER
-#define NBUF_MAP_ID(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->map_index)
-#endif
-#define NBUF_OWNER_ID(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->owner_id)
-#define NBUF_OWNER_PRIV_DATA(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->priv_data)
-#define NBUF_CALLBACK_FN(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->cdf_nbuf_callback_fn)
-#define NBUF_CALLBACK_FN_EXEC(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->cdf_nbuf_callback_fn)(skb)
-#define NBUF_MAPPED_PADDR_LO(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->mapped_paddr_lo[0])
-#define NBUF_NUM_EXTRA_FRAGS(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.num)
-#define NBUF_EXTRA_FRAG_VADDR(skb, frag_num) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.vaddr[(frag_num)])
-#define NBUF_EXTRA_FRAG_PADDR_LO(skb, frag_num)	\
-	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.paddr_lo[(frag_num)])
-#define NBUF_EXTRA_FRAG_LEN(skb, frag_num) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.len[(frag_num)])
-#define NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.wordstream_flags)
-
-#ifdef QCA_PKT_PROTO_TRACE
-#define NBUF_SET_PROTO_TYPE(skb, proto_type) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->proto_type = proto_type)
-#define NBUF_GET_PROTO_TYPE(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->proto_type)
-#else
-#define NBUF_SET_PROTO_TYPE(skb, proto_type);
-#define NBUF_GET_PROTO_TYPE(skb) 0;
-#endif /* QCA_PKT_PROTO_TRACE */
-
-#ifdef QCA_TX_HTT2_SUPPORT
-#define NBUF_SET_TX_HTT2_FRM(skb, candi) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->tx_htt2_frm = candi)
-#define NBUF_GET_TX_HTT2_FRM(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->tx_htt2_frm)
-#else
-#define NBUF_SET_TX_HTT2_FRM(skb, candi)
-#define NBUF_GET_TX_HTT2_FRM(skb) 0
-#endif /* QCA_TX_HTT2_SUPPORT */
+struct cdf_nbuf_cb {
+	/* common */
+	cdf_paddr_t paddr; /* of skb->data */
+	/* valid only in one direction */
+	union {
+		/* Note: MAX: 40 bytes */
+		struct {
+			uint32_t lro_eligible:1,
+				tcp_proto:1,
+				tcp_pure_ack:1,
+				ipv6_proto:1,
+				ip_offset:7,
+				tcp_offset:7;
+			uint32_t tcp_udp_chksum:16,
+				tcp_win:16;
+			uint32_t tcp_seq_num;
+			uint32_t tcp_ack_num;
+			uint32_t flow_id_toeplitz;
+		} rx; /* 20 bytes */
+
+		/* Note: MAX: 40 bytes */
+		struct {
+			struct {
+				unsigned char *vaddr;
+				cdf_paddr_t paddr;
+				uint16_t len;
+				uint8_t num; /* 0: cmn.addr; 1: tx.efrag */
+				union {
+					struct {
+						uint8_t flag_efrag:1,
+							flag_nbuf:1,
+							/* following for WIN */
+							flag_chfrag_start:1,
+							flag_chfrag_end:1,
+							reserved:4;
+					} bits;
+					uint8_t u8;
+				} flags;
+			} extra_frag; /* 20 bytes */
+			uint32_t data_attr; /* 4 bytes */
+			union {
+				struct {
+					uint8_t packet_state;
+					uint8_t packet_track:4,
+						proto_type:4;
+					uint8_t dp_trace:1,
+						htt2_frm:1,
+						rsrvd:6;
+					uint8_t vdev_id;
+				} hl;
+				struct {
+					uint8_t packet_state;
+					uint8_t packet_track:4,
+						proto_type:4;
+					uint8_t dp_trace:1,
+						rsrvd:7;
+					uint8_t vdev_id;
+				} ll; /* low latency */
+			} trace; /* 4 bytes */
+			struct {
+				uint32_t owned:1,
+					priv:31;
+			} ipa; /* 4 */
+		} tx; /* 32 bytes */
+	} u;
+}; /* struct cdf_nbuf_cb: MAX 48 bytes */
 
-#define NBUF_DATA_ATTR_SET(skb, data_attr)	\
-	(((struct cvg_nbuf_cb *)((skb)->cb))->data_attr = data_attr)
-
-#define NBUF_DATA_ATTR_GET(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->data_attr)
-
-#if defined(FEATURE_LRO)
 /**
- * struct nbuf_rx_cb - network buffer control block
- * on the receive path of the skb
- * @lro_eligible: indicates whether the msdu is LRO eligible
- * @tcp_proto: indicates if this is a TCP packet
- * @ipv6_proto: indicates if this is an IPv6 packet
- * @ip_offset: offset to the IP header
- * @tcp_offset: offset to the TCP header
- * @tcp_udp_chksum: TCP payload checksum
- * @tcp_seq_num: TCP sequence number
- * @tcp_ack_num: TCP acknowledgement number
- * @flow_id_toeplitz: 32 bit 5-tuple flow id toeplitz hash
+ *  access macros to cdf_nbuf_cb
+ *  Note: These macros can be used as L-values as well as R-values.
+ *        When used as R-values, they effectively function as "get" macros
+ *        When used as L_values, they effectively function as "set" macros
  */
-struct nbuf_rx_cb {
-	uint32_t lro_eligible:1,
-		tcp_proto:1,
-		tcp_pure_ack:1,
-		ipv6_proto:1,
-		ip_offset:7,
-		tcp_offset:7;
-	uint32_t tcp_udp_chksum:16,
-		tcp_win:16;
-	uint32_t tcp_seq_num;
-	uint32_t tcp_ack_num;
-	uint32_t flow_id_toeplitz;
-};
-
-#define NBUF_LRO_ELIGIBLE(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->lro_eligible)
-#define NBUF_TCP_PROTO(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_proto)
-#define NBUF_TCP_PURE_ACK(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_pure_ack)
-#define NBUF_IPV6_PROTO(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->ipv6_proto)
-#define NBUF_IP_OFFSET(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->ip_offset)
-#define NBUF_TCP_OFFSET(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_offset)
-#define NBUF_TCP_CHKSUM(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_udp_chksum)
-#define NBUF_TCP_SEQ_NUM(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_seq_num)
-#define NBUF_TCP_ACK_NUM(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_ack_num)
-#define NBUF_TCP_WIN(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_win)
-#define NBUF_FLOW_ID_TOEPLITZ(skb)	\
-	(((struct nbuf_rx_cb *)((skb)->cb))->flow_id_toeplitz)
-#endif /* FEATURE_LRO */
-
-#define NBUF_SET_PACKET_STATE(skb, pkt_state) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.packet_state = \
-								pkt_state)
-#define NBUF_GET_PACKET_STATE(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.packet_state)
-
-#define NBUF_SET_PACKET_TRACK(skb, pkt_track) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.packet_track = \
-								pkt_track)
-#define NBUF_GET_PACKET_TRACK(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.packet_track)
-
+#define NBUF_CB_PADDR(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
+#define NBUF_CB_RX_LRO_ELIGIBLE(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
+#define NBUF_CB_RX_TCP_PROTO(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
+#define NBUF_CB_RX_TCP_PURE_ACK(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
+#define NBUF_CB_RX_IPV6_PROTO(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
+#define NBUF_CB_RX_IP_OFFSET(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
+#define NBUF_CB_RX_TCP_OFFSET(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
+#define NBUF_CB_RX_TCP_CHKSUM(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
+#define NBUF_CB_RX_TCP_OFFSET(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
+#define NBUF_CB_RX_TCP_WIN(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
+#define NBUF_CB_RX_TCP_SEQ_NUM(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_seq_num)
+#define NBUF_CB_RX_TCP_ACK_NUM(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_ack_num)
+#define NBUF_CB_RX_FLOW_ID_TOEPLITZ(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id_toeplitz)
+
+#define NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.vaddr)
+#define NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.paddr.dma_addr)
+#define NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.len)
+#define NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.num)
+#define NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.flags.u8)
+#define NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.flags.bits.flag_efrag)
+#define NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.flags.bits.flag_nbuf)
+#define NBUF_CB_TX_DATA_ATTR(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.data_attr)
+#define NBUF_CB_TX_PACKET_STATE(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.trace.ll.packet_state)
+#define NBUF_CB_TX_PACKET_TRACK(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.trace.ll.packet_track)
+#define NBUF_CB_TX_PROTO_TYPE(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.trace.ll.proto_type)
 #define NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
 	cdf_nbuf_set_state(skb, PACKET_STATE)
-
-#define CDF_NBUF_SET_DP_TRACE(skb, enable) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.dp_trace \
-								= enable)
-#define CDF_NBUF_GET_DP_TRACE(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.dp_trace)
-
-#define __cdf_nbuf_get_num_frags(skb)		   \
-	/* assume the OS provides a single fragment */ \
-	(NBUF_NUM_EXTRA_FRAGS(skb) + 1)
+#define NBUF_CB_TX_DP_TRACE(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.trace.ll.dp_trace)
+#define NBUF_CB_TX_HL_HTT2_FRM(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.trace.hl.htt2_frm)
+#define NBUF_CB_TX_VDEV_ID(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.trace.ll.vdev_id)
+#define NBUF_CB_TX_IPA_OWNED(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.ipa.owned)
+#define NBUF_CB_TX_IPA_PRIV(skb) \
+	(((struct cdf_nbuf_cb *)((skb)->cb))->u.tx.ipa.priv)
+
+#define __cdf_nbuf_get_num_frags(skb) \
+	(NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
 
 #if defined(FEATURE_TSO)
-#define __cdf_nbuf_dec_num_frags(skb)		   \
-	(NBUF_NUM_EXTRA_FRAGS(skb)--)
+#define __cdf_nbuf_reset_num_frags(skb) \
+	(NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
 #endif
+/**
+ *   end of nbuf->cb access macros
+ */
+
+typedef void (*cdf_nbuf_trace_update_t)(char *);
 
-#define __cdf_nbuf_frag_push_head( \
-		skb, frag_len, frag_vaddr, frag_paddr_lo, frag_paddr_hi) \
-	do { \
-		int frag_num = NBUF_NUM_EXTRA_FRAGS(skb)++; \
-		NBUF_EXTRA_FRAG_VADDR(skb, frag_num) = frag_vaddr; \
-		NBUF_EXTRA_FRAG_PADDR_LO(skb, frag_num) = frag_paddr_lo; \
-		NBUF_EXTRA_FRAG_LEN(skb, frag_num) = frag_len; \
+#define __cdf_nbuf_mapped_paddr_get(skb) \
+	NBUF_CB_PADDR(skb)
+
+#define __cdf_nbuf_mapped_paddr_set(skb, paddr)	\
+	(NBUF_CB_PADDR(skb) = (paddr))
+
+#define __cdf_nbuf_frag_push_head(					\
+	skb, frag_len, frag_vaddr, frag_paddr)				\
+	do {								\
+		NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;			\
+		NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;		\
+		NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;		\
+		NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
 	} while (0)
 
-#define __cdf_nbuf_get_frag_len(skb, frag_num)		 \
-	((frag_num < NBUF_NUM_EXTRA_FRAGS(skb)) ?	     \
-	 NBUF_EXTRA_FRAG_LEN(skb, frag_num) : (skb)->len)
-
-#define __cdf_nbuf_get_frag_vaddr(skb, frag_num)	      \
-	((frag_num < NBUF_NUM_EXTRA_FRAGS(skb)) ?		  \
-	 NBUF_EXTRA_FRAG_VADDR(skb, frag_num) : ((skb)->data))
-
-#define __cdf_nbuf_get_frag_paddr_lo(skb, frag_num)		 \
-	((frag_num < NBUF_NUM_EXTRA_FRAGS(skb)) ?		     \
-	 NBUF_EXTRA_FRAG_PADDR_LO(skb, frag_num) :		  \
-	/* assume that the OS only provides a single fragment */ \
-	 NBUF_MAPPED_PADDR_LO(skb))
-
-#define __cdf_nbuf_get_frag_is_wordstream(skb, frag_num) \
-	((frag_num < NBUF_NUM_EXTRA_FRAGS(skb)) ?	     \
-	 (NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) >>	  \
-	  (frag_num)) & 0x1 :			       \
-	 (NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) >>	  \
-	  (CVG_NBUF_MAX_EXTRA_FRAGS)) & 0x1)
-
-#define __cdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wordstream)	\
-	do {								    \
-		if (frag_num >= NBUF_NUM_EXTRA_FRAGS(skb)) {			\
-			frag_num = CVG_NBUF_MAX_EXTRA_FRAGS;			    \
-		}								\
-		/* clear the old value */					\
-		NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) &= ~(1 << frag_num);	\
-		/* set the new value */						\
-		NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) |=			\
-			((is_wordstream) << frag_num);				    \
+#define __cdf_nbuf_get_frag_vaddr(skb, frag_num)		\
+	((frag_num < NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
+	 NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))
+
+#define __cdf_nbuf_get_frag_paddr(skb, frag_num)			\
+	((frag_num < NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?			\
+	 NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
+	 /* assume that the OS only provides a single fragment */	\
+	 NBUF_CB_PADDR(skb))
+
+#define __cdf_nbuf_get_frag_len(skb, frag_num)			\
+	((frag_num < NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
+	 NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)
+
+#define __cdf_nbuf_get_frag_is_wordstream(skb, frag)			\
+	((frag_num < NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))			\
+	 ? (NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
+	 : (NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
+
+#define __cdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
+	do {								\
+		if (frag_num >= NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
+			frag_num = NBUF_CB_TX_MAX_EXTRA_FRAGS;		\
+		if (frag_num)						\
+			NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = is_wstrm; \
+		else							\
+			NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = is_wstrm; \
 	} while (0)
 
 #define __cdf_nbuf_trace_set_proto_type(skb, proto_type) \
-	NBUF_SET_PROTO_TYPE(skb, proto_type)
+	(NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type))
 #define __cdf_nbuf_trace_get_proto_type(skb) \
-	NBUF_GET_PROTO_TYPE(skb);
+	NBUF_CB_TX_PROTO_TYPE(skb)
 
-/**
- * __cdf_nbuf_data_attr_get() -  Retrieves the data_attr value
- *				 from cvg_nbuf_cb (skb->cb)
- * @skb: Pointer to struct sk_buff
- *
- * Return: data_attr
- */
 #define __cdf_nbuf_data_attr_get(skb)		\
-	NBUF_DATA_ATTR_GET(skb)
+	NBUF_CB_TX_DATA_ATTR(skb)
+#define __cdf_nbuf_data_attr_set(skb, data_attr)	\
+	(NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
 
-/**
- * __cdf_nbuf_data_attr_set()  -  Sets the data_attr value
- *				  in cvg_nbuf_cb (skb->cb)
- * @skb: Pointer to struct sk_buff
- * @data_attr: packet type from the enum cdf_txrx_pkt_type
- *
- * Return:
- */
-static inline void
-__cdf_nbuf_data_attr_set(struct sk_buff *skb,
-			     uint32_t data_attr)
-{
-	NBUF_DATA_ATTR_SET(skb, data_attr);
-}
+#define __cdf_nbuf_ipa_owned_get(skb) \
+	NBUF_CB_TX_IPA_OWNED(skb)
 
-/**
- * typedef struct __cdf_nbuf_queue_t -  network buffer queue
- * @head: Head pointer
- * @tail: Tail pointer
- * @qlen: Queue length
- */
-typedef struct __cdf_nbuf_qhead {
-	struct sk_buff *head;
-	struct sk_buff *tail;
-	unsigned int qlen;
-} __cdf_nbuf_queue_t;
+#define __cdf_nbuf_ipa_owned_set(skb) \
+	(NBUF_CB_TX_IPA_OWNED(skb) = 1)
 
-/*
- * Use sk_buff_head as the implementation of cdf_nbuf_queue_t.
- * Because the queue head will most likely put in some structure,
- * we don't use pointer type as the definition.
- */
+#define __cdf_nbuf_ipa_priv_get(skb)	\
+	NBUF_CB_TX_IPA_PRIV(skb)
+
+#define __cdf_nbuf_ipa_priv_set(skb, priv)	\
+	(NBUF_CB_TX_IPA_PRIV(skb) = (priv))
 
 /*
  * prototypes. Implemented in cdf_nbuf.c
@@ -417,9 +382,9 @@ static inline size_t __cdf_nbuf_len(struct sk_buff *skb)
 {
 	int i, extra_frag_len = 0;
 
-	i = NBUF_NUM_EXTRA_FRAGS(skb);
-	while (i-- > 0)
-		extra_frag_len += NBUF_EXTRA_FRAG_LEN(skb, i);
+	i = NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
+	if (i > 0)
+		extra_frag_len = NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
 
 	return extra_frag_len + skb->len;
 }
@@ -460,7 +425,9 @@ __cdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
 	return __cdf_os_to_status(error);
 }
 
-/**************************nbuf manipulation routines*****************/
+/*
+ * nbuf manipulation routines
+ */
 
 /**
  * __cdf_nbuf_headroom() - return the amount of tail space available
@@ -494,8 +461,8 @@ static inline uint32_t __cdf_nbuf_tailroom(struct sk_buff *skb)
  */
 static inline uint8_t *__cdf_nbuf_push_head(struct sk_buff *skb, size_t size)
 {
-	if (NBUF_MAPPED_PADDR_LO(skb))
-		NBUF_MAPPED_PADDR_LO(skb) -= size;
+	if (NBUF_CB_PADDR(skb))
+		NBUF_CB_PADDR(skb) -= size;
 
 	return skb_push(skb, size);
 }
@@ -530,8 +497,8 @@ static inline uint8_t *__cdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
  */
 static inline uint8_t *__cdf_nbuf_pull_head(struct sk_buff *skb, size_t size)
 {
-	if (NBUF_MAPPED_PADDR_LO(skb))
-		NBUF_MAPPED_PADDR_LO(skb) += size;
+	if (NBUF_CB_PADDR(skb))
+		NBUF_CB_PADDR(skb) += size;
 
 	return skb_pull(skb, size);
 }
@@ -548,151 +515,6 @@ static inline void __cdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
 	return skb_trim(skb, skb->len - size);
 }
 
-/*********************nbuf private buffer routines*************/
-
-/**
- * __cdf_nbuf_peek_header() - return the header's addr & m_len
- * @skb: Pointer to network buffer
- * @addr: Pointer to store header's addr
- * @m_len: network buffer length
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
-{
-	*addr = skb->data;
-	*len = skb->len;
-}
-
-/******************Custom queue*************/
-
-/**
- * __cdf_nbuf_queue_init() - initiallize the queue head
- * @qhead: Queue head
- *
- * Return: CDF status
- */
-static inline CDF_STATUS __cdf_nbuf_queue_init(__cdf_nbuf_queue_t *qhead)
-{
-	memset(qhead, 0, sizeof(struct __cdf_nbuf_qhead));
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * __cdf_nbuf_queue_add() - add an skb in the tail of the queue
- * @qhead: Queue head
- * @skb: Pointer to network buffer
- *
- * This is a lockless version, driver must acquire locks if it
- * needs to synchronize
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_queue_add(__cdf_nbuf_queue_t *qhead, struct sk_buff *skb)
-{
-	skb->next = NULL;       /*Nullify the next ptr */
-
-	if (!qhead->head)
-		qhead->head = skb;
-	else
-		qhead->tail->next = skb;
-
-	qhead->tail = skb;
-	qhead->qlen++;
-}
-
-/**
- * __cdf_nbuf_queue_insert_head() - add an skb at  the head  of the queue
- * @qhead: Queue head
- * @skb: Pointer to network buffer
- *
- * This is a lockless version, driver must acquire locks if it needs to
- * synchronize
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_queue_insert_head(__cdf_nbuf_queue_t *qhead, __cdf_nbuf_t skb)
-{
-	if (!qhead->head) {
-		/*Empty queue Tail pointer Must be updated */
-		qhead->tail = skb;
-	}
-	skb->next = qhead->head;
-	qhead->head = skb;
-	qhead->qlen++;
-}
-
-/**
- * __cdf_nbuf_queue_remove() - remove a skb from the head of the queue
- * @qhead: Queue head
- *
- * This is a lockless version. Driver should take care of the locks
- *
- * Return: skb or NULL
- */
-static inline
-struct sk_buff *__cdf_nbuf_queue_remove(__cdf_nbuf_queue_t *qhead)
-{
-	__cdf_nbuf_t tmp = NULL;
-
-	if (qhead->head) {
-		qhead->qlen--;
-		tmp = qhead->head;
-		if (qhead->head == qhead->tail) {
-			qhead->head = NULL;
-			qhead->tail = NULL;
-		} else {
-			qhead->head = tmp->next;
-		}
-		tmp->next = NULL;
-	}
-	return tmp;
-}
-
-/**
- * __cdf_nbuf_queue_len() - return the queue length
- * @qhead: Queue head
- *
- * Return: Queue length
- */
-static inline uint32_t __cdf_nbuf_queue_len(__cdf_nbuf_queue_t *qhead)
-{
-	return qhead->qlen;
-}
-
-/**
- * __cdf_nbuf_queue_next() - return the next skb from packet chain
- * @skb: Pointer to network buffer
- *
- * This API returns the next skb from packet chain, remember the skb is
- * still in the queue
- *
- * Return: NULL if no packets are there
- */
-static inline struct sk_buff *__cdf_nbuf_queue_next(struct sk_buff *skb)
-{
-	return skb->next;
-}
-
-/**
- * __cdf_nbuf_is_queue_empty() - check if the queue is empty or not
- * @qhead: Queue head
- *
- * Return: true if length is 0 else false
- */
-static inline bool __cdf_nbuf_is_queue_empty(__cdf_nbuf_queue_t *qhead)
-{
-	return qhead->qlen == 0;
-}
-
-/*
- * Use sk_buff_head as the implementation of cdf_nbuf_queue_t.
- * Because the queue head will most likely put in some structure,
- * we don't use pointer type as the definition.
- */
 
 /*
  * prototypes. Implemented in cdf_nbuf.c
@@ -998,9 +820,9 @@ __cdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
 }
 
 #define __cdf_nbuf_set_tx_htt2_frm(skb, candi) \
-	NBUF_SET_TX_HTT2_FRM(skb, candi)
+	(NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi))
 #define __cdf_nbuf_get_tx_htt2_frm(skb)	\
-	NBUF_GET_TX_HTT2_FRM(skb)
+	NBUF_CB_TX_HL_HTT2_FRM(skb)
 
 #if defined(FEATURE_TSO)
 uint32_t __cdf_nbuf_get_tso_info(cdf_device_t osdev, struct sk_buff *skb,
@@ -1065,4 +887,170 @@ do {								\
 		pkt_type = htt_pkt_type_ethernet;		\
 								\
 } while (0)
+
+/*
+ * nbuf private buffer routines
+ */
+
+/**
+ * __cdf_nbuf_peek_header() - return the header's addr & m_len
+ * @skb: Pointer to network buffer
+ * @addr: Pointer to store header's addr
+ * @m_len: network buffer length
+ *
+ * Return: none
+ */
+static inline void
+__cdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
+{
+	*addr = skb->data;
+	*len = skb->len;
+}
+
+/**
+ * typedef struct __cdf_nbuf_queue_t -  network buffer queue
+ * @head: Head pointer
+ * @tail: Tail pointer
+ * @qlen: Queue length
+ */
+typedef struct __cdf_nbuf_qhead {
+	struct sk_buff *head;
+	struct sk_buff *tail;
+	unsigned int qlen;
+} __cdf_nbuf_queue_t;
+
+/******************Functions *************/
+
+/**
+ * __cdf_nbuf_queue_init() - initialize the queue head
+ * @qhead: Queue head
+ *
+ * Return: CDF status
+ */
+static inline CDF_STATUS __cdf_nbuf_queue_init(__cdf_nbuf_queue_t *qhead)
+{
+	memset(qhead, 0, sizeof(struct __cdf_nbuf_qhead));
+	return CDF_STATUS_SUCCESS;
+}
+
+/**
+ * __cdf_nbuf_queue_add() - add an skb in the tail of the queue
+ * @qhead: Queue head
+ * @skb: Pointer to network buffer
+ *
+ * This is a lockless version, driver must acquire locks if it
+ * needs to synchronize
+ *
+ * Return: none
+ */
+static inline void
+__cdf_nbuf_queue_add(__cdf_nbuf_queue_t *qhead, struct sk_buff *skb)
+{
+	skb->next = NULL;       /*Nullify the next ptr */
+
+	if (!qhead->head)
+		qhead->head = skb;
+	else
+		qhead->tail->next = skb;
+
+	qhead->tail = skb;
+	qhead->qlen++;
+}
+
+/**
+ * __cdf_nbuf_queue_insert_head() - add an skb at  the head  of the queue
+ * @qhead: Queue head
+ * @skb: Pointer to network buffer
+ *
+ * This is a lockless version, driver must acquire locks if it needs to
+ * synchronize
+ *
+ * Return: none
+ */
+static inline void
+__cdf_nbuf_queue_insert_head(__cdf_nbuf_queue_t *qhead, __cdf_nbuf_t skb)
+{
+	if (!qhead->head) {
+		/* Empty queue: tail pointer must be updated */
+		qhead->tail = skb;
+	}
+	skb->next = qhead->head;
+	qhead->head = skb;
+	qhead->qlen++;
+}
+
+/**
+ * __cdf_nbuf_queue_remove() - remove a skb from the head of the queue
+ * @qhead: Queue head
+ *
+ * This is a lockless version. Driver should take care of the locks
+ *
+ * Return: skb or NULL
+ */
+static inline
+struct sk_buff *__cdf_nbuf_queue_remove(__cdf_nbuf_queue_t *qhead)
+{
+	__cdf_nbuf_t tmp = NULL;
+
+	if (qhead->head) {
+		qhead->qlen--;
+		tmp = qhead->head;
+		if (qhead->head == qhead->tail) {
+			qhead->head = NULL;
+			qhead->tail = NULL;
+		} else {
+			qhead->head = tmp->next;
+		}
+		tmp->next = NULL;
+	}
+	return tmp;
+}
+
+/**
+ * __cdf_nbuf_queue_len() - return the queue length
+ * @qhead: Queue head
+ *
+ * Return: Queue length
+ */
+static inline uint32_t __cdf_nbuf_queue_len(__cdf_nbuf_queue_t *qhead)
+{
+	return qhead->qlen;
+}
+
+/**
+ * __cdf_nbuf_queue_next() - return the next skb from packet chain
+ * @skb: Pointer to network buffer
+ *
+ * This API returns the next skb from packet chain, remember the skb is
+ * still in the queue
+ *
+ * Return: NULL if no packets are there
+ */
+static inline struct sk_buff *__cdf_nbuf_queue_next(struct sk_buff *skb)
+{
+	return skb->next;
+}
+
+/**
+ * __cdf_nbuf_is_queue_empty() - check if the queue is empty or not
+ * @qhead: Queue head
+ *
+ * Return: true if length is 0 else false
+ */
+static inline bool __cdf_nbuf_is_queue_empty(__cdf_nbuf_queue_t *qhead)
+{
+	return qhead->qlen == 0;
+}
+
+/*
+ * Use sk_buff_head as the implementation of cdf_nbuf_queue_t.
+ * Because the queue head will most likely be put in some structure,
+ * we don't use pointer type as the definition.
+ */
 #endif /*_I_CDF_NET_BUF_H */

+ 11 - 11
core/dp/htt/htt_internal.h

@@ -194,18 +194,18 @@ static inline void htt_print_rx_desc_lro(struct htt_host_rx_desc_base *rx_desc)
 static inline void htt_rx_extract_lro_info(cdf_nbuf_t msdu,
 	 struct htt_host_rx_desc_base *rx_desc)
 {
-	NBUF_LRO_ELIGIBLE(msdu) = rx_desc->msdu_end.lro_eligible;
+	NBUF_CB_RX_LRO_ELIGIBLE(msdu) = rx_desc->msdu_end.lro_eligible;
 	if (rx_desc->msdu_end.lro_eligible) {
-		NBUF_TCP_PURE_ACK(msdu) = rx_desc->msdu_start.tcp_only_ack;
-		NBUF_TCP_CHKSUM(msdu) = rx_desc->msdu_end.tcp_udp_chksum;
-		NBUF_TCP_SEQ_NUM(msdu) = rx_desc->msdu_end.tcp_seq_number;
-		NBUF_TCP_ACK_NUM(msdu) = rx_desc->msdu_end.tcp_ack_number;
-		NBUF_TCP_WIN(msdu) = rx_desc->msdu_end.window_size;
-		NBUF_TCP_PROTO(msdu) = rx_desc->msdu_start.tcp_proto;
-		NBUF_IPV6_PROTO(msdu) = rx_desc->msdu_start.ipv6_proto;
-		NBUF_IP_OFFSET(msdu) = rx_desc->msdu_start.l3_offset;
-		NBUF_TCP_OFFSET(msdu) = rx_desc->msdu_start.l4_offset;
-		NBUF_FLOW_ID_TOEPLITZ(msdu) =
+		NBUF_CB_RX_TCP_PURE_ACK(msdu) = rx_desc->msdu_start.tcp_only_ack;
+		NBUF_CB_RX_TCP_CHKSUM(msdu) = rx_desc->msdu_end.tcp_udp_chksum;
+		NBUF_CB_RX_TCP_SEQ_NUM(msdu) = rx_desc->msdu_end.tcp_seq_number;
+		NBUF_CB_RX_TCP_ACK_NUM(msdu) = rx_desc->msdu_end.tcp_ack_number;
+		NBUF_CB_RX_TCP_WIN(msdu) = rx_desc->msdu_end.window_size;
+		NBUF_CB_RX_TCP_PROTO(msdu) = rx_desc->msdu_start.tcp_proto;
+		NBUF_CB_RX_IPV6_PROTO(msdu) = rx_desc->msdu_start.ipv6_proto;
+		NBUF_CB_RX_IP_OFFSET(msdu) = rx_desc->msdu_start.l3_offset;
+		NBUF_CB_RX_TCP_OFFSET(msdu) = rx_desc->msdu_start.l4_offset;
+		NBUF_CB_RX_FLOW_ID_TOEPLITZ(msdu) =
 			 rx_desc->msdu_start.flow_id_toeplitz;
 	}
 }

+ 13 - 17
core/dp/htt/htt_rx.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -107,7 +107,8 @@
 #endif
 
 /* De -initialization function of the rx buffer hash table. This function will
-   free up the hash table which includes freeing all the pending rx buffers*/
+ *   free up the hash table which includes freeing all the pending rx buffers
+ */
 void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
 {
 
@@ -228,7 +229,7 @@ void htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
 
 	idx = *(pdev->rx_ring.alloc_idx.vaddr);
 	while (num > 0) {
-		uint32_t paddr;
+		cdf_dma_addr_t paddr;
 		cdf_nbuf_t rx_netbuf;
 		int headroom;
 
@@ -287,7 +288,7 @@ void htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
 			cdf_nbuf_free(rx_netbuf);
 			goto fail;
 		}
-		paddr = cdf_nbuf_get_frag_paddr_lo(rx_netbuf, 0);
+		paddr = cdf_nbuf_get_frag_paddr(rx_netbuf, 0);
 		if (pdev->cfg.is_full_reorder_offload) {
 			if (cdf_unlikely
 				    (htt_rx_hash_list_insert(pdev, paddr,
@@ -309,11 +310,9 @@ void htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
 			pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
 		}
 #if HTT_PADDR64
-		pdev->rx_ring.buf.paddrs_ring[idx] = 0;
-		pdev->rx_ring.buf.paddrs_ring[idx] = (uint32_t)paddr;
-#else
-		pdev->rx_ring.buf.paddrs_ring[idx] = paddr;
+		paddr &= 0x1fffffffff; /* trim out higher than 37 bits */
 #endif /* HTT_PADDR64 */
+		pdev->rx_ring.buf.paddrs_ring[idx] = paddr;
 		pdev->rx_ring.fill_cnt++;
 
 		num--;
@@ -388,7 +387,7 @@ void htt_rx_detach(struct htt_pdev_t *pdev)
 							   memctx));
 
 	cdf_os_mem_free_consistent(pdev->osdev,
-				   pdev->rx_ring.size * sizeof(uint32_t),
+				   pdev->rx_ring.size * sizeof(cdf_dma_addr_t),
 				   pdev->rx_ring.buf.paddrs_ring,
 				   pdev->rx_ring.base_paddr,
 				   cdf_get_dma_mem_context((&pdev->rx_ring.buf),
@@ -2130,11 +2129,8 @@ void htt_rx_hash_dump_table(struct htt_pdev_t *pdev)
 int htt_rx_attach(struct htt_pdev_t *pdev)
 {
 	cdf_dma_addr_t paddr;
-#if HTT_PADDR64
-	uint32_t ring_elem_size = sizeof(uint64_t);
-#else
-	uint32_t ring_elem_size = sizeof(uint32_t);
-#endif /* HTT_PADDR64 */
+	uint32_t ring_elem_size = sizeof(cdf_dma_addr_t);
+
 	pdev->rx_ring.size = htt_rx_ring_size(pdev);
 	HTT_ASSERT2(CDF_IS_PWR2(pdev->rx_ring.size));
 	pdev->rx_ring.size_mask = pdev->rx_ring.size - 1;
@@ -2243,10 +2239,10 @@ int htt_rx_attach(struct htt_pdev_t *pdev)
 	}
 
 	htt_rx_offload_msdu_pop = htt_rx_offload_msdu_pop_ll;
-        htt_rx_mpdu_desc_retry = htt_rx_mpdu_desc_retry_ll;
+	htt_rx_mpdu_desc_retry = htt_rx_mpdu_desc_retry_ll;
 	htt_rx_mpdu_desc_seq_num = htt_rx_mpdu_desc_seq_num_ll;
 	htt_rx_mpdu_desc_pn = htt_rx_mpdu_desc_pn_ll;
-        htt_rx_mpdu_desc_tid = htt_rx_mpdu_desc_tid_ll;
+	htt_rx_mpdu_desc_tid = htt_rx_mpdu_desc_tid_ll;
 	htt_rx_msdu_desc_completes_mpdu = htt_rx_msdu_desc_completes_mpdu_ll;
 	htt_rx_msdu_first_msdu_flag = htt_rx_msdu_first_msdu_flag_ll;
 	htt_rx_msdu_has_wlan_mcast_flag = htt_rx_msdu_has_wlan_mcast_flag_ll;
@@ -2262,7 +2258,7 @@ int htt_rx_attach(struct htt_pdev_t *pdev)
 
 fail3:
 	cdf_os_mem_free_consistent(pdev->osdev,
-				   pdev->rx_ring.size * sizeof(uint32_t),
+				   pdev->rx_ring.size * sizeof(cdf_dma_addr_t),
 				   pdev->rx_ring.buf.paddrs_ring,
 				   pdev->rx_ring.base_paddr,
 				   cdf_get_dma_mem_context((&pdev->rx_ring.buf),

+ 5 - 5
core/dp/htt/htt_t2h.c

@@ -147,19 +147,19 @@ void htt_t2h_lp_msg_handler(void *context, cdf_nbuf_t htt_t2h_msg)
 		pdev->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
 		pdev->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
 		cdf_print
-			("target uses HTT version %d.%d; host uses %d.%d\n",
+			("target uses HTT version %d.%d; host uses %d.%d",
 			pdev->tgt_ver.major, pdev->tgt_ver.minor,
 			HTT_CURRENT_VERSION_MAJOR,
 			HTT_CURRENT_VERSION_MINOR);
 		if (pdev->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR)
 			cdf_print
-			      ("*** Incompatible host/target HTT versions!\n");
+			      ("*** Incompatible host/target HTT versions!");
 		/* abort if the target is incompatible with the host */
 		cdf_assert(pdev->tgt_ver.major ==
 			   HTT_CURRENT_VERSION_MAJOR);
 		if (pdev->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
 			cdf_print("*** Warning: host/target HTT versions are ");
-			cdf_print(" different, though compatible!\n");
+			cdf_print(" different, though compatible!");
 		}
 		break;
 	}
@@ -293,7 +293,7 @@ void htt_t2h_lp_msg_handler(void *context, cdf_nbuf_t htt_t2h_msg)
 			cdf_runtime_pm_put();
 			HTT_TX_SCHED(pdev);
 		} else {
-			cdf_print("Ignoring HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND indication\n");
+			cdf_print("Ignoring HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND indication");
 		}
 		break;
 	}
@@ -510,7 +510,7 @@ void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
 	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
 
 #if defined(HELIUMPLUS_DEBUG)
-	cdf_print("%s %d: msg_word 0x%x msg_type %d\n",
+	cdf_print("%s %d: msg_word 0x%x msg_type %d",
 		  __func__, __LINE__, *msg_word, msg_type);
 #endif
 

+ 32 - 36
core/dp/htt/htt_tx.c

@@ -188,7 +188,7 @@ static void htt_tx_frag_desc_detach(struct htt_pdev_t *pdev)
  * htt_tx_frag_alloc() - Allocate single fragment descriptor from the pool
  * @pdev:		htt device instance pointer
  * @index:		Descriptor index
- * @frag_paddr_lo:	Fragment descriptor physical address
+ * @frag_paddr:	        Fragment descriptor physical address
  * @frag_ptr:		Fragment descriptor virtual address
  *
  * This function will free fragment descriptor
@@ -196,7 +196,7 @@ static void htt_tx_frag_desc_detach(struct htt_pdev_t *pdev)
  * Return: None
  */
 int htt_tx_frag_alloc(htt_pdev_handle pdev,
-	u_int16_t index, u_int32_t *frag_paddr_lo, void **frag_ptr)
+	u_int16_t index, cdf_dma_addr_t *frag_paddr, void **frag_ptr)
 {
 	uint16_t frag_page_index;
 	uint16_t frag_elem_index;
@@ -223,7 +223,7 @@ int htt_tx_frag_alloc(htt_pdev_handle pdev,
 		return 1;
 	}
 
-	*frag_paddr_lo = dma_page->page_p_addr +
+	*frag_paddr = dma_page->page_p_addr +
 		frag_elem_index * pdev->frag_descs.size;
 	return 0;
 }
@@ -436,7 +436,7 @@ static cdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
 
 /*--- descriptor allocation functions ---------------------------------------*/
 
-void *htt_tx_desc_alloc(htt_pdev_handle pdev, uint32_t *paddr_lo,
+void *htt_tx_desc_alloc(htt_pdev_handle pdev, cdf_dma_addr_t *paddr,
 			uint16_t index)
 {
 	struct htt_host_tx_desc_t *htt_host_tx_desc;    /* includes HTC hdr */
@@ -477,7 +477,7 @@ void *htt_tx_desc_alloc(htt_pdev_handle pdev, uint32_t *paddr_lo,
 	 * Include the headroom for the HTC frame header when specifying the
 	 * physical address for the HTT tx descriptor.
 	 */
-	*paddr_lo = (uint32_t)htt_tx_get_paddr(pdev, (char *)htt_host_tx_desc);
+	*paddr = (cdf_dma_addr_t)htt_tx_get_paddr(pdev, (char *)htt_host_tx_desc);
 	/*
 	 * The allocated tx descriptor space includes headroom for a
 	 * HTC frame header.  Hide this headroom, so that we don't have
@@ -504,8 +504,8 @@ void htt_tx_desc_free(htt_pdev_handle pdev, void *tx_desc)
 
 void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
 				 void *htt_tx_desc,
-				 uint32_t paddr,
-				 uint32_t frag_desc_paddr_lo,
+				 cdf_dma_addr_t paddr,
+				 cdf_dma_addr_t frag_desc_paddr,
 				 int reset)
 {
 	uint32_t *fragmentation_descr_field_ptr;
@@ -515,7 +515,7 @@ void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
 		HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
 	if (reset) {
 #if defined(HELIUMPLUS_PADDR64)
-		*fragmentation_descr_field_ptr = frag_desc_paddr_lo;
+		*fragmentation_descr_field_ptr = frag_desc_paddr;
 #else
 		*fragmentation_descr_field_ptr =
 			htt_tx_get_paddr(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
@@ -812,7 +812,8 @@ int htt_tx_send_std(htt_pdev_handle pdev, cdf_nbuf_t msdu, uint16_t msdu_id)
 }
 
 #endif /*ATH_11AC_TXCOMPACT */
-#ifdef HTT_DBG
+
+#if defined(HTT_DBG)
 void htt_tx_desc_display(void *tx_desc)
 {
 	struct htt_tx_msdu_desc_t *htt_tx_desc;
@@ -820,32 +821,28 @@ void htt_tx_desc_display(void *tx_desc)
 	htt_tx_desc = (struct htt_tx_msdu_desc_t *)tx_desc;
 
 	/* only works for little-endian */
-	cdf_print("HTT tx desc (@ %p):\n", htt_tx_desc);
-	cdf_print("  msg type = %d\n", htt_tx_desc->msg_type);
-	cdf_print("  pkt subtype = %d\n", htt_tx_desc->pkt_subtype);
-	cdf_print("  pkt type = %d\n", htt_tx_desc->pkt_type);
-	cdf_print("  vdev ID = %d\n", htt_tx_desc->vdev_id);
-	cdf_print("  ext TID = %d\n", htt_tx_desc->ext_tid);
-	cdf_print("  postponed = %d\n", htt_tx_desc->postponed);
-#if HTT_PADDR64
-	cdf_print("  reserved_dword0_bits28 = %d\n", htt_tx_desc->reserved_dword0_bits28);
-	cdf_print("  cksum_offload = %d\n", htt_tx_desc->cksum_offload);
-	cdf_print("  tx_compl_req= %d\n", htt_tx_desc->tx_compl_req);
-#else /* !HTT_PADDR64 */
-	cdf_print("  batch more = %d\n", htt_tx_desc->more_in_batch);
-#endif /* HTT_PADDR64 */
-	cdf_print("  length = %d\n", htt_tx_desc->len);
-	cdf_print("  id = %d\n", htt_tx_desc->id);
+	cdf_print("HTT tx desc (@ %p):", htt_tx_desc);
+	cdf_print("  msg type = %d", htt_tx_desc->msg_type);
+	cdf_print("  pkt subtype = %d", htt_tx_desc->pkt_subtype);
+	cdf_print("  pkt type = %d", htt_tx_desc->pkt_type);
+	cdf_print("  vdev ID = %d", htt_tx_desc->vdev_id);
+	cdf_print("  ext TID = %d", htt_tx_desc->ext_tid);
+	cdf_print("  postponed = %d", htt_tx_desc->postponed);
+	cdf_print("  extension = %d", htt_tx_desc->extension);
+	cdf_print("  cksum_offload = %d", htt_tx_desc->cksum_offload);
+	cdf_print("  tx_compl_req= %d", htt_tx_desc->tx_compl_req);
+	cdf_print("  length = %d", htt_tx_desc->len);
+	cdf_print("  id = %d", htt_tx_desc->id);
 #if HTT_PADDR64
-	cdf_print("  frag desc addr.lo = %#x\n",
+	cdf_print("  frag desc addr.lo = %#x",
 		  htt_tx_desc->frags_desc_ptr.lo);
-	cdf_print("  frag desc addr.hi = %#x\n",
+	cdf_print("  frag desc addr.hi = %#x",
 		  htt_tx_desc->frags_desc_ptr.hi);
-	cdf_print("  peerid = %d\n", htt_tx_desc->peerid);
-	cdf_print("  chanfreq = %d\n", htt_tx_desc->chanfreq);
 #else /* ! HTT_PADDR64 */
-	cdf_print("  frag desc addr = %#x\n", htt_tx_desc->frags_desc_ptr);
+	cdf_print("  frag desc addr = %#x", htt_tx_desc->frags_desc_ptr);
 #endif /* HTT_PADDR64 */
+	cdf_print("  peerid = %d", htt_tx_desc->peerid);
+	cdf_print("  chanfreq = %d", htt_tx_desc->chanfreq);
 }
 #endif
 
@@ -900,7 +897,7 @@ int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 				tx_buffer_count) << 16;
 
 		cdf_nbuf_map(pdev->osdev, buffer_vaddr, CDF_DMA_BIDIRECTIONAL);
-		buffer_paddr = cdf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
+		buffer_paddr = cdf_nbuf_get_frag_paddr(buffer_vaddr, 0);
 		header_ptr++;
 		*header_ptr = (uint32_t) (buffer_paddr +
 						IPA_UC_TX_BUF_FRAG_DESC_OFFSET);
@@ -928,7 +925,7 @@ int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 {
 	unsigned int tx_buffer_count;
 	cdf_nbuf_t buffer_vaddr;
-	uint32_t buffer_paddr;
+	cdf_dma_addr_t buffer_paddr;
 	uint32_t *header_ptr;
 	uint32_t *ring_vaddr;
 #define IPA_UC_TX_BUF_FRAG_DESC_OFFSET 20
@@ -960,7 +957,7 @@ int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 				tx_buffer_count) << 16;
 
 		cdf_nbuf_map(pdev->osdev, buffer_vaddr, CDF_DMA_BIDIRECTIONAL);
-		buffer_paddr = cdf_nbuf_get_frag_paddr_lo(buffer_vaddr, 0);
+		buffer_paddr = cdf_nbuf_get_frag_paddr(buffer_vaddr, 0);
 		header_ptr++;
 
 		/* Frag Desc Pointer */
@@ -1063,8 +1060,7 @@ int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
 
 free_tx_comp_base:
 	cdf_os_mem_free_consistent(pdev->osdev,
-				   ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->
-								ctrl_pdev) * 4,
+				   tx_comp_ring_size,
 				   pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
 				   pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
 				   cdf_get_dma_mem_context((&pdev->
@@ -1101,7 +1097,7 @@ int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev)
 	if (pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr) {
 		cdf_os_mem_free_consistent(
 			pdev->osdev,
-			ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * 4,
+			ol_cfg_ipa_uc_tx_max_buf_cnt(pdev->ctrl_pdev) * sizeof(cdf_nbuf_t),
 			pdev->ipa_uc_tx_rsc.tx_comp_base.vaddr,
 			pdev->ipa_uc_tx_rsc.tx_comp_base.paddr,
 			cdf_get_dma_mem_context((&pdev->ipa_uc_tx_rsc.

+ 23 - 0
core/dp/htt/htt_types.h

@@ -174,8 +174,30 @@ struct ipa_uc_rx_ring_elem_t {
 };
 
 #if defined(HELIUMPLUS_PADDR64)
+/**
+ * struct msdu_ext_frag_desc - tx fragment descriptor entry
+ * Semantically, this is an array of 6 2-tuples, each holding a
+ * 48-bit physical address and a 16-bit length field, with the
+ * following layout:
+ * 31               16               0
+ * |        p t r - l o w - 3 2      |
+ * | len (16)        | ptr-hi (16)   |
+ */
+struct msdu_ext_frag_desc {
+	union {
+		uint64_t desc64;
+		struct {
+			uint32_t ptr_low;
+			uint32_t ptr_hi:16,
+				len:16;
+		} frag32;
+	} u;
+};
+
 struct msdu_ext_desc_t {
 	struct cdf_tso_flags_t tso_flags;
+	struct msdu_ext_frag_desc frags[6];
+/*
 	u_int32_t frag_ptr0;
 	u_int32_t frag_len0;
 	u_int32_t frag_ptr1;
@@ -188,6 +210,7 @@ struct msdu_ext_desc_t {
 	u_int32_t frag_len4;
 	u_int32_t frag_ptr5;
 	u_int32_t frag_len5;
+*/
 };
 #endif  /* defined(HELIUMPLUS_PADDR64) */
 

+ 48 - 39
core/dp/ol/inc/ol_htt_tx_api.h

@@ -357,7 +357,7 @@ uint16_t htt_tx_compl_desc_id(void *iterator, int num);
  * @param[OUT] paddr_lo - physical address of the HTT descriptor
  * @return success -> descriptor handle, -OR- failure -> NULL
  */
-void *htt_tx_desc_alloc(htt_pdev_handle pdev, uint32_t *paddr_lo,
+void *htt_tx_desc_alloc(htt_pdev_handle pdev, cdf_dma_addr_t *paddr,
 			uint16_t index);
 
 /**
@@ -381,10 +381,10 @@ void htt_tx_desc_free(htt_pdev_handle htt_pdev, void *htt_tx_desc);
  * @return success 0
  */
 int htt_tx_frag_alloc(htt_pdev_handle pdev,
-	u_int16_t index, u_int32_t *frag_paddr_lo, void **frag_ptr);
+	u_int16_t index, cdf_dma_addr_t *frag_paddr, void **frag_ptr);
 #else
 static inline int htt_tx_frag_alloc(htt_pdev_handle pdev,
-	u_int16_t index, u_int32_t *frag_paddr_lo, void **frag_ptr)
+	u_int16_t index, cdf_dma_addr_t *frag_paddr, void **frag_ptr)
 {
 	*frag_ptr = NULL;
 	return 0;
@@ -530,7 +530,7 @@ static inline
 void
 htt_tx_desc_init(htt_pdev_handle pdev,
 		 void *htt_tx_desc,
-		 uint32_t htt_tx_desc_paddr_lo,
+		 cdf_dma_addr_t htt_tx_desc_paddr,
 		 uint16_t msdu_id,
 		 cdf_nbuf_t msdu, struct htt_msdu_info_t *msdu_info,
 		 struct cdf_tso_info_t *tso_info,
@@ -681,8 +681,7 @@ htt_tx_desc_init(htt_pdev_handle pdev,
 	/* store a link to the HTT tx descriptor within the netbuf */
 	cdf_nbuf_frag_push_head(msdu, sizeof(struct htt_host_tx_desc_t),
 				(char *)htt_host_tx_desc, /* virtual addr */
-				htt_tx_desc_paddr_lo,
-				0 /* phys addr MSBs - n/a */);
+				htt_tx_desc_paddr);
 
 	/*
 	 * Indicate that the HTT header (and HTC header) is a meta-data
@@ -762,15 +761,14 @@ htt_tx_desc_num_frags(htt_pdev_handle pdev, void *desc, uint32_t num_frags)
 	 */
 #if defined(HELIUMPLUS_PADDR64)
 	if (HTT_WIFI_IP(pdev, 2, 0)) {
+		struct msdu_ext_frag_desc *fdesc;
+
 		/** Skip TSO related 4 dwords WIFI2.0*/
-		desc = (void *)&(((struct msdu_ext_desc_t *)desc)->frag_ptr0);
-		/* Frag ptr is 48 bit wide so clear the next dword as well */
-		*((uint32_t *)(((char *)desc) + (num_frags << 3))) = 0;
-		*((uint32_t *)
-		  (((char *)desc) + (num_frags << 3) + sizeof(uint32_t))) = 0;
-		/* TODO: OKA: remove the magic constants */
+		fdesc = (struct msdu_ext_frag_desc *)
+			&(((struct msdu_ext_desc_t *)desc)->frags[0]);
+		fdesc[num_frags].u.desc64 = 0;
 	} else {
-		/* XXXOKA -- Looks like a bug, called with htt_frag_desc */
+		/* This piece of code should never be executed on HELIUMPLUS */
 		*((u_int32_t *)
 		  (((char *) desc) + HTT_TX_DESC_LEN + num_frags * 8)) = 0;
 	}
@@ -809,54 +807,65 @@ static inline
 void
 htt_tx_desc_frag(htt_pdev_handle pdev,
 		 void *desc,
-		 int frag_num, uint32_t frag_phys_addr, uint16_t frag_len)
+		 int frag_num, cdf_dma_addr_t frag_phys_addr, uint16_t frag_len)
 {
-	u_int32_t *word;
-
+	uint32_t *word32;
 #if defined(HELIUMPLUS_PADDR64)
+	uint64_t  *word64;
+
 	if (HTT_WIFI_IP(pdev, 2, 0)) {
-		word = (u_int32_t *)(desc);
+		word32 = (u_int32_t *)(desc);
 		/* Initialize top 6 words of TSO flags per packet */
-		*word++ = 0;
-		*word++ = 0;
-		*word++ = 0;
+		*word32++ = 0;
+		*word32++ = 0;
+		*word32++ = 0;
 		if (((struct txrx_pdev_cfg_t *)(pdev->ctrl_pdev))
 		    ->ip_tcp_udp_checksum_offload)
-			*word |= (IPV4_CSUM_EN | TCP_IPV4_CSUM_EN |
+			*word32 |= (IPV4_CSUM_EN | TCP_IPV4_CSUM_EN |
 					TCP_IPV6_CSUM_EN | UDP_IPV4_CSUM_EN |
 					UDP_IPV6_CSUM_EN);
 		else
-			*word = 0;
-		word++;
-		*word++ = 0;
-		*word++ = 0;
+			*word32 = 0;
+		word32++;
+		*word32++ = 0;
+		*word32++ = 0;
 
-		cdf_assert_always(word == &(((struct msdu_ext_desc_t *)
-					     desc)->frag_ptr0));
+		cdf_assert_always(word32 == (uint32_t *)
+				&(((struct msdu_ext_desc_t *)desc)->frags[0]));
 
 		/* Each fragment consumes 2 DWORDS */
-		word += (frag_num << 1);
-		*word = frag_phys_addr;
-
-		word++;
-		*word = (frag_len<<16);
-
+		word32 += (frag_num << 1);
+		word64 = (uint64_t *)word32;
+		*word64 = frag_phys_addr;
+		/*
+		 * The frag_phys address is at most 37 bits, so the upper
+		 * 16 bits of the second dword are used for the length.
+		 */
+		word32++;
+		*word32 &= 0x0000ffff;
+		*word32 |= (frag_len << 16);
 	} else {
 		/* For Helium+, this block cannot exist */
 		CDF_ASSERT(0);
 	}
 #else /* !defined(HELIUMPLUS_PADDR64) */
-	word = (uint32_t *) (((char *)desc) + HTT_TX_DESC_LEN + frag_num * 8);
-	*word = frag_phys_addr;
-	word++;
-	*word = frag_len;
+	{
+		uint64_t u64  = (uint64_t)frag_phys_addr;
+		uint32_t u32l = (u64 & 0xffffffff);
+		uint32_t u32h = (uint32_t)((u64 >> 32) & 0x1f);
+		uint64_t *word64;
+
+		word32 = (uint32_t *) (((char *)desc) + HTT_TX_DESC_LEN + frag_num * 8);
+		word64 = (uint64_t *)word32;
+		*word32 = u32l;
+		word32++;
+		*word32 = (u32h << 16) | frag_len;
+	}
 #endif /* defined(HELIUMPLUS_PADDR64) */
 }
 
 void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
 				 void *desc,
-				 uint32_t paddr,
-				 uint32_t frag_desc_paddr_lo,
+				 cdf_dma_addr_t paddr,
+				 cdf_dma_addr_t frag_desc_paddr,
 				 int reset);
 
 /**

+ 18 - 17
core/dp/txrx/ol_tx.c

@@ -288,8 +288,9 @@ cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
 		while (segments) {
 
 			if (msdu_info.tso_info.curr_seg)
-				NBUF_MAPPED_PADDR_LO(msdu) = msdu_info.tso_info.
-					curr_seg->seg.tso_frags[0].paddr_low_32;
+				NBUF_CB_PADDR(msdu) =
+					msdu_info.tso_info.curr_seg->
+					seg.tso_frags[0].paddr_low_32;
 
 			segments--;
 
@@ -317,7 +318,7 @@ cdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
 					 msdu_info.tso_info.curr_seg->next;
 			}
 
-			cdf_nbuf_dec_num_frags(msdu);
+			cdf_nbuf_reset_num_frags(msdu);
 
 			if (msdu_info.tso_info.is_tso) {
 				TXRX_STATS_TSO_INC_SEG(vdev->pdev);
@@ -437,8 +438,9 @@ ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
 
 	num_frags = cdf_nbuf_get_num_frags(msdu);
 	/* num_frags are expected to be 2 max */
-	num_frags = (num_frags > CVG_NBUF_MAX_EXTRA_FRAGS) ?
-		CVG_NBUF_MAX_EXTRA_FRAGS : num_frags;
+	num_frags = (num_frags > NBUF_CB_TX_MAX_EXTRA_FRAGS)
+		? NBUF_CB_TX_MAX_EXTRA_FRAGS
+		: num_frags;
 #if defined(HELIUMPLUS_PADDR64)
 	/*
 	 * Use num_frags - 1, since 1 frag is used to store
@@ -459,17 +461,17 @@ ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
 	} else {
 		for (i = 1; i < num_frags; i++) {
 			cdf_size_t frag_len;
-			u_int32_t frag_paddr;
+			cdf_dma_addr_t frag_paddr;
 
 			frag_len = cdf_nbuf_get_frag_len(msdu, i);
-			frag_paddr = cdf_nbuf_get_frag_paddr_lo(msdu, i);
+			frag_paddr = cdf_nbuf_get_frag_paddr(msdu, i);
 #if defined(HELIUMPLUS_PADDR64)
 			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
 					 i - 1, frag_paddr, frag_len);
 #if defined(HELIUMPLUS_DEBUG)
-			cdf_print("%s:%d: htt_fdesc=%p frag_paddr=%u len=%zu\n",
+			cdf_print("%s:%d: htt_fdesc=%p frag=%d frag_paddr=0x%0llx len=%zu",
 				  __func__, __LINE__, tx_desc->htt_frag_desc,
-				  frag_paddr, frag_len);
+				  i-1, frag_paddr, frag_len);
 			dump_pkt(netbuf, frag_paddr, 64);
 #endif /* HELIUMPLUS_DEBUG */
 #else /* ! defined(HELIUMPLUSPADDR64) */
@@ -556,7 +558,7 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
 		while (segments) {
 
 			if (msdu_info.tso_info.curr_seg)
-				NBUF_MAPPED_PADDR_LO(msdu) = msdu_info.tso_info.
+				NBUF_CB_PADDR(msdu) = msdu_info.tso_info.
 					curr_seg->seg.tso_frags[0].paddr_low_32;
 
 			segments--;
@@ -617,7 +619,7 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, cdf_nbuf_t msdu_list)
 				}
 
 				if (msdu_info.tso_info.is_tso) {
-					cdf_nbuf_dec_num_frags(msdu);
+					cdf_nbuf_reset_num_frags(msdu);
 					TXRX_STATS_TSO_INC_SEG(vdev->pdev);
 					TXRX_STATS_TSO_INC_SEG_IDX(vdev->pdev);
 				}
@@ -1195,10 +1197,10 @@ void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
 
 	cdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
 		 tx_desc, tx_desc->id);
-	cdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%x\n",
+	cdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%llx",
 		 tx_desc->htt_tx_desc, tx_desc->htt_tx_desc_paddr);
-	cdf_print("%s %d: Fragment Descriptor 0x%p\n",
-		 __func__, __LINE__, tx_desc->htt_frag_desc);
+	cdf_print("%s %d: Fragment Descriptor 0x%p (paddr=0x%llx)",
+		 __func__, __LINE__, tx_desc->htt_frag_desc, tx_desc->htt_frag_desc_paddr);
 
 	/* it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
 	   is already de-referrable (=> in virtual address space) */
@@ -1308,7 +1310,7 @@ ol_txrx_mgmt_send(ol_txrx_vdev_handle vdev,
 		htt_tx_desc_frags_table_set(
 			pdev->htt_pdev,
 			tx_desc->htt_tx_desc,
-			cdf_nbuf_get_frag_paddr_lo(tx_mgmt_frm, 1),
+			cdf_nbuf_get_frag_paddr(tx_mgmt_frm, 1),
 			0, 0);
 #if defined(HELIUMPLUS_PADDR64) && defined(HELIUMPLUS_DEBUG)
 		dump_frag_desc(
@@ -1326,8 +1328,7 @@ ol_txrx_mgmt_send(ol_txrx_vdev_handle vdev,
 	tx_desc->pkt_type = type + OL_TXRX_MGMT_TYPE_BASE;
 
 	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
-	NBUF_SET_PACKET_TRACK(tx_desc->netbuf, NBUF_TX_PKT_MGMT_TRACK);
-	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
+	NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) = NBUF_TX_PKT_MGMT_TRACK;	ol_tx_send_nonstd(pdev, tx_desc, tx_mgmt_frm,
 			  htt_pkt_type_mgmt);
 
 	return 0;               /* accepted the tx mgmt frame */

+ 18 - 15
core/dp/txrx/ol_tx_desc.c

@@ -307,11 +307,11 @@ extern void
 dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc);
 
 void
-dump_pkt(cdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
+dump_pkt(cdf_nbuf_t nbuf, cdf_dma_addr_t nbuf_paddr, int len)
 {
-	cdf_print("%s: Pkt: VA 0x%p PA 0x%x len %d\n", __func__,
+	cdf_print("%s: Pkt: VA 0x%p PA 0x%llx len %d\n", __func__,
 		  cdf_nbuf_data(nbuf), nbuf_paddr, len);
-	print_hex_dump(KERN_DEBUG, "Pkt:   ", DUMP_PREFIX_NONE, 16, 4,
+	print_hex_dump(KERN_DEBUG, "Pkt:   ", DUMP_PREFIX_ADDRESS, 16, 4,
 		       cdf_nbuf_data(nbuf), len, true);
 }
 
@@ -381,8 +381,8 @@ struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
 	 */
 	num_frags = cdf_nbuf_get_num_frags(netbuf);
 	/* num_frags are expected to be 2 max */
-	num_frags = (num_frags > CVG_NBUF_MAX_EXTRA_FRAGS)
-		? CVG_NBUF_MAX_EXTRA_FRAGS
+	num_frags = (num_frags > NBUF_CB_TX_MAX_EXTRA_FRAGS)
+		? NBUF_CB_TX_MAX_EXTRA_FRAGS
 		: num_frags;
 #if defined(HELIUMPLUS_PADDR64)
 	/*
@@ -405,17 +405,20 @@ struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
 	} else {
 		for (i = 1; i < num_frags; i++) {
 			cdf_size_t frag_len;
-			uint32_t frag_paddr;
-
+			cdf_dma_addr_t frag_paddr;
+#ifdef HELIUMPLUS_DEBUG
+			void *frag_vaddr;
+			frag_vaddr = cdf_nbuf_get_frag_vaddr(netbuf, i);
+#endif
 			frag_len = cdf_nbuf_get_frag_len(netbuf, i);
-			frag_paddr = cdf_nbuf_get_frag_paddr_lo(netbuf, i);
+			frag_paddr = cdf_nbuf_get_frag_paddr(netbuf, i);
 #if defined(HELIUMPLUS_PADDR64)
 			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc, i - 1,
 				 frag_paddr, frag_len);
 #if defined(HELIUMPLUS_DEBUG)
-			cdf_print("%s:%d: htt_fdesc=%p frag_paddr=%u len=%zu\n",
-					  __func__, __LINE__, tx_desc->htt_frag_desc,
-					  frag_paddr, frag_len);
+			cdf_print("%s:%d: htt_fdesc=%p frag=%d frag_vaddr=0x%p frag_paddr=0x%llx len=%zu\n",
+				  __func__, __LINE__, tx_desc->htt_frag_desc,
+				  i-1, frag_vaddr, frag_paddr, frag_len);
 			dump_pkt(netbuf, frag_paddr, 64);
 #endif /* HELIUMPLUS_DEBUG */
 #else /* ! defined(HELIUMPLUSPADDR64) */
@@ -485,10 +488,10 @@ void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
 	/* check the frame type to see what kind of special steps are needed */
 	if ((tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) &&
 		   (tx_desc->pkt_type != 0xff)) {
-		uint32_t frag_desc_paddr_lo = 0;
+		cdf_dma_addr_t frag_desc_paddr = 0;
 
 #if defined(HELIUMPLUS_PADDR64)
-		frag_desc_paddr_lo = tx_desc->htt_frag_desc_paddr;
+		frag_desc_paddr = tx_desc->htt_frag_desc_paddr;
 		/* FIX THIS -
 		 * The FW currently has trouble using the host's fragments
 		 * table for management frames.  Until this is fixed,
@@ -501,12 +504,12 @@ void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
 #if defined(HELIUMPLUS_DEBUG)
 		cdf_print("%s %d: Frag Descriptor Reset [%d] to 0x%x\n",
 			  __func__, __LINE__, tx_desc->id,
-			  frag_desc_paddr_lo);
+			  frag_desc_paddr);
 #endif /* HELIUMPLUS_DEBUG */
 #endif /* HELIUMPLUS_PADDR64 */
 		htt_tx_desc_frags_table_set(pdev->htt_pdev,
 					    tx_desc->htt_tx_desc, 0,
-					    frag_desc_paddr_lo, 1);
+					    frag_desc_paddr, 1);
 
 		mgmt_type = tx_desc->pkt_type - OL_TXRX_MGMT_TYPE_BASE;
 		/*

+ 9 - 9
core/dp/txrx/ol_txrx.c

@@ -588,8 +588,8 @@ ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
 	for (i = 0; i < desc_pool_size; i++) {
 		void *htt_tx_desc;
 		void *htt_frag_desc = NULL;
-		uint32_t frag_paddr_lo = 0;
-		uint32_t paddr_lo;
+		cdf_dma_addr_t frag_paddr = 0;
+		cdf_dma_addr_t paddr;
 
 		if (i == (desc_pool_size - 1))
 			c_element->next = NULL;
@@ -597,7 +597,7 @@ ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
 			c_element->next = (union ol_tx_desc_list_elem_t *)
 				ol_tx_desc_find(pdev, i + 1);
 
-		htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr_lo, i);
+		htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
 		if (!htt_tx_desc) {
 			CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_FATAL,
 				  "%s: failed to alloc HTT tx desc (%d of %d)",
@@ -607,9 +607,9 @@ ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
 		}
 
 		c_element->tx_desc.htt_tx_desc = htt_tx_desc;
-		c_element->tx_desc.htt_tx_desc_paddr = paddr_lo;
+		c_element->tx_desc.htt_tx_desc_paddr = paddr;
 		ret = htt_tx_frag_alloc(pdev->htt_pdev,
-			i, &frag_paddr_lo, &htt_frag_desc);
+					i, &frag_paddr, &htt_frag_desc);
 		if (ret) {
 			CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
 				"%s: failed to alloc HTT frag dsc (%d/%d)",
@@ -623,10 +623,10 @@ ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
 			   of the frag descriptor */
 			memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
 			c_element->tx_desc.htt_frag_desc = htt_frag_desc;
-			c_element->tx_desc.htt_frag_desc_paddr = frag_paddr_lo;
+			c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
 		}
 		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
-			"%s:%d - %d FRAG VA 0x%p FRAG PA 0x%x",
+			"%s:%d - %d FRAG VA 0x%p FRAG PA 0x%llx",
 			__func__, __LINE__, i,
 			c_element->tx_desc.htt_frag_desc,
 			c_element->tx_desc.htt_frag_desc_paddr);
@@ -1814,7 +1814,7 @@ void ol_txrx_peer_detach(ol_txrx_peer_handle peer)
 	/* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
 
 	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
-		   "%s:peer %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
+		   "%s:peer %p (%02x:%02x:%02x:%02x:%02x:%02x)",
 		   __func__, peer,
 		   peer->mac_addr.raw[0], peer->mac_addr.raw[1],
 		   peer->mac_addr.raw[2], peer->mac_addr.raw[3],
@@ -1851,7 +1851,7 @@ ol_txrx_peer_find_by_addr(struct ol_txrx_pdev_t *pdev, uint8_t *peer_mac_addr)
 	peer = ol_txrx_peer_find_hash_find(pdev, peer_mac_addr, 0, 0);
 	if (peer) {
 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
-			   "%s: Delete extra reference %p\n", __func__, peer);
+			   "%s: Delete extra reference %p", __func__, peer);
 		/* release the extra reference */
 		ol_txrx_peer_unref_delete(peer);
 	}

+ 4 - 4
core/dp/txrx/ol_txrx_types.h

@@ -69,8 +69,8 @@
 #define OL_TX_NUM_TIDS    18
 #define OL_RX_MCAST_TID   18  /* Mcast TID only between f/w & host */
 
-#define OL_TX_VDEV_MCAST_BCAST    0 // HTT_TX_EXT_TID_MCAST_BCAST
-#define OL_TX_VDEV_DEFAULT_MGMT   1 // HTT_TX_EXT_TID_DEFALT_MGMT
+#define OL_TX_VDEV_MCAST_BCAST    0 /* HTT_TX_EXT_TID_MCAST_BCAST */
+#define OL_TX_VDEV_DEFAULT_MGMT   1 /* HTT_TX_EXT_TID_DEFALT_MGMT */
 #define OL_TX_VDEV_NUM_QUEUES     2
 
 #define OL_TXRX_MGMT_TYPE_BASE htt_pkt_num_types
@@ -130,9 +130,9 @@ struct ol_tx_desc_t {
 	cdf_nbuf_t netbuf;
 	void *htt_tx_desc;
 	uint16_t id;
-	uint32_t htt_tx_desc_paddr;
+	cdf_dma_addr_t htt_tx_desc_paddr;
 	void *htt_frag_desc; /* struct msdu_ext_desc_t * */
-	uint32_t htt_frag_desc_paddr;
+	cdf_dma_addr_t htt_frag_desc_paddr;
 	cdf_atomic_t ref_cnt;
 	enum htt_tx_status status;
 

+ 14 - 1
core/hdd/inc/wlan_hdd_ipa.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -39,6 +39,19 @@
 
 #ifdef IPA_OFFLOAD
 /* Include files */
+#include <wlan_hdd_assoc.h> /* hdd_context_t */
+
+/**
+ * FIXME: Temporary hack - until IPA functionality gets restored
+ *
+ */
+typedef void (*hdd_ipa_nbuf_cb_fn)(cdf_nbuf_t);
+void hdd_ipa_nbuf_cb(cdf_nbuf_t skb);  /* Fwd declare */
+static inline hdd_ipa_nbuf_cb_fn wlan_hdd_stub_ipa_fn(void)
+{
+	return hdd_ipa_nbuf_cb;
+};
+
 CDF_STATUS hdd_ipa_init(hdd_context_t *hdd_ctx);
 CDF_STATUS hdd_ipa_cleanup(hdd_context_t *hdd_ctx);
 CDF_STATUS hdd_ipa_process_rxt(void *cds_context, cdf_nbuf_t rxBuf,

+ 38 - 10
core/hdd/src/wlan_hdd_ipa.c

@@ -469,6 +469,30 @@ struct hdd_ipa_priv {
 	cdf_dma_addr_t rx_ready_doorbell_paddr;
 };
 
+/**
+ * FIXME: The following conversion routines are just stubs.
+ *        They will be implemented fully by another update.
+ *        The stubs will let the compile go ahead, and functionality
+ *        is broken.
+ * This should be OK since IPA is not enabled yet
+ */
+void *wlan_hdd_stub_priv_to_addr(uint32_t priv)
+{
+	void    *vaddr;
+	uint32_t ipa_priv = priv;
+
+	vaddr = &ipa_priv; /* just to use the var */
+	vaddr = NULL;
+	return vaddr;
+}
+
+uint32_t wlan_hdd_stub_addr_to_priv(void *ptr)
+{
+	uint32_t       ipa_priv = 0;
+
+	BUG_ON(ptr == NULL);
+	return ipa_priv;
+}
 #define HDD_IPA_WLAN_CLD_HDR_LEN        sizeof(struct hdd_ipa_cld_hdr)
 #define HDD_IPA_UC_WLAN_CLD_HDR_LEN     0
 #define HDD_IPA_WLAN_TX_HDR_LEN         sizeof(struct hdd_ipa_tx_hdr)
@@ -2650,12 +2674,13 @@ static void hdd_ipa_w2i_cb(void *priv, enum ipa_dp_evt_type evt,
  *
  * Return: None
  */
-static void hdd_ipa_nbuf_cb(cdf_nbuf_t skb)
+void hdd_ipa_nbuf_cb(cdf_nbuf_t skb)
 {
 	struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
 
-	HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG, "%lx", NBUF_OWNER_PRIV_DATA(skb));
-	ipa_free_skb((struct ipa_rx_data *)NBUF_OWNER_PRIV_DATA(skb));
+	HDD_IPA_LOG(CDF_TRACE_LEVEL_DEBUG, "%p", wlan_hdd_stub_priv_to_addr(NBUF_CB_TX_IPA_PRIV(skb)));
+	/* FIXME: This is broken; PRIV_DATA is now 31 bits */
+	ipa_free_skb((struct ipa_rx_data *)wlan_hdd_stub_priv_to_addr(NBUF_CB_TX_IPA_PRIV(skb)));
 
 	hdd_ipa->stats.num_tx_comp_cnt++;
 
@@ -2711,18 +2736,21 @@ static void hdd_ipa_send_pkt_to_tl(
 	skb = ipa_tx_desc->skb;
 
 	cdf_mem_set(skb->cb, sizeof(skb->cb), 0);
-	NBUF_OWNER_ID(skb) = IPA_NBUF_OWNER_ID;
-	NBUF_CALLBACK_FN(skb) = hdd_ipa_nbuf_cb;
+	cdf_nbuf_ipa_owned_set(skb);
+	/* FIXME: This is broken. No such field in cb any more:
+	   NBUF_CALLBACK_FN(skb) = hdd_ipa_nbuf_cb; */
 	if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx)) {
-		NBUF_MAPPED_PADDR_LO(skb) = ipa_tx_desc->dma_addr
-			+ HDD_IPA_WLAN_FRAG_HEADER
-			+ HDD_IPA_WLAN_IPA_HEADER;
+		cdf_nbuf_mapped_paddr_set(skb,
+					  ipa_tx_desc->dma_addr
+					  + HDD_IPA_WLAN_FRAG_HEADER
+					  + HDD_IPA_WLAN_IPA_HEADER);
 		ipa_tx_desc->skb->len -=
 			HDD_IPA_WLAN_FRAG_HEADER + HDD_IPA_WLAN_IPA_HEADER;
 	} else
-		NBUF_MAPPED_PADDR_LO(skb) = ipa_tx_desc->dma_addr;
+		cdf_nbuf_mapped_paddr_set(skb, ipa_tx_desc->dma_addr);
 
-	NBUF_OWNER_PRIV_DATA(skb) = (unsigned long)ipa_tx_desc;
+	/* FIXME: This is broken: priv_data is 31 bits */
+	cdf_nbuf_ipa_priv_set(skb, wlan_hdd_stub_addr_to_priv(ipa_tx_desc));
 
 	adapter->stats.tx_bytes += ipa_tx_desc->skb->len;
 

+ 10 - 10
core/hdd/src/wlan_hdd_lro.c

@@ -67,14 +67,14 @@
 static int hdd_lro_get_skb_header(struct sk_buff *skb, void **ip_hdr,
 	void **tcpudp_hdr, u64 *hdr_flags, void *priv)
 {
-	if (NBUF_IPV6_PROTO(skb)) {
+	if (NBUF_CB_RX_IPV6_PROTO(skb)) {
 		hdr_flags = 0;
 		return -EINVAL;
 	}
 
 	*hdr_flags |= (LRO_IPV4 | LRO_TCP);
 	(*ip_hdr) = skb->data;
-	(*tcpudp_hdr) = skb->data + NBUF_TCP_OFFSET(skb);
+	(*tcpudp_hdr) = skb->data + NBUF_CB_RX_TCP_OFFSET(skb);
 	return 0;
 }
 
@@ -215,7 +215,7 @@ static int hdd_lro_desc_find(hdd_adapter_t *adapter,
 	struct hdd_lro_desc_info *desc_info = &adapter->lro_info.lro_desc_info;
 
 	*lro_desc = NULL;
-	i = NBUF_FLOW_ID_TOEPLITZ(skb) & LRO_DESC_TABLE_SZ_MASK;
+	i = NBUF_CB_RX_FLOW_ID_TOEPLITZ(skb) & LRO_DESC_TABLE_SZ_MASK;
 
 	lro_hash_table = &desc_info->lro_hash_table[i];
 
@@ -321,7 +321,7 @@ static bool hdd_lro_eligible(hdd_adapter_t *adapter, struct sk_buff *skb,
 {
 	struct net_lro_desc *lro_desc = NULL;
 	int hw_lro_eligible =
-		 NBUF_LRO_ELIGIBLE(skb) && (!NBUF_TCP_PURE_ACK(skb));
+		 NBUF_CB_RX_LRO_ELIGIBLE(skb) && (!NBUF_CB_RX_TCP_PURE_ACK(skb));
 
 	if (!hw_lro_eligible)
 		return false;
@@ -624,12 +624,12 @@ enum hdd_lro_rx_status hdd_lro_rx(hdd_context_t *hdd_ctx,
 	enum hdd_lro_rx_status status = HDD_LRO_NO_RX;
 
 	if ((adapter->dev->features & NETIF_F_LRO) &&
-		 NBUF_TCP_PROTO(skb)) {
+		 NBUF_CB_RX_TCP_PROTO(skb)) {
 		struct iphdr *iph;
 		struct tcphdr *tcph;
 		struct net_lro_desc *lro_desc = NULL;
 		iph = (struct iphdr *)skb->data;
-		tcph = (struct tcphdr *)(skb->data + NBUF_TCP_OFFSET(skb));
+		tcph = (struct tcphdr *)(skb->data + NBUF_CB_RX_TCP_OFFSET(skb));
 		if (hdd_lro_eligible(adapter, skb, iph, tcph, &lro_desc)) {
 			struct net_lro_info hdd_lro_info;
 
@@ -637,11 +637,11 @@ enum hdd_lro_rx_status hdd_lro_rx(hdd_context_t *hdd_ctx,
 
 			hdd_lro_info.lro_desc = lro_desc;
 			hdd_lro_info.lro_eligible = 1;
-			hdd_lro_info.tcp_ack_num = NBUF_TCP_ACK_NUM(skb);
+			hdd_lro_info.tcp_ack_num = NBUF_CB_RX_TCP_ACK_NUM(skb);
 			hdd_lro_info.tcp_data_csum =
-				 csum_unfold(htons(NBUF_TCP_CHKSUM(skb)));
-			hdd_lro_info.tcp_seq_num = NBUF_TCP_SEQ_NUM(skb);
-			hdd_lro_info.tcp_win = NBUF_TCP_WIN(skb);
+				 csum_unfold(htons(NBUF_CB_RX_TCP_CHKSUM(skb)));
+			hdd_lro_info.tcp_seq_num = NBUF_CB_RX_TCP_SEQ_NUM(skb);
+			hdd_lro_info.tcp_win = NBUF_CB_RX_TCP_WIN(skb);
 
 			lro_receive_skb_ext(adapter->lro_info.lro_mgr, skb,
 				 (void *)adapter, &hdd_lro_info);

+ 10 - 10
core/hdd/src/wlan_hdd_napi.c

@@ -56,7 +56,7 @@ struct qca_napi_data *hdd_napi_get_all(void)
 	struct qca_napi_data *rp = NULL;
 	struct hif_opaque_softc *hif;
 
-	NAPI_DEBUG("-->\n");
+	NAPI_DEBUG("-->");
 
 	hif = cds_get_context(CDF_MODULE_ID_HIF);
 	if (unlikely(NULL == hif))
@@ -64,7 +64,7 @@ struct qca_napi_data *hdd_napi_get_all(void)
 	else
 		rp = hif_napi_get_all(hif);
 
-	NAPI_DEBUG("<-- [addr=%p]\n", rp);
+	NAPI_DEBUG("<-- [addr=%p]", rp);
 	return rp;
 }
 
@@ -78,14 +78,14 @@ static uint32_t hdd_napi_get_map(void)
 {
 	uint32_t map = 0;
 
-	NAPI_DEBUG("-->\n");
+	NAPI_DEBUG("-->");
 	/* cache once, use forever */
 	if (hdd_napi_ctx == NULL)
 		hdd_napi_ctx = hdd_napi_get_all();
 	if (hdd_napi_ctx != NULL)
 		map = hdd_napi_ctx->ce_map;
 
-	NAPI_DEBUG("<--[map=0x%08x]\n", map);
+	NAPI_DEBUG("<-- [map=0x%08x]", map);
 	return map;
 }
 
@@ -108,7 +108,7 @@ int hdd_napi_create(void)
 	int     ul_polled, dl_polled;
 	int     rc = 0;
 
-	NAPI_DEBUG("-->\n");
+	NAPI_DEBUG("-->");
 
 	hif_ctx = cds_get_context(CDF_MODULE_ID_HIF);
 	if (unlikely(NULL == hif_ctx)) {
@@ -140,7 +140,7 @@ int hdd_napi_create(void)
 			}
 		}
 	}
-	NAPI_DEBUG("<-- [rc=%d]\n", rc);
+	NAPI_DEBUG("<-- [rc=%d]", rc);
 
 	return rc;
 }
@@ -162,7 +162,7 @@ int hdd_napi_destroy(int force)
 	int i;
 	uint32_t hdd_napi_map = hdd_napi_get_map();
 
-	NAPI_DEBUG("--> (force=%d)\n", force);
+	NAPI_DEBUG("--> (force=%d)", force);
 	if (hdd_napi_map) {
 		struct hif_opaque_softc *hif_ctx;
 
@@ -193,7 +193,7 @@ int hdd_napi_destroy(int force)
 	if (0 == hdd_napi_map)
 		hdd_napi_ctx = NULL;
 
-	NAPI_DEBUG("<-- [rc=%d]\n", rc);
+	NAPI_DEBUG("<-- [rc=%d]", rc);
 	return rc;
 }
 
@@ -241,7 +241,7 @@ int hdd_napi_event(enum qca_napi_event event, void *data)
 	int rc = -EFAULT;  /* assume err */
 	struct hif_opaque_softc *hif;
 
-	NAPI_DEBUG("-->(event=%d, aux=%p)\n", event, data);
+	NAPI_DEBUG("-->(event=%d, aux=%p)", event, data);
 
 	hif = cds_get_context(CDF_MODULE_ID_HIF);
 	if (unlikely(NULL == hif))
@@ -249,7 +249,7 @@ int hdd_napi_event(enum qca_napi_event event, void *data)
 	else
 		rc = hif_napi_event(hif, event, data);
 
-	NAPI_DEBUG("<--[rc=%d]\n", rc);
+	NAPI_DEBUG("<--[rc=%d]", rc);
 	return rc;
 }
 

+ 2 - 2
core/hdd/src/wlan_hdd_softap_tx_rx.c

@@ -258,7 +258,7 @@ int hdd_softap_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	++pAdapter->hdd_stats.hddTxRxStats.txXmitClassifiedAC[ac];
 
 #if defined (IPA_OFFLOAD)
-	if (!(NBUF_OWNER_ID(skb) == IPA_NBUF_OWNER_ID)) {
+	if (!cdf_nbuf_ipa_owned_get(skb)) {
 #endif
 		/* Check if the buffer has enough header room */
 		skb = skb_unshare(skb, GFP_ATOMIC);
@@ -299,7 +299,7 @@ int hdd_softap_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Zero out skb's context buffer for the driver to use */
 	cdf_mem_set(skb->cb, sizeof(skb->cb), 0);
-	NBUF_SET_PACKET_TRACK(skb, NBUF_TX_PKT_DATA_TRACK);
+	NBUF_CB_TX_PACKET_TRACK(skb) = NBUF_TX_PKT_DATA_TRACK;
 	NBUF_UPDATE_TX_PKT_COUNT(skb, NBUF_TX_PKT_HDD);
 
 	cdf_dp_trace_set_track(skb);

+ 2 - 2
core/hdd/src/wlan_hdd_tx_rx.c

@@ -368,7 +368,7 @@ int hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Get TL AC corresponding to Qdisc queue index/AC. */
 	ac = hdd_qdisc_ac_to_tl_ac[skb->queue_mapping];
 
-	if (!(NBUF_OWNER_ID(skb) == IPA_NBUF_OWNER_ID)) {
+	if (!cdf_nbuf_ipa_owned_get(skb)) {
 		/* Check if the buffer has enough header room */
 		skb = skb_unshare(skb, GFP_ATOMIC);
 		if (!skb)
@@ -476,7 +476,7 @@ int hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Zero out skb's context buffer for the driver to use */
 	cdf_mem_set(skb->cb, sizeof(skb->cb), 0);
-	NBUF_SET_PACKET_TRACK(skb, NBUF_TX_PKT_DATA_TRACK);
+	NBUF_CB_TX_PACKET_TRACK(skb) = NBUF_TX_PKT_DATA_TRACK;
 	NBUF_UPDATE_TX_PKT_COUNT(skb, NBUF_TX_PKT_HDD);
 
 	cdf_dp_trace_set_track(skb);

+ 1 - 1
core/wma/src/wma_data.c

@@ -2394,7 +2394,7 @@ mgmt_wmi_unified_cmd_send(tp_wma_handle wma_handle, void *tx_frame,
 	bufp += WMI_TLV_HDR_SIZE;
 	cdf_mem_copy(bufp, pData, bufp_len);
 	cdf_nbuf_map_single(cdf_ctx, tx_frame, CDF_DMA_TO_DEVICE);
-	dma_addr = cdf_nbuf_get_frag_paddr_lo(tx_frame, 0);
+	dma_addr = cdf_nbuf_get_frag_paddr(tx_frame, 0);
 	cmd->paddr_lo = (uint32_t)(dma_addr & 0xffffffff);
 #if defined(HELIUMPLUS_PADDR64)
 	cmd->paddr_hi = (uint32_t)((dma_addr >> 32) & 0x1F);

+ 1 - 1
core/wma/src/wma_mgmt.c

@@ -220,7 +220,7 @@ static void wma_send_bcn_buf_ll(tp_wma_handle wma,
 	cmd->vdev_id = vdev_id;
 	cmd->data_len = bcn->len;
 	cmd->frame_ctrl = *((A_UINT16 *) wh->i_fc);
-	cmd->frag_ptr = cdf_nbuf_get_frag_paddr_lo(bcn->buf, 0);
+	cmd->frag_ptr = cdf_nbuf_get_frag_paddr(bcn->buf, 0);
 
 	/* notify Firmware of DTM and mcast/bcast traffic */
 	if (tim_ie->dtim_count == 0) {