
Merge "qcacmn: Move the lock initialization to module open for policy manager"

Linux Build Service Account, 8 years ago
parent commit 467fede484

+ 133 - 1
dp/wifi3.0/dp_rx.h

@@ -80,6 +80,114 @@ struct dp_rx_desc {
 	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
 			RX_DESC_COOKIE_INDEX_SHIFT)
 
+/**
+ * dp_rx_xor_block() - xor block of data
+ * @b: destination data block
+ * @a: source data block
+ * @len: length of the data to process
+ *
+ * Returns: None
+ */
+static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len)
+{
+	qdf_size_t i;
+
+	for (i = 0; i < len; i++)
+		b[i] ^= a[i];
+}
+
+/**
+ * dp_rx_rotl() - rotate the bits left
+ * @val: unsigned integer input value
+ * @bits: number of bits
+ *
+ * Returns: Integer with left rotated by number of 'bits'
+ */
+static inline uint32_t dp_rx_rotl(uint32_t val, int bits)
+{
+	return (val << bits) | (val >> (32 - bits));
+}
+
+/**
+ * dp_rx_rotr() - rotate the bits right
+ * @val: unsigned integer input value
+ * @bits: number of bits
+ *
+ * Returns: Integer with right rotated by number of 'bits'
+ */
+static inline uint32_t dp_rx_rotr(uint32_t val, int bits)
+{
+	return (val >> bits) | (val << (32 - bits));
+}
+
+/**
+ * dp_rx_xswap() - swap the two bytes in each 16-bit half of a 32-bit word
+ * @val: unsigned integer input value
+ *
+ * Returns: Integer with the bytes of each 16-bit half swapped
+ */
+static inline uint32_t dp_rx_xswap(uint32_t val)
+{
+	return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8);
+}
+
+/**
+ * dp_rx_get_le32_split() - assemble 32 bits from individual bytes (little endian)
+ * @b0: byte 0
+ * @b1: byte 1
+ * @b2: byte 2
+ * @b3: byte 3
+ *
+ * Returns: 32-bit value assembled from b0..b3 in little-endian order
+ */
+static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2,
+					uint8_t b3)
+{
+	return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
+}
+
+/**
+ * dp_rx_get_le32() - get little endian 32 bits
+ * @p: source byte array (at least 4 bytes)
+ *
+ * Returns: 32-bit value assembled from p[0..3] in little-endian order
+ */
+static inline uint32_t dp_rx_get_le32(const uint8_t *p)
+{
+	return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]);
+}
+
+/*
+ * dp_rx_put_le32() - put little endian 32 bits
+ * @p: destination char array
+ * @v: source 32-bit integer
+ *
+ * Returns: None
+ */
+static inline void dp_rx_put_le32(uint8_t *p, uint32_t v)
+{
+	p[0] = (v) & 0xff;
+	p[1] = (v >> 8) & 0xff;
+	p[2] = (v >> 16) & 0xff;
+	p[3] = (v >> 24) & 0xff;
+}
+
+/* Michael MIC block function: mixes one 32-bit word into the (l, r) state */
+#define dp_rx_michael_block(l, r)	\
+	do {					\
+		r ^= dp_rx_rotl(l, 17);	\
+		l += r;				\
+		r ^= dp_rx_xswap(l);		\
+		l += r;				\
+		r ^= dp_rx_rotl(l, 3);	\
+		l += r;				\
+		r ^= dp_rx_rotr(l, 2);	\
+		l += r;				\
+	} while (0)
+
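Taken together, these helpers implement the Michael keyed hash used by TKIP. Below is a minimal sketch of how they combine, assuming a flat buffer whose length is a multiple of 4; the function name is hypothetical, and the real flow in dp_rx_defrag_mic() (dp_rx_defrag.c) walks an nbuf chain and handles blocks that straddle buffers.

	/*
	 * Example only (not part of this change): Michael MIC of a flat
	 * buffer whose length is a multiple of 4, using the helpers above.
	 */
	static inline void dp_rx_michael_mic_example(const uint8_t key[8],
						     const uint8_t *data,
						     qdf_size_t len,
						     uint8_t mic[8])
	{
		uint32_t l = dp_rx_get_le32(key);
		uint32_t r = dp_rx_get_le32(key + 4);

		while (len >= 4) {
			l ^= dp_rx_get_le32(data);
			dp_rx_michael_block(l, r);
			data += 4;
			len -= 4;
		}

		/* final block: 0x5a terminator followed by zero padding */
		l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0);
		dp_rx_michael_block(l, r);
		dp_rx_michael_block(l, r);

		dp_rx_put_le32(mic, l);
		dp_rx_put_le32(mic + 4, r);
	}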
 /**
  * struct dp_rx_desc_list_elem_t
  *
@@ -340,6 +448,7 @@ static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
 	return QDF_STATUS_E_FAILURE;
 }
 #endif
+
 /**
  * dp_rx_cookie_2_link_desc_va() - Converts cookie to a virtual address of
  *				   the MSDU Link Descriptor
@@ -392,10 +501,33 @@ void *dp_rx_cookie_2_mon_link_desc_va(struct dp_pdev *pdev,
 	link_desc_va = pdev->link_desc_banks[buf_info->sw_cookie].base_vaddr +
 		(buf_info->paddr -
 			pdev->link_desc_banks[buf_info->sw_cookie].base_paddr);
-
 	return link_desc_va;
 }
 
+/**
+ * dp_rx_defrag_concat() - Concatenate the fragments
+ *
+ * @dst: destination pointer to the buffer
+ * @src: source pointer from where the fragment payload is to be copied
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS dp_rx_defrag_concat(qdf_nbuf_t dst, qdf_nbuf_t src)
+{
+	/*
+	 * Inside qdf_nbuf_cat, if it is necessary to reallocate dst
+	 * to provide space for src, the headroom portion is copied from
+	 * the original dst buffer to the larger new dst buffer.
+	 * (This is needed, because the headroom of the dst buffer
+	 * contains the rx desc.)
+	 */
+	if (qdf_nbuf_cat(dst, src))
+		return QDF_STATUS_E_DEFRAG_ERROR;
+
+	return QDF_STATUS_SUCCESS;
+}
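As a usage sketch (illustrative only, the helper name is hypothetical): once a complete, in-order fragment chain has been built, the fragments can be folded into the head nbuf one at a time with dp_rx_defrag_concat().

	/* Illustrative only: fold a fragment chain into its head nbuf */
	static inline QDF_STATUS dp_rx_defrag_concat_chain(qdf_nbuf_t head)
	{
		qdf_nbuf_t cur = qdf_nbuf_next(head);

		while (cur) {
			qdf_nbuf_t next = qdf_nbuf_next(cur);

			if (dp_rx_defrag_concat(head, cur) != QDF_STATUS_SUCCESS)
				return QDF_STATUS_E_DEFRAG_ERROR;

			cur = next;
		}

		qdf_nbuf_set_next(head, NULL);

		return QDF_STATUS_SUCCESS;
	}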
+
+
 /*
  * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
  *			       called during dp rx initialization

+ 1365 - 0
dp/wifi3.0/dp_rx_defrag.c

@@ -0,0 +1,1365 @@
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "dp_types.h"
+#include "dp_rx.h"
+#include "dp_peer.h"
+#include "hal_api.h"
+#include "qdf_trace.h"
+#include "qdf_nbuf.h"
+#include "dp_rx_defrag.h"
+#include <enet.h>	/* LLC_SNAP_HDR_LEN */
+
+const struct dp_rx_defrag_cipher dp_f_ccmp = {
+	"AES-CCM",
+	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
+	IEEE80211_WEP_MICLEN,
+	0,
+};
+
+const struct dp_rx_defrag_cipher dp_f_tkip = {
+	"TKIP",
+	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_EXTIVLEN,
+	IEEE80211_WEP_CRCLEN,
+	IEEE80211_WEP_MICLEN,
+};
+
+const struct dp_rx_defrag_cipher dp_f_wep = {
+	"WEP",
+	IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN,
+	IEEE80211_WEP_CRCLEN,
+	0,
+};
+
+/*
+ * dp_rx_defrag_frames_free(): Free fragment chain
+ * @frames: Fragment chain
+ *
+ * Iterates through the fragment chain and frees them
+ * Returns: None
+ */
+static void dp_rx_defrag_frames_free(qdf_nbuf_t frames)
+{
+	qdf_nbuf_t next, frag = frames;
+
+	while (frag) {
+		next = qdf_nbuf_next(frag);
+		qdf_nbuf_free(frag);
+		frag = next;
+	}
+}
+
+/*
+ * dp_rx_clear_saved_desc_info(): Clears saved descriptor info
+ * @peer: Pointer to the peer data structure
+ * @tid: Traffic Identifier (TID)
+ *
+ * Clears the MPDU descriptor info and MSDU link pointers that were
+ * saved from the REO ring descriptor. The cache is maintained per peer,
+ * per TID
+ *
+ * Returns: None
+ */
+static void dp_rx_clear_saved_desc_info(struct dp_peer *peer, unsigned tid)
+{
+	hal_rx_clear_mpdu_desc_info(
+		&peer->rx_tid[tid].transcap_rx_mpdu_desc_info);
+
+	hal_rx_clear_msdu_link_ptr(
+		&peer->rx_tid[tid].transcap_msdu_link_ptr[0],
+		HAL_RX_MAX_SAVED_RING_DESC);
+}
+
+/*
+ * dp_rx_defrag_waitlist_add(): Update the per-SoC defrag wait list
+ * @peer: Pointer to the peer data structure
+ * @tid: Traffic Identifier (TID)
+ *
+ * Appends the per-TID reorder state to the SoC-wide fragment wait list
+ *
+ * Returns: None
+ */
+static void dp_rx_defrag_waitlist_add(struct dp_peer *peer, unsigned tid)
+{
+	struct dp_soc *psoc = peer->vdev->pdev->soc;
+	struct dp_rx_tid *rx_reorder = &peer->rx_tid[tid];
+
+	/* TODO: use LIST macros instead of TAIL macros */
+	TAILQ_INSERT_TAIL(&psoc->rx.defrag.waitlist, rx_reorder,
+				defrag_waitlist_elem);
+}
+
+/*
+ * dp_rx_defrag_waitlist_remove(): Remove fragments from waitlist
+ * @peer: Pointer to the peer data structure
+ * @tid: Traffic Identifier (TID)
+ *
+ * Removes the per-TID reorder state from the fragment wait list
+ *
+ * Returns: None
+ */
+static void dp_rx_defrag_waitlist_remove(struct dp_peer *peer, unsigned tid)
+{
+	struct dp_pdev *pdev = peer->vdev->pdev;
+	struct dp_soc *soc = pdev->soc;
+	struct dp_rx_tid *rx_reorder;
+
+	if (tid >= DP_MAX_TIDS) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"TID out of bounds: %d", tid);
+		qdf_assert(0);
+		return;
+	}
+
+	rx_reorder = &peer->rx_tid[tid];
+
+	if (rx_reorder->defrag_waitlist_elem.tqe_next != NULL) {
+
+		TAILQ_REMOVE(&soc->rx.defrag.waitlist, rx_reorder,
+				defrag_waitlist_elem);
+		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
+		rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
+	} else if (rx_reorder->defrag_waitlist_elem.tqe_prev == NULL) {
+
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"waitlist->tqe_prev is NULL");
+		rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
+		qdf_assert(0);
+	}
+}
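For context, a timeout sweep over this wait list could look like the sketch below; the flush helper is hypothetical and not part of this change. Entries are appended in arrival order, so the walk can stop at the first entry whose deadline has not expired.

	/* Hypothetical sketch: drop TIDs whose defrag timeout has expired */
	static void dp_rx_defrag_waitlist_flush_sketch(struct dp_soc *soc)
	{
		struct dp_rx_tid *rx_reorder;
		uint32_t now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());

		while ((rx_reorder = TAILQ_FIRST(&soc->rx.defrag.waitlist))) {
			if (rx_reorder->defrag_timeout_ms > now_ms)
				break;

			TAILQ_REMOVE(&soc->rx.defrag.waitlist, rx_reorder,
					defrag_waitlist_elem);
			rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
			rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;

			/* stored fragments for this TID would be freed here */
		}
	}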
+
+/*
+ * dp_rx_defrag_fraglist_insert(): Create a per-sequence fragment list
+ * @peer: Pointer to the peer data structure
+ * @tid: Traffic Identifier (TID)
+ * @head_addr: Pointer to head list
+ * @tail_addr: Pointer to tail list
+ * @frag: Incoming fragment
+ * @all_frag_present: Flag to indicate whether all fragments are received
+ *
+ * Build a per-tid, per-sequence fragment list.
+ *
+ * Returns: None
+ */
+static void dp_rx_defrag_fraglist_insert(struct dp_peer *peer, unsigned tid,
+	qdf_nbuf_t *head_addr, qdf_nbuf_t *tail_addr, qdf_nbuf_t frag,
+	uint8_t *all_frag_present)
+{
+	qdf_nbuf_t next;
+	qdf_nbuf_t prev = NULL;
+	qdf_nbuf_t cur;
+	uint16_t head_fragno, cur_fragno, next_fragno;
+	uint8_t last_morefrag = 1, count = 0;
+	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+	uint8_t *rx_desc_info;
+
+	qdf_assert(frag);
+	qdf_assert(head_addr);
+	qdf_assert(tail_addr);
+
+	rx_desc_info = qdf_nbuf_data(frag);
+	cur_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
+
+	/* If this is the first fragment */
+	if (!(*head_addr)) {
+		*head_addr = *tail_addr = frag;
+		qdf_nbuf_set_next(*tail_addr, NULL);
+		rx_tid->curr_frag_num = cur_fragno;
+
+		goto end;
+	}
+
+	/* In sequence fragment */
+	if (cur_fragno > rx_tid->curr_frag_num) {
+		qdf_nbuf_set_next(*tail_addr, frag);
+		*tail_addr = frag;
+		qdf_nbuf_set_next(*tail_addr, NULL);
+		rx_tid->curr_frag_num = cur_fragno;
+	} else {
+		/* Out of sequence fragment */
+		cur = *head_addr;
+		rx_desc_info = qdf_nbuf_data(cur);
+		head_fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
+
+		if (cur_fragno == head_fragno) {
+			qdf_nbuf_free(frag);
+			*all_frag_present = 0;
+		} else if (head_fragno > cur_fragno) {
+			qdf_nbuf_set_next(frag, cur);
+			cur = frag;
+			*head_addr = frag; /* head pointer to be updated */
+		} else {
+			while ((cur_fragno > head_fragno) && cur != NULL) {
+				prev = cur;
+				cur = qdf_nbuf_next(cur);
+				if (!cur)
+					break;
+				rx_desc_info = qdf_nbuf_data(cur);
+				head_fragno =
+					dp_rx_frag_get_mpdu_frag_number(
+								rx_desc_info);
+			}
+			qdf_nbuf_set_next(prev, frag);
+			qdf_nbuf_set_next(frag, cur);
+		}
+	}
+
+	next = qdf_nbuf_next(*head_addr);
+
+	rx_desc_info = qdf_nbuf_data(*tail_addr);
+	last_morefrag = hal_rx_get_rx_more_frag_bit(rx_desc_info);
+
+	/* TODO: optimize the loop */
+	if (!last_morefrag) {
+		/* Check if all fragments are present */
+		do {
+			rx_desc_info = qdf_nbuf_data(next);
+			next_fragno =
+				dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
+			count++;
+
+			if (next_fragno != count)
+				break;
+
+			next = qdf_nbuf_next(next);
+		} while (next);
+
+		if (!next) {
+			*all_frag_present = 1;
+			return;
+		}
+	}
+
+end:
+	*all_frag_present = 0;
+}
+
+
+/*
+ * dp_rx_defrag_tkip_decap(): decap tkip encrypted fragment
+ * @msdu: Pointer to the fragment
+ * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
+ *
+ * decap tkip encrypted fragment
+ *
+ * Returns: QDF_STATUS
+ */
+static QDF_STATUS dp_rx_defrag_tkip_decap(qdf_nbuf_t msdu, uint16_t hdrlen)
+{
+	uint8_t *ivp, *orig_hdr;
+	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
+
+	/* start of 802.11 header info */
+	orig_hdr = (uint8_t *)(qdf_nbuf_data(msdu) + rx_desc_len);
+
+	/* TKIP header is located post 802.11 header */
+	ivp = orig_hdr + hdrlen;
+	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"IEEE80211_WEP_EXTIV is missing in TKIP fragment");
+		return QDF_STATUS_E_DEFRAG_ERROR;
+	}
+
+	qdf_mem_move(orig_hdr + dp_f_tkip.ic_header, orig_hdr, hdrlen);
+
+	qdf_nbuf_pull_head(msdu, dp_f_tkip.ic_header);
+	qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_trailer);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * dp_rx_defrag_ccmp_demic(): Remove MIC information from CCMP fragment
+ * @nbuf: Pointer to the fragment buffer
+ * @hdrlen: 802.11 header length (mostly useful in 4 addr frames)
+ *
+ * Remove MIC information from CCMP fragment
+ *
+ * Returns: QDF_STATUS
+ */
+static QDF_STATUS dp_rx_defrag_ccmp_demic(qdf_nbuf_t nbuf, uint16_t hdrlen)
+{
+	uint8_t *ivp, *orig_hdr;
+	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
+
+	/* start of the 802.11 header */
+	orig_hdr = (uint8_t *)(qdf_nbuf_data(nbuf) + rx_desc_len);
+
+	/* CCMP header is located after 802.11 header */
+	ivp = orig_hdr + hdrlen;
+	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
+		return QDF_STATUS_E_DEFRAG_ERROR;
+
+	qdf_nbuf_trim_tail(nbuf, dp_f_ccmp.ic_trailer);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * dp_rx_defrag_ccmp_decap(): decap CCMP encrypted fragment
+ * @nbuf: Pointer to the fragment
+ * @hdrlen: length of the header information
+ *
+ * decap CCMP encrypted fragment
+ *
+ * Returns: QDF_STATUS
+ */
+static QDF_STATUS dp_rx_defrag_ccmp_decap(qdf_nbuf_t nbuf, uint16_t hdrlen)
+{
+	uint8_t *ivp, *origHdr;
+	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
+
+	origHdr = (uint8_t *) (qdf_nbuf_data(nbuf) + rx_desc_len);
+	ivp = origHdr + hdrlen;
+
+	if (!(ivp[IEEE80211_WEP_IVLEN] & IEEE80211_WEP_EXTIV))
+		return QDF_STATUS_E_DEFRAG_ERROR;
+
+	qdf_mem_move(origHdr + dp_f_ccmp.ic_header, origHdr, hdrlen);
+	qdf_nbuf_pull_head(nbuf, dp_f_ccmp.ic_header);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * dp_rx_defrag_wep_decap(): decap WEP encrypted fragment
+ * @msdu: Pointer to the fragment
+ * @hdrlen: length of the header information
+ *
+ * decap WEP encrypted fragment
+ *
+ * Returns: QDF_STATUS
+ */
+static QDF_STATUS dp_rx_defrag_wep_decap(qdf_nbuf_t msdu, uint16_t hdrlen)
+{
+	uint8_t *origHdr;
+	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
+
+	origHdr = (uint8_t *) (qdf_nbuf_data(msdu) + rx_desc_len);
+	qdf_mem_move(origHdr + dp_f_wep.ic_header, origHdr, hdrlen);
+
+	qdf_nbuf_pull_head(msdu, dp_f_wep.ic_header);
+	qdf_nbuf_trim_tail(msdu, dp_f_wep.ic_trailer);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * dp_rx_defrag_hdrsize(): Calculate the header size of the received fragment
+ * @nbuf: Pointer to the fragment
+ *
+ * Calculate the header size of the received fragment
+ *
+ * Returns: header size (uint16_t)
+ */
+static uint16_t dp_rx_defrag_hdrsize(qdf_nbuf_t nbuf)
+{
+	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
+	uint16_t size = sizeof(struct ieee80211_frame);
+	uint16_t fc = 0;
+	uint32_t to_ds, fr_ds;
+	uint8_t frm_ctrl_valid;
+	uint16_t frm_ctrl_field;
+
+	to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
+	fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
+	frm_ctrl_valid = hal_rx_get_mpdu_frame_control_valid(rx_tlv_hdr);
+	frm_ctrl_field = hal_rx_get_frame_ctrl_field(rx_tlv_hdr);
+
+	if (to_ds && fr_ds)
+		size += IEEE80211_ADDR_LEN;
+
+	if (frm_ctrl_valid) {
+		fc = frm_ctrl_field;
+
+		/* use the first byte for validation */
+		if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) {
+			size += sizeof(uint16_t);
+			/* use the second byte for validation */
+			if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
+				size += sizeof(struct ieee80211_htc);
+		}
+	}
+
+	return size;
+}
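As a worked example of the arithmetic above, assuming the usual sizes (24-byte struct ieee80211_frame, 6-byte address, 2-byte QoS control, 4-byte struct ieee80211_htc): a regular QoS data frame yields 24 + 2 = 26 bytes, while a 4-address frame (to_ds and fr_ds both set) carrying QoS with the Order bit set yields 24 + 6 + 2 + 4 = 36 bytes.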
+
+/*
+ * dp_rx_defrag_michdr(): Calculate the pseudo header used for the MIC
+ * @wh0: Pointer to the wireless header of the fragment
+ * @hdr: Array to hold the pseudo header
+ *
+ * Calculate the Michael MIC pseudo header
+ *
+ * Returns: None
+ */
+static void dp_rx_defrag_michdr(const struct ieee80211_frame *wh0,
+				uint8_t hdr[])
+{
+	const struct ieee80211_frame_addr4 *wh =
+		(const struct ieee80211_frame_addr4 *)wh0;
+
+	switch (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) {
+	case IEEE80211_FC1_DIR_NODS:
+		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
+		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
+					   wh->i_addr2);
+		break;
+	case IEEE80211_FC1_DIR_TODS:
+		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
+		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
+					   wh->i_addr2);
+		break;
+	case IEEE80211_FC1_DIR_FROMDS:
+		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr1); /* DA */
+		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
+					   wh->i_addr3);
+		break;
+	case IEEE80211_FC1_DIR_DSTODS:
+		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr, wh->i_addr3); /* DA */
+		DP_RX_DEFRAG_IEEE80211_ADDR_COPY(hdr + IEEE80211_ADDR_LEN,
+					   wh->i_addr4);
+		break;
+	}
+
+	/*
+	 * Bit 7 is IEEE80211_FC0_SUBTYPE_QOS for data frame, but
+	 * it could also be set for deauth, disassoc, action, etc. for
+	 * a mgt type frame. It comes into picture for MFP.
+	 */
+	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
+		const struct ieee80211_qosframe *qwh =
+			(const struct ieee80211_qosframe *)wh;
+		hdr[12] = qwh->i_qos[0] & IEEE80211_QOS_TID;
+	} else {
+		hdr[12] = 0;
+	}
+
+	hdr[13] = hdr[14] = hdr[15] = 0;	/* reserved */
+}
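For reference, the 16-byte pseudo header laid out by this function follows the Michael MIC definition:

	hdr[0..5]   destination address (DA)
	hdr[6..11]  source address (SA)
	hdr[12]     QoS TID (0 for non-QoS frames)
	hdr[13..15] reserved, set to 0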
+
+/*
+ * dp_rx_defrag_mic(): Compute the Michael MIC
+ * @key: Pointer to the key
+ * @wbuf: fragment buffer
+ * @off: Offset into the payload
+ * @data_len: Data length
+ * @mic: Array to hold the computed MIC
+ *
+ * Computes the Michael MIC over the pseudo header and the payload
+ *
+ * Returns: QDF_STATUS
+ */
+static QDF_STATUS dp_rx_defrag_mic(const uint8_t *key, qdf_nbuf_t wbuf,
+		uint16_t off, uint16_t data_len, uint8_t mic[])
+{
+	uint8_t hdr[16] = { 0, };
+	uint32_t l, r;
+	const uint8_t *data;
+	uint32_t space;
+	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
+
+	dp_rx_defrag_michdr((struct ieee80211_frame *)(qdf_nbuf_data(wbuf)
+		+ rx_desc_len), hdr);
+	l = dp_rx_get_le32(key);
+	r = dp_rx_get_le32(key + 4);
+
+	/* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
+	l ^= dp_rx_get_le32(hdr);
+	dp_rx_michael_block(l, r);
+	l ^= dp_rx_get_le32(&hdr[4]);
+	dp_rx_michael_block(l, r);
+	l ^= dp_rx_get_le32(&hdr[8]);
+	dp_rx_michael_block(l, r);
+	l ^= dp_rx_get_le32(&hdr[12]);
+	dp_rx_michael_block(l, r);
+
+	/* first buffer has special handling */
+	data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len + off;
+	space = qdf_nbuf_len(wbuf) - rx_desc_len - off;
+
+	for (;; ) {
+		if (space > data_len)
+			space = data_len;
+
+		/* collect 32-bit blocks from current buffer */
+		while (space >= sizeof(uint32_t)) {
+			l ^= dp_rx_get_le32(data);
+			dp_rx_michael_block(l, r);
+			data += sizeof(uint32_t);
+			space -= sizeof(uint32_t);
+			data_len -= sizeof(uint32_t);
+		}
+		if (data_len < sizeof(uint32_t))
+			break;
+
+		wbuf = qdf_nbuf_next(wbuf);
+		if (wbuf == NULL)
+			return QDF_STATUS_E_DEFRAG_ERROR;
+
+		if (space != 0) {
+			const uint8_t *data_next;
+			/*
+			 * Block straddles buffers, split references.
+			 */
+			data_next =
+				(uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len;
+			if ((qdf_nbuf_len(wbuf) - rx_desc_len) <
+				sizeof(uint32_t) - space) {
+				return QDF_STATUS_E_DEFRAG_ERROR;
+			}
+			switch (space) {
+			case 1:
+				l ^= dp_rx_get_le32_split(data[0],
+					data_next[0], data_next[1],
+					data_next[2]);
+				data = data_next + 3;
+				space = (qdf_nbuf_len(wbuf) - rx_desc_len)
+					- 3;
+				break;
+			case 2:
+				l ^= dp_rx_get_le32_split(data[0], data[1],
+						    data_next[0], data_next[1]);
+				data = data_next + 2;
+				space = (qdf_nbuf_len(wbuf) - rx_desc_len)
+					- 2;
+				break;
+			case 3:
+				l ^= dp_rx_get_le32_split(data[0], data[1],
+					data[2], data_next[0]);
+				data = data_next + 1;
+				space = (qdf_nbuf_len(wbuf) - rx_desc_len)
+					- 1;
+				break;
+			}
+			dp_rx_michael_block(l, r);
+			data_len -= sizeof(uint32_t);
+		} else {
+			/*
+			 * Setup for next buffer.
+			 */
+			data = (uint8_t *) qdf_nbuf_data(wbuf) + rx_desc_len;
+			space = qdf_nbuf_len(wbuf) - rx_desc_len;
+		}
+	}
+	/* Last block and padding (0x5a, 4..7 x 0) */
+	switch (data_len) {
+	case 0:
+		l ^= dp_rx_get_le32_split(0x5a, 0, 0, 0);
+		break;
+	case 1:
+		l ^= dp_rx_get_le32_split(data[0], 0x5a, 0, 0);
+		break;
+	case 2:
+		l ^= dp_rx_get_le32_split(data[0], data[1], 0x5a, 0);
+		break;
+	case 3:
+		l ^= dp_rx_get_le32_split(data[0], data[1], data[2], 0x5a);
+		break;
+	}
+	dp_rx_michael_block(l, r);
+	dp_rx_michael_block(l, r);
+	dp_rx_put_le32(mic, l);
+	dp_rx_put_le32(mic + 4, r);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * dp_rx_defrag_tkip_demic(): Verify and remove the MIC from the TKIP frame
+ * @key: Pointer to the key
+ * @msdu: fragment buffer
+ * @hdrlen: Length of the header information
+ *
+ * Remove MIC information from the TKIP frame
+ *
+ * Returns: QDF_STATUS
+ */
+static QDF_STATUS dp_rx_defrag_tkip_demic(const uint8_t *key,
+					qdf_nbuf_t msdu, uint16_t hdrlen)
+{
+	QDF_STATUS status;
+	uint32_t pktlen;
+	uint8_t mic[IEEE80211_WEP_MICLEN];
+	uint8_t mic0[IEEE80211_WEP_MICLEN];
+	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
+
+	pktlen = qdf_nbuf_len(msdu) - rx_desc_len;
+
+	status = dp_rx_defrag_mic(key, msdu, hdrlen,
+				pktlen - (hdrlen + dp_f_tkip.ic_miclen), mic);
+
+	if (QDF_IS_STATUS_ERROR(status))
+		return status;
+
+	qdf_nbuf_copy_bits(msdu, pktlen - dp_f_tkip.ic_miclen + rx_desc_len,
+				dp_f_tkip.ic_miclen, (caddr_t)mic0);
+
+	if (!qdf_mem_cmp(mic, mic0, dp_f_tkip.ic_miclen))
+		return QDF_STATUS_E_DEFRAG_ERROR;
+
+	qdf_nbuf_trim_tail(msdu, dp_f_tkip.ic_miclen);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * dp_rx_defrag_decap_recombine(): Recombine the fragments
+ * @peer: Pointer to the peer
+ * @head_msdu: head of the fragment list
+ * @tid: Traffic Identifier (TID)
+ * @hdrsize: Header size
+ *
+ * Recombine fragments
+ *
+ * Returns: QDF_STATUS
+ */
+static QDF_STATUS dp_rx_defrag_decap_recombine(struct dp_peer *peer,
+			qdf_nbuf_t head_msdu, unsigned tid, uint16_t hdrsize)
+{
+	qdf_nbuf_t msdu = head_msdu;
+	uint8_t i;
+	uint8_t num_ring_desc_saved = peer->rx_tid[tid].curr_ring_desc_idx;
+	uint8_t num_msdus;
+
+	/* Stitch fragments together */
+	for (i = 0; (i < num_ring_desc_saved) && msdu; i++) {
+
+		struct hal_rx_msdu_link_ptr_info *msdu_link_ptr_info =
+			&peer->rx_tid[tid].transcap_msdu_link_ptr[i];
+
+		struct hal_rx_mpdu_desc_info *mpdu_desc_info =
+			&peer->rx_tid[tid].transcap_rx_mpdu_desc_info;
+
+		num_msdus = hal_rx_chain_msdu_links(msdu, msdu_link_ptr_info,
+				mpdu_desc_info);
+
+		msdu = qdf_nbuf_next(msdu);
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_rx_defrag_err() - rx err handler
+ * @vdev_id: vdev id
+ * @peer_mac_addr: peer mac address
+ * @tid: TID
+ * @tsf32: TSF
+ * @err_type: error type
+ * @rx_frame: rx frame
+ * @pn: PN Number
+ * @key_id: key id
+ *
+ * This function handles rx errors and sends MIC error notifications
+ *
+ * Return: None
+ */
+static void dp_rx_defrag_err(uint8_t vdev_id, uint8_t *peer_mac_addr,
+	int tid, uint32_t tsf32, uint32_t err_type, qdf_nbuf_t rx_frame,
+	uint64_t *pn, uint8_t key_id)
+{
+	/* TODO: Who needs to know about the TKIP MIC error */
+}
+
+/*
+ * dp_rx_defrag_qos_decap(): Remove the QoS control field from the frame
+ * @nbuf: Pointer to the frame buffer
+ * @hdrlen: Length of the header information
+ *
+ * Strips the QoS control (and, if present, HT control) field from the
+ * 802.11 header and clears the QoS subtype bit
+ *
+ * Returns: None
+ */
+static void dp_rx_defrag_qos_decap(qdf_nbuf_t nbuf, uint16_t hdrlen)
+{
+	struct ieee80211_frame *wh;
+	uint16_t qoslen;
+	int pkt_tlv_size = sizeof(struct rx_pkt_tlvs); /* pkt TLV hdr size */
+	uint16_t fc = 0;
+
+	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
+
+	/* Get the frame control field if it is valid */
+	if (hal_rx_get_mpdu_frame_control_valid(rx_tlv_hdr))
+		fc = hal_rx_get_frame_ctrl_field(rx_tlv_hdr);
+
+	wh = (struct ieee80211_frame *)(qdf_nbuf_data(nbuf) + pkt_tlv_size);
+
+	if (DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(fc & 0xff)) {
+		qoslen = sizeof(struct ieee80211_qoscntl);
+
+		/* QoS frame with the Order bit set indicates an HTC frame */
+		if (((fc & 0xff00) >> 8) & IEEE80211_FC1_ORDER)
+			qoslen += sizeof(struct ieee80211_htc);
+
+		/* remove QoS field from header */
+		hdrlen -= qoslen;
+		qdf_mem_move((uint8_t *)wh + qoslen, wh, hdrlen);
+
+		wh = (struct ieee80211_frame *)qdf_nbuf_pull_head(nbuf,
+							pkt_tlv_size +
+							qoslen);
+		/* clear QoS bit */
+		if (wh)
+			wh->i_fc[0] &= ~IEEE80211_FC0_SUBTYPE_QOS;
+	}
+}
+
+/*
+ * dp_rx_defrag_nwifi_to_8023(): Transcap 802.11 to 802.3
+ * @msdu: Pointer to the fragment buffer
+ *
+ * Transcap the fragment from 802.11 to 802.3
+ *
+ * Returns: None
+ */
+static void dp_rx_defrag_nwifi_to_8023(qdf_nbuf_t msdu)
+{
+	struct ieee80211_frame wh;
+	uint32_t hdrsize;
+	struct llc_snap_hdr_t llchdr;
+	struct ethernet_hdr_t *eth_hdr;
+	int rx_desc_len = sizeof(struct rx_pkt_tlvs);
+	struct ieee80211_frame *wh_ptr;
+
+	wh_ptr = (struct ieee80211_frame *)(qdf_nbuf_data(msdu) +
+		rx_desc_len);
+	qdf_mem_copy(&wh, wh_ptr, sizeof(wh));
+	hdrsize = sizeof(struct ieee80211_frame);
+	qdf_mem_copy(&llchdr, ((uint8_t *) (qdf_nbuf_data(msdu) +
+		rx_desc_len)) + hdrsize,
+		sizeof(struct llc_snap_hdr_t));
+
+	/*
+	 * Now move the data pointer to the beginning of the mac header :
+	 * new-header = old-hdr + (wifihdrsize + llchdrsize - ethhdrsize)
+	 */
+	qdf_nbuf_pull_head(msdu, (rx_desc_len + hdrsize +
+		sizeof(struct llc_snap_hdr_t) -
+		sizeof(struct ethernet_hdr_t)));
+	eth_hdr = (struct ethernet_hdr_t *)(qdf_nbuf_data(msdu));
+
+	switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
+	case IEEE80211_FC1_DIR_NODS:
+		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
+			IEEE80211_ADDR_LEN);
+		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2,
+			IEEE80211_ADDR_LEN);
+		break;
+	case IEEE80211_FC1_DIR_TODS:
+		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr3,
+			IEEE80211_ADDR_LEN);
+		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr2,
+			IEEE80211_ADDR_LEN);
+		break;
+	case IEEE80211_FC1_DIR_FROMDS:
+		qdf_mem_copy(eth_hdr->dest_addr, wh.i_addr1,
+			IEEE80211_ADDR_LEN);
+		qdf_mem_copy(eth_hdr->src_addr, wh.i_addr3,
+			IEEE80211_ADDR_LEN);
+		break;
+	case IEEE80211_FC1_DIR_DSTODS:
+		break;
+	}
+
+	/* TODO: Is it required to copy rx_pkt_tlvs
+	 * to the start of data buffer?
+	 */
+	qdf_mem_copy(eth_hdr->ethertype, llchdr.ethertype,
+			sizeof(llchdr.ethertype));
+}
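As a worked example of the pull above, assuming the usual sizes (24-byte 802.11 header, 8-byte LLC/SNAP header, 14-byte Ethernet header): the data pointer advances by rx_desc_len + 24 + 8 - 14 = rx_desc_len + 18 bytes, which leaves exactly 14 bytes in front of the payload for the reconstructed Ethernet header.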
+
+/*
+ * dp_rx_defrag_reo_reinject(): Reinject the fragment chain back into REO
+ * @peer: Pointer to the peer
+ * @tid: Traffic Identifier (TID)
+ *
+ * Reinject the fragment chain back into REO
+ *
+ * Returns: QDF_STATUS
+ */
+static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer,
+					unsigned tid)
+{
+	struct dp_pdev *pdev = peer->vdev->pdev;
+	struct dp_soc *soc = pdev->soc;
+	QDF_STATUS status = QDF_STATUS_E_FAILURE;
+	void *ring_desc;
+	enum hal_reo_error_status error;
+	struct hal_rx_mpdu_desc_info *saved_mpdu_desc_info;
+	void *hal_srng = soc->reo_reinject_ring.hal_srng;
+	struct hal_rx_msdu_link_ptr_info *saved_msdu_link_ptr;
+
+	if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
+
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"HAL RING Access For REO Reinject SRNG Failed: %p",
+			hal_srng);
+		goto done;
+	}
+
+	ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
+
+	qdf_assert(ring_desc);
+
+	error = HAL_RX_ERROR_STATUS_GET(ring_desc);
+
+	if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"HAL RING 0x%p:error %d", hal_srng, error);
+
+		/* Don't know how to deal with this condition -- assert */
+		qdf_assert(0);
+		goto done;
+	}
+
+	saved_mpdu_desc_info =
+		&peer->rx_tid[tid].transcap_rx_mpdu_desc_info;
+
+	/* first msdu link pointer */
+	saved_msdu_link_ptr =
+		&peer->rx_tid[tid].transcap_msdu_link_ptr[0];
+
+	hal_rx_defrag_update_src_ring_desc(ring_desc,
+		saved_mpdu_desc_info, saved_msdu_link_ptr);
+
+	status = QDF_STATUS_SUCCESS;
+done:
+	hal_srng_access_end(soc->hal_soc, hal_srng);
+	return status;
+}
+
+/*
+ * dp_rx_defrag(): Defragment the fragment chain
+ * @peer: Pointer to the peer
+ * @tid: Traffic Identifier (TID)
+ * @frag_list: Pointer to head list
+ * @frag_list_tail: Pointer to tail list
+ *
+ * Defragment the fragment chain
+ *
+ * Returns: QDF_STATUS
+ */
+static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid,
+			qdf_nbuf_t frag_list, qdf_nbuf_t frag_list_tail)
+{
+	qdf_nbuf_t tmp_next;
+	qdf_nbuf_t cur = frag_list, msdu;
+
+	uint32_t index, tkip_demic = 0;
+	uint16_t hdr_space;
+	QDF_STATUS status;
+	uint8_t key[DEFRAG_IEEE80211_KEY_LEN];
+	struct dp_vdev *vdev = peer->vdev;
+
+	cur = frag_list;
+	hdr_space = dp_rx_defrag_hdrsize(cur);
+	index = hal_rx_msdu_is_wlan_mcast(cur) ?
+		dp_sec_mcast : dp_sec_ucast;
+
+	switch (peer->security[index].sec_type) {
+	case htt_sec_type_tkip:
+		tkip_demic = 1;
+		/* fall through: TKIP and TKIP-no-MIC share the decap path */
+
+	case htt_sec_type_tkip_nomic:
+		while (cur) {
+			tmp_next = qdf_nbuf_next(cur);
+			if (dp_rx_defrag_tkip_decap(cur, hdr_space)) {
+
+				/* TKIP decap failed, discard frags */
+				dp_rx_defrag_frames_free(frag_list);
+
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					QDF_TRACE_LEVEL_ERROR,
+					"dp_rx_defrag: TKIP decap failed");
+
+				return QDF_STATUS_E_DEFRAG_ERROR;
+			}
+			cur = tmp_next;
+		}
+		break;
+
+	case htt_sec_type_aes_ccmp:
+		while (cur) {
+			tmp_next = qdf_nbuf_next(cur);
+			if (dp_rx_defrag_ccmp_demic(cur, hdr_space)) {
+
+				/* CCMP demic failed, discard frags */
+				dp_rx_defrag_frames_free(frag_list);
+
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					QDF_TRACE_LEVEL_ERROR,
+					"dp_rx_defrag: CCMP demic failed");
+
+				return QDF_STATUS_E_DEFRAG_ERROR;
+			}
+			if (dp_rx_defrag_ccmp_decap(cur, hdr_space)) {
+
+				/* CCMP decap failed, discard frags */
+				dp_rx_defrag_frames_free(frag_list);
+
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					QDF_TRACE_LEVEL_ERROR,
+					"dp_rx_defrag: CCMP decap failed");
+
+				return QDF_STATUS_E_DEFRAG_ERROR;
+			}
+			cur = tmp_next;
+		}
+		break;
+	case htt_sec_type_wep40:
+	case htt_sec_type_wep104:
+	case htt_sec_type_wep128:
+		while (cur) {
+			tmp_next = qdf_nbuf_next(cur);
+			if (dp_rx_defrag_wep_decap(cur, hdr_space)) {
+
+				/* WEP decap failed, discard frags */
+				dp_rx_defrag_frames_free(frag_list);
+
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					QDF_TRACE_LEVEL_ERROR,
+					"dp_rx_defrag: WEP decap failed");
+
+				return QDF_STATUS_E_DEFRAG_ERROR;
+			}
+			cur = tmp_next;
+		}
+		break;
+	default:
+		QDF_TRACE(QDF_MODULE_ID_TXRX,
+			QDF_TRACE_LEVEL_ERROR,
+			"dp_rx_defrag: Did not match any security type");
+		break;
+	}
+
+	if (tkip_demic) {
+		msdu = frag_list_tail; /* Only last fragment has the MIC */
+
+		qdf_mem_copy(key,
+			peer->security[index].michael_key,
+			sizeof(peer->security[index].michael_key));
+		if (dp_rx_defrag_tkip_demic(key, msdu, hdr_space)) {
+			qdf_nbuf_free(msdu);
+			dp_rx_defrag_err(vdev->vdev_id, peer->mac_addr.raw,
+				tid, 0, QDF_STATUS_E_DEFRAG_ERROR, msdu,
+				NULL, 0);
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"dp_rx_defrag: TKIP demic failed");
+			return QDF_STATUS_E_DEFRAG_ERROR;
+		}
+	}
+
+	/* The per-fragment loops above exhaust 'cur'; operate on the head
+	 * of the fragment list from here on.
+	 */
+	dp_rx_defrag_qos_decap(frag_list, hdr_space);
+
+	/* Convert the header to 802.3 header */
+	dp_rx_defrag_nwifi_to_8023(frag_list);
+
+	status = dp_rx_defrag_decap_recombine(peer, frag_list, tid, hdr_space);
+
+	if (QDF_IS_STATUS_ERROR(status)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		"dp_rx_defrag_decap_recombine failed");
+
+		qdf_assert(0);
+	}
+
+	return status;
+}
+
+/*
+ * dp_rx_defrag_cleanup(): Clean up activities
+ * @peer: Pointer to the peer
+ * @tid: Traffic Identifier (TID)
+ * @seq: Sequence number
+ *
+ * Returns: None
+ */
+static void dp_rx_defrag_cleanup(struct dp_peer *peer, unsigned tid,
+							uint16_t seq)
+{
+	struct dp_rx_reorder_array_elem *rx_reorder_array_elem =
+				&peer->rx_tid[tid].array[seq];
+
+	/* Free up nbufs */
+	dp_rx_defrag_frames_free(rx_reorder_array_elem->head);
+
+	/* Free up saved ring descriptors */
+	dp_rx_clear_saved_desc_info(peer, tid);
+
+	rx_reorder_array_elem->head = NULL;
+	rx_reorder_array_elem->tail = NULL;
+	peer->rx_tid[tid].defrag_timeout_ms = 0;
+	peer->rx_tid[tid].curr_frag_num = 0;
+	peer->rx_tid[tid].curr_seq_num = 0;
+	peer->rx_tid[tid].curr_ring_desc_idx = 0;
+}
+
+/*
+ * dp_rx_defrag_save_info_from_ring_desc(): Save info from REO ring descriptor
+ * @ring_desc: Pointer to the ring descriptor
+ * @peer: Pointer to the peer
+ * @tid: Traffic Identifier (TID)
+ * @mpdu_desc_info: MPDU descriptor info
+ *
+ * Returns: None
+ */
+static void dp_rx_defrag_save_info_from_ring_desc(void *ring_desc,
+	struct dp_peer *peer, unsigned tid,
+	struct hal_rx_mpdu_desc_info *mpdu_desc_info)
+{
+	struct dp_pdev *pdev = peer->vdev->pdev;
+	void *msdu_link_desc_va = NULL;
+	uint8_t idx = peer->rx_tid[tid].curr_ring_desc_idx;
+	uint8_t rbm;
+
+	struct hal_rx_msdu_link_ptr_info *msdu_link_ptr_info =
+		&peer->rx_tid[tid].transcap_msdu_link_ptr[idx];
+	struct hal_rx_mpdu_desc_info *tmp_mpdu_desc_info =
+		&peer->rx_tid[tid].transcap_rx_mpdu_desc_info;
+	struct hal_buf_info hbi;
+
+	rbm = hal_rx_ret_buf_manager_get(ring_desc);
+	if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"Invalid RBM while chaining frag MSDUs");
+		return;
+	}
+
+	hal_rx_reo_buf_paddr_get(ring_desc, &hbi);
+
+	msdu_link_desc_va =
+		dp_rx_cookie_2_link_desc_va(pdev->soc, &hbi);
+
+	hal_rx_defrag_save_info_from_ring_desc(msdu_link_desc_va,
+		msdu_link_ptr_info, &hbi);
+
+	qdf_mem_copy(tmp_mpdu_desc_info, mpdu_desc_info,
+		sizeof(*tmp_mpdu_desc_info));
+}
+
+/*
+ * dp_rx_defrag_store_fragment(): Store incoming fragments
+ * @soc: Pointer to the SOC data structure
+ * @ring_desc: Pointer to the ring descriptor
+ * @head: Pointer to head of the local descriptor free-list
+ * @tail: Pointer to tail of the local descriptor free-list
+ * @mpdu_desc_info: MPDU descriptor info
+ * @msdu_info: Pointer to MSDU descriptor info
+ * @tid: Traffic Identifier
+ * @rx_desc: Pointer to rx descriptor
+ *
+ * Returns: QDF_STATUS
+ */
+static QDF_STATUS dp_rx_defrag_store_fragment(struct dp_soc *soc,
+			void *ring_desc,
+			union dp_rx_desc_list_elem_t **head,
+			union dp_rx_desc_list_elem_t **tail,
+			struct hal_rx_mpdu_desc_info *mpdu_desc_info,
+			struct hal_rx_msdu_desc_info *msdu_info,
+			unsigned tid, struct dp_rx_desc *rx_desc)
+{
+	uint8_t idx;
+	struct dp_rx_reorder_array_elem *rx_reorder_array_elem;
+	struct dp_pdev *pdev;
+	struct dp_peer *peer;
+	uint16_t peer_id;
+	uint16_t rxseq, seq;
+	uint8_t fragno, more_frag, all_frag_present = 0;
+	uint16_t seq_num = mpdu_desc_info->mpdu_seq;
+	QDF_STATUS status;
+	struct dp_rx_tid *rx_tid;
+	uint8_t mpdu_sequence_control_valid;
+	uint8_t mpdu_frame_control_valid;
+	qdf_nbuf_t frag = rx_desc->nbuf;
+	uint8_t *rx_desc_info;
+
+	/* Check if the packet is from a valid peer */
+	peer_id = DP_PEER_METADATA_PEER_ID_GET(
+					mpdu_desc_info->peer_meta_data);
+	peer = dp_peer_find_by_id(soc, peer_id);
+
+	if (!peer) {
+		/* We should not receive anything from an unknown peer;
+		 * however, that might happen while we are in monitor mode.
+		 * We don't need to handle that here
+		 */
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"Unknown peer, dropping the fragment");
+
+		qdf_nbuf_free(frag);
+		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
+
+		return QDF_STATUS_E_DEFRAG_ERROR;
+	}
+
+	pdev = peer->vdev->pdev;
+	rx_tid = &peer->rx_tid[tid];
+
+	seq = seq_num & (peer->rx_tid[tid].ba_win_size - 1);
+	qdf_assert(seq == 0);
+	rx_reorder_array_elem = &peer->rx_tid[tid].array[seq];
+
+	rx_desc_info = qdf_nbuf_data(frag);
+	mpdu_sequence_control_valid =
+		hal_rx_get_mpdu_sequence_control_valid(rx_desc_info);
+
+	/* Invalid MPDU sequence control field, MPDU is of no use */
+	if (!mpdu_sequence_control_valid) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"Invalid MPDU seq control field, dropping MPDU");
+		qdf_nbuf_free(frag);
+		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
+
+		qdf_assert(0);
+		goto end;
+	}
+
+	mpdu_frame_control_valid =
+		hal_rx_get_mpdu_frame_control_valid(rx_desc_info);
+
+	/* Invalid frame control field */
+	if (!mpdu_frame_control_valid) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"Invalid frame control field, dropping MPDU");
+		qdf_nbuf_free(frag);
+		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
+
+		qdf_assert(0);
+		goto end;
+	}
+
+	/* Current mpdu sequence */
+	rxseq = hal_rx_get_rx_sequence(rx_desc_info);
+	more_frag = hal_rx_get_rx_more_frag_bit(rx_desc_info);
+
+	/* HW does not populate the fragment number as of now
+	 * need to get from the 802.11 header
+	 */
+	fragno = dp_rx_frag_get_mpdu_frag_number(rx_desc_info);
+
+	/*
+	 * !more_frag: no more fragments to be delivered
+	 * !frag_no: packet is not fragmented
+	 * !rx_reorder_array_elem->head: no saved fragments so far
+	 */
+	if ((!more_frag) && (!fragno) && (!rx_reorder_array_elem->head)) {
+		/* We should not get into this situation here.
+		 * It means an unfragmented packet with fragment flag
+		 * is delivered over the REO exception ring.
+		 * Typically it follows normal rx path.
+		 */
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"Rcvd unfragmented pkt on REO Err srng, dropping");
+		qdf_nbuf_free(frag);
+		dp_rx_add_to_free_desc_list(head, tail, rx_desc);
+
+		qdf_assert(0);
+		goto end;
+	}
+
+	/* Check if the fragment is for the same sequence or a different one */
+	if (rx_reorder_array_elem->head) {
+
+		if (rxseq != rx_tid->curr_seq_num) {
+
+			/* Drop stored fragments if out of sequence
+			 * fragment is received
+			 */
+			dp_rx_defrag_frames_free(rx_reorder_array_elem->head);
+
+			rx_reorder_array_elem->head = NULL;
+			rx_reorder_array_elem->tail = NULL;
+
+			/*
+			 * The sequence number for this fragment becomes the
+			 * new sequence number to be processed
+			 */
+			rx_tid->curr_seq_num = rxseq;
+
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"Sequence number mismatch, dropping fragments of the earlier sequence");
+		}
+	} else {
+		/* Start of a new sequence */
+		rx_tid->curr_seq_num = rxseq;
+	}
+
+	/*
+	 * If the earlier sequence was dropped, this will be the fresh start.
+	 * Else, continue with next fragment in a given sequence
+	 */
+	dp_rx_defrag_fraglist_insert(peer, tid, &rx_reorder_array_elem->head,
+			&rx_reorder_array_elem->tail, frag,
+			&all_frag_present);
+
+	/*
+	 * Currently, we can have only 6 MSDUs per-MPDU, if the current
+	 * packet sequence has more than 6 MSDUs for some reason, we will
+	 * have to use the next MSDU link descriptor and chain them together
+	 * before reinjection
+	 */
+	if (more_frag == 0 || fragno == HAL_RX_NUM_MSDU_DESC) {
+		/*
+		 * Deep copy of MSDU link pointer and msdu descriptor structs
+		 */
+		idx = peer->rx_tid[tid].curr_ring_desc_idx;
+		if (idx < HAL_RX_MAX_SAVED_RING_DESC) {
+			dp_rx_defrag_save_info_from_ring_desc(ring_desc,
+				peer, tid, mpdu_desc_info);
+
+			peer->rx_tid[tid].curr_ring_desc_idx++;
+		} else {
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"Max ring descr saved, dropping fragment");
+			/*
+			 * Free up saved fragments and ring descriptors if any
+			 */
+			goto end;
+		}
+	}
+
+	/* TODO: handle fragment timeout gracefully */
+	if (pdev->soc->rx.flags.defrag_timeout_check) {
+		dp_rx_defrag_waitlist_remove(peer, tid);
+		goto end;
+	}
+
+	/* Yet to receive more fragments for this sequence number */
+	if (!all_frag_present) {
+		uint32_t now_ms =
+			qdf_system_ticks_to_msecs(qdf_system_ticks());
+
+		peer->rx_tid[tid].defrag_timeout_ms =
+			now_ms + pdev->soc->rx.defrag.timeout_ms;
+
+		dp_rx_defrag_waitlist_add(peer, tid);
+		goto end;
+	}
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+		"All fragments received for sequence: %d", rxseq);
+
+	/* Process the fragments */
+	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
+		rx_reorder_array_elem->tail);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"Fragment processing failed");
+		goto end;
+	}
+
+	/* Re-inject the fragments back to REO for further processing */
+	status = dp_rx_defrag_reo_reinject(peer, tid);
+	if (QDF_IS_STATUS_SUCCESS(status))
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+		"Fragmented sequence successfully reinjected");
+	else
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		"Fragmented sequence reinjection failed");
+
+end:
+	dp_rx_defrag_cleanup(peer, tid, seq);
+	return QDF_STATUS_E_DEFRAG_ERROR;
+}
+
+/**
+ * dp_rx_frag_handle() - Handles fragmented Rx frames
+ *
+ * @soc: core txrx main context
+ * @ring_desc: opaque pointer to the REO error ring descriptor
+ * @mpdu_desc_info: MPDU descriptor information from ring descriptor
+ * @head: head of the local descriptor free-list
+ * @tail: tail of the local descriptor free-list
+ * @quota: No. of units (packets) that can be serviced in one shot.
+ *
+ * This function implements RX 802.11 fragmentation handling
+ * The handling is mostly same as legacy fragmentation handling.
+ * If required, this function can re-inject the frames back to the
+ * REO ring (with proper settings to bypass the fragmentation check,
+ * but still use duplicate detection / re-ordering) and route these
+ * frames to a different core.
+ *
+ * Return: uint32_t: No. of elements processed
+ */
+uint32_t dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc,
+		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
+		union dp_rx_desc_list_elem_t **head,
+		union dp_rx_desc_list_elem_t **tail,
+		uint32_t quota)
+{
+	uint32_t rx_bufs_used = 0;
+	void *link_desc_va;
+	struct hal_buf_info buf_info;
+	struct hal_rx_msdu_list msdu_list; /* per MPDU list of MSDUs */
+	uint32_t tid;
+	int idx;
+	QDF_STATUS status;
+
+	qdf_assert(soc);
+	qdf_assert(mpdu_desc_info);
+
+	/* Fragment from a valid peer */
+	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);
+
+	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
+
+	qdf_assert(link_desc_va);
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+		"Number of MSDUs to process, num_msdus: %d",
+		mpdu_desc_info->msdu_count);
+
+
+	if (qdf_unlikely(mpdu_desc_info->msdu_count == 0)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"Not sufficient MSDUs to process");
+		return rx_bufs_used;
+	}
+
+	/* Get msdu_list for the given MPDU */
+	hal_rx_msdu_list_get(link_desc_va, &msdu_list,
+		mpdu_desc_info->msdu_count);
+
+	/* Process all MSDUs in the current MPDU */
+	for (idx = 0; (idx < mpdu_desc_info->msdu_count) && quota--; idx++) {
+		struct dp_rx_desc *rx_desc =
+			dp_rx_cookie_2_va_rxdma_buf(soc,
+				msdu_list.sw_cookie[idx]);
+
+		qdf_assert(rx_desc);
+
+		tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start);
+
+		/* Process fragment-by-fragment */
+		status = dp_rx_defrag_store_fragment(soc, ring_desc,
+				head, tail, mpdu_desc_info,
+				&msdu_list.msdu_info[idx], tid,
+				rx_desc);
+		if (QDF_IS_STATUS_SUCCESS(status))
+			rx_bufs_used++;
+		else
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"Rx Defragmentation error. mpdu_seq: 0x%x msdu_count: %d mpdu_flags: %d",
+			mpdu_desc_info->mpdu_seq, mpdu_desc_info->msdu_count,
+			mpdu_desc_info->mpdu_flags);
+	}
+
+	return rx_bufs_used;
+}
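For reference, the REO error path in dp_rx_err.c is expected to dispatch to this handler roughly as sketched below when an MPDU is flagged as a fragment (simplified and illustrative; the flag and variable names are assumed from that file).

	/* Illustrative call site, e.g. inside dp_rx_err_process() */
	if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_FRAGMENT) {
		rx_bufs_used += dp_rx_frag_handle(soc, ring_desc,
						  &mpdu_desc_info,
						  &head, &tail, quota);
		continue;
	}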

+ 131 - 0
dp/wifi3.0/dp_rx_defrag.h

@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DP_RX_DEFRAG_H
+#define _DP_RX_DEFRAG_H
+
+#include "hal_rx.h"
+
+#ifdef CONFIG_MCL
+#include <cds_ieee80211_common.h>
+#else
+#include <ieee80211.h>
+#endif
+
+#define DEFRAG_IEEE80211_ADDR_LEN	6
+#define DEFRAG_IEEE80211_KEY_LEN	8
+#define DEFRAG_IEEE80211_FCS_LEN	4
+
+#define DP_RX_DEFRAG_IEEE80211_ADDR_COPY(dst, src) \
+	qdf_mem_copy(dst, src, IEEE80211_ADDR_LEN)
+
+#define DP_RX_DEFRAG_IEEE80211_QOS_HAS_SEQ(wh) \
+	(((wh) & \
+	(IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_QOS)) == \
+	(IEEE80211_FC0_TYPE_DATA | IEEE80211_FC0_SUBTYPE_QOS))
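A worked example of this check, assuming the standard fc0 encodings (IEEE80211_FC0_TYPE_MASK = 0x0c, IEEE80211_FC0_TYPE_DATA = 0x08, IEEE80211_FC0_SUBTYPE_QOS = 0x80): a QoS data frame has fc0 = 0x88, and 0x88 & 0x8c == 0x88, so the macro evaluates true; a non-QoS data frame has fc0 = 0x08, and 0x08 & 0x8c == 0x08 != 0x88, so it evaluates false.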
+
+/**
+ * struct dp_rx_defrag_cipher: structure to indicate cipher header
+ * @ic_name: Name
+ * @ic_header: header length
+ * @ic_trailer: trailer length
+ * @ic_miclen: MIC length
+ */
+struct dp_rx_defrag_cipher {
+	const char *ic_name;
+	uint16_t ic_header;
+	uint8_t ic_trailer;
+	uint8_t ic_miclen;
+};
+
+uint32_t dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc,
+		struct hal_rx_mpdu_desc_info *mpdu_desc_info,
+		union dp_rx_desc_list_elem_t **head,
+		union dp_rx_desc_list_elem_t **tail,
+		uint32_t quota);
+
+/*
+ * dp_rx_frag_get_mac_hdr() - Return pointer to the mac hdr
+ * @rx_desc_info: Pointer to the pkt_tlvs in the
+ * nbuf (pkt_tlvs->mac_hdr->data)
+ *
+ * It is inefficient to peek into the packet for received
+ * frames but these APIs are required to get to some of
+ * 802.11 fields that hardware does not populate in the
+ * rx meta data.
+ *
+ * Returns: pointer to ieee80211_frame
+ */
+static inline
+struct ieee80211_frame *dp_rx_frag_get_mac_hdr(uint8_t *rx_desc_info)
+{
+	int rx_desc_len = hal_rx_get_desc_len();
+	return (struct ieee80211_frame *)(rx_desc_info + rx_desc_len);
+}
+
+/*
+ * dp_rx_frag_get_mpdu_seq_number() - Get mpdu sequence number
+ * @rx_desc_info: Pointer to the pkt_tlvs in the
+ * nbuf (pkt_tlvs->mac_hdr->data)
+ *
+ * Returns: uint16_t, rx sequence number
+ */
+static inline
+uint16_t dp_rx_frag_get_mpdu_seq_number(uint8_t *rx_desc_info)
+{
+	struct ieee80211_frame *mac_hdr;
+	mac_hdr = dp_rx_frag_get_mac_hdr(rx_desc_info);
+
+	return qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) >>
+		IEEE80211_SEQ_SEQ_SHIFT;
+}
+
+/*
+ * dp_rx_frag_get_mpdu_frag_number() - Get mpdu fragment number
+ * @rx_desc_info: Pointer to the pkt_tlvs in the
+ * nbuf (pkt_tlvs->mac_hdr->data)
+ *
+ * Returns: uint8_t, receive fragment number
+ */
+static inline
+uint8_t dp_rx_frag_get_mpdu_frag_number(uint8_t *rx_desc_info)
+{
+	struct ieee80211_frame *mac_hdr;
+	mac_hdr = dp_rx_frag_get_mac_hdr(rx_desc_info);
+
+	return qdf_le16_to_cpu(*(uint16_t *) mac_hdr->i_seq) &
+		IEEE80211_SEQ_FRAG_MASK;
+}
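As a worked example for the two accessors above: i_seq holds the little-endian 16-bit Sequence Control field, whose low 4 bits are the fragment number (IEEE80211_SEQ_FRAG_MASK) and whose upper 12 bits are the sequence number (shifted down by IEEE80211_SEQ_SEQ_SHIFT = 4). A raw value of 0x01a5 therefore decodes to fragment number 5 and sequence number 0x1a (26).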
+
+/*
+ * dp_rx_frag_get_more_frag_bit() - Get more fragment bit
+ * @rx_desc_info: Pointer to the pkt_tlvs in the
+ * nbuf (pkt_tlvs->mac_hdr->data)
+ *
+ * Returns: uint8_t, more fragment bit
+ */
+static inline
+uint8_t dp_rx_frag_get_more_frag_bit(uint8_t *rx_desc_info)
+{
+	struct ieee80211_frame *mac_hdr;
+	mac_hdr = dp_rx_frag_get_mac_hdr(rx_desc_info);
+
+	return mac_hdr->i_fc[1] & IEEE80211_FC1_MORE_FRAG;
+}
+
+#endif /* _DP_RX_DEFRAG_H */

+ 6 - 41
dp/wifi3.0/dp_rx_err.c

@@ -28,38 +28,8 @@
 #else
 #include <ieee80211.h>
 #endif
-
-
-/**
- * dp_rx_frag_handle() - Handles fragmented Rx frames
- *
- * @soc: core txrx main context
- * @ring_desc: opaque pointer to the REO error ring descriptor
- * @mpdu_desc_info: MPDU descriptor information from ring descriptor
- * @head: head of the local descriptor free-list
- * @tail: tail of the local descriptor free-list
- * @quota: No. of units (packets) that can be serviced in one shot.
- *
- * This function implements RX 802.11 fragmentation handling
- * The handling is mostly same as legacy fragmentation handling.
- * If required, this function can re-inject the frames back to
- * REO ring (with proper setting to by-pass fragmentation check
- * but use duplicate detection / re-ordering and routing these frames
- * to a different core.
- *
- * Return: uint32_t: No. of elements processed
- */
-static uint32_t
-dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc,
-		  struct hal_rx_mpdu_desc_info *mpdu_desc_info,
-		  union dp_rx_desc_list_elem_t **head,
-		  union dp_rx_desc_list_elem_t **tail,
-		  uint32_t quota)
-{
-	uint32_t rx_bufs_used = 0;
-
-	return rx_bufs_used;
-}
+#include "dp_rx_defrag.h"
+#include <enet.h>	/* LLC_SNAP_HDR_LEN */
 
 /**
  * dp_rx_msdus_drop() - Drops all MSDU's per MPDU
@@ -75,14 +45,12 @@ dp_rx_frag_handle(struct dp_soc *soc, void *ring_desc,
  *
  * Return: uint32_t: No. of elements processed
  */
-static uint32_t
-dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
+static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
 		 struct hal_rx_mpdu_desc_info *mpdu_desc_info,
 		 union dp_rx_desc_list_elem_t **head,
 		 union dp_rx_desc_list_elem_t **tail,
 		 uint32_t quota)
 {
-	uint8_t num_msdus;
 	uint32_t rx_bufs_used = 0;
 	void *link_desc_va;
 	struct hal_buf_info buf_info;
@@ -93,11 +61,9 @@ dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
 
 	link_desc_va = dp_rx_cookie_2_link_desc_va(soc, &buf_info);
 
-	qdf_assert(rx_msdu_link_desc);
-
 	/* No UNMAP required -- this is "malloc_consistent" memory */
-
-	hal_rx_msdu_list_get(link_desc_va, &msdu_list, &num_msdus);
+	hal_rx_msdu_list_get(link_desc_va, &msdu_list,
+		mpdu_desc_info->msdu_count);
 
 	for (i = 0; (i < HAL_RX_NUM_MSDU_DESC) && quota--; i++) {
 		struct dp_rx_desc *rx_desc =
@@ -521,7 +487,7 @@ dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
 			/* Call appropriate handler */
 			DP_STATS_INC(soc, rx.err.invalid_rbm, 1);
 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-			FL("Invalid RBM %d"), rbm);
+				FL("Invalid RBM %d"), rbm);
 			continue;
 		}
 
@@ -657,7 +623,6 @@ dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
 		if (qdf_unlikely(rbm != HAL_RX_BUF_RBM_SW3_BM)) {
 			/* TODO */
 			/* Call appropriate handler */
-
 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 				FL("Invalid RBM %d"), rbm);
 			continue;

+ 1 - 1
dp/wifi3.0/dp_rx_mon_dest.c

@@ -138,7 +138,7 @@ dp_rx_mon_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
 
 		num_msdus = msdu_cnt;
 
-		hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list, &num_msdus);
+		hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list, num_msdus);
 
 		msdu_cnt -= num_msdus;
 

+ 19 - 2
dp/wifi3.0/dp_types.h

@@ -30,6 +30,12 @@
 #include <htt_common.h>
 
 #include <cdp_txrx_cmn.h>
+#ifdef CONFIG_MCL
+#include <cds_ieee80211_common.h>
+#else
+#include <ieee80211.h>
+#endif
+
 #ifndef CONFIG_WIN
 #include <wdi_event_api.h>    /* WDI subscriber event list */
 #endif
@@ -40,6 +46,7 @@
 #include "hal_rx.h"
 #include <hal_api.h>
 #include <hal_api_mon.h>
 
 #define MAX_TCL_RING 3
 #define MAX_RXDMA_ERRORS 32
@@ -305,6 +312,18 @@ struct dp_rx_tid {
 
 	/* only used for defrag right now */
 	TAILQ_ENTRY(dp_rx_tid) defrag_waitlist_elem;
+
+	/* MSDU link pointers used for reinjection */
+	struct hal_rx_msdu_link_ptr_info
+		transcap_msdu_link_ptr[HAL_RX_MAX_SAVED_RING_DESC];
+
+	struct hal_rx_mpdu_desc_info transcap_rx_mpdu_desc_info;
+	uint8_t curr_ring_desc_idx;
+
+	/* Sequence and fragments that are being processed currently */
+	uint32_t curr_seq_num;
+	uint32_t curr_frag_num;
+
 	uint32_t defrag_timeout_ms;
 	uint16_t dialogtoken;
 	uint16_t statuscode;
@@ -903,9 +922,7 @@ struct dp_peer {
 
 	struct {
 		enum htt_sec_type sec_type;
-#ifdef notyet /* TODO: See if this is required for defrag support */
 		u_int32_t michael_key[2]; /* relevant for TKIP */
-#endif
 	} security[2]; /* 0 -> multicast, 1 -> unicast */
 
 	/*

+ 407 - 21
dp/wifi3.0/hal_rx.h

@@ -357,7 +357,6 @@ static inline void hal_rx_mpdu_desc_info_get(void *desc_addr,
 		HAL_RX_MPDU_DESC_PEER_META_DATA_GET(mpdu_info);
 }
 
-
 /*
  * @ hal_rx_msdu_desc_info_get: Gets the flags related to MSDU desciptor.
  * @				  Specifically flags needed are:
@@ -1081,7 +1080,7 @@ hal_rx_msdu_get_keyid(uint8_t *buf)
 	return (keyid_octet >> 6) & 0x3;
 }
 
-#define HAL_RX_MSDU_START_RSSI_GET(_rx_msdu_start) 	\
+#define HAL_RX_MSDU_START_RSSI_GET(_rx_msdu_start)	\
 	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_start,  \
 		RX_MSDU_START_5_USER_RSSI_OFFSET)),	\
 		RX_MSDU_START_5_USER_RSSI_MASK,		\
@@ -1107,10 +1106,10 @@ hal_rx_msdu_start_get_rssi(uint8_t *buf)
 
 }
 
-#define HAL_RX_MSDU_START_FREQ_GET(_rx_msdu_start) 		\
-	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_start,  	\
+#define HAL_RX_MSDU_START_FREQ_GET(_rx_msdu_start)		\
+	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_start,		\
 		RX_MSDU_START_7_SW_PHY_META_DATA_OFFSET)),      \
-		RX_MSDU_START_7_SW_PHY_META_DATA_MASK,  	\
+		RX_MSDU_START_7_SW_PHY_META_DATA_MASK,		\
 		RX_MSDU_START_7_SW_PHY_META_DATA_LSB))
 
 /*
@@ -1135,10 +1134,10 @@ hal_rx_msdu_start_get_freq(uint8_t *buf)
 }
 
 
-#define HAL_RX_MSDU_START_PKT_TYPE_GET(_rx_msdu_start) 	\
+#define HAL_RX_MSDU_START_PKT_TYPE_GET(_rx_msdu_start)	\
 	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_msdu_start,  \
 		RX_MSDU_START_5_PKT_TYPE_OFFSET)),      \
-		RX_MSDU_START_5_PKT_TYPE_MASK,  	\
+		RX_MSDU_START_5_PKT_TYPE_MASK,		\
 		RX_MSDU_START_5_PKT_TYPE_LSB))
 
 /*
@@ -1161,10 +1160,10 @@ hal_rx_msdu_start_get_pkt_type(uint8_t *buf)
 	return pkt_type;
 }
 
-#define HAL_RX_MSDU_START_NSS_GET(_rx_msdu_start)      	\
+#define HAL_RX_MSDU_START_NSS_GET(_rx_msdu_start)	\
 	(_HAL_MS((*_OFFSET_TO_WORD_PTR((_rx_msdu_start),\
-	RX_MSDU_START_5_NSS_OFFSET)),          		\
-	RX_MSDU_START_5_NSS_MASK,              		\
+	RX_MSDU_START_5_NSS_OFFSET)),			\
+	RX_MSDU_START_5_NSS_MASK,			\
 	RX_MSDU_START_5_NSS_LSB))
 
 /*
@@ -1540,7 +1539,9 @@ hal_rx_mpdu_end_mic_err_get(uint8_t *buf)
 		RX_MSDU_LINK_8_RX_MSDU_DETAILS_MSDU_0_OFFSET))
 
 #define HAL_RX_NUM_MSDU_DESC 6
+#define HAL_RX_MAX_SAVED_RING_DESC 16
 
+/* TODO: rework the structure */
 struct hal_rx_msdu_list {
 	struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC];
 	uint32_t sw_cookie[HAL_RX_NUM_MSDU_DESC];
@@ -1552,24 +1553,28 @@ struct hal_buf_info {
 };
 
 /**
- * hal_rx_msdu_link_desc_get: API to get the MSDU information
+ * hal_rx_msdu_link_desc_get(): API to get the MSDU information
  * from the MSDU link descriptor
  *
- * @ msdu_link_desc: Opaque pointer used by HAL to get to the
+ * @msdu_link_desc: Opaque pointer used by HAL to get to the
  * MSDU link descriptor (struct rx_msdu_link)
- * @ msdu_list: Return the list of MSDUs contained in this link descriptor
+ *
+ * @msdu_list: Return the list of MSDUs contained in this link descriptor
+ *
+ * @num_msdus: Number of MSDUs in the MPDU
+ *
  * Return: void
  */
 static inline void hal_rx_msdu_list_get(void *msdu_link_desc,
-			struct hal_rx_msdu_list *msdu_list, uint8_t *num_msdus)
+			struct hal_rx_msdu_list *msdu_list, uint8_t num_msdus)
 {
 	struct rx_msdu_details *msdu_details;
 	struct rx_msdu_desc_info *msdu_desc_info;
 	struct rx_msdu_link *msdu_link = (struct rx_msdu_link *)msdu_link_desc;
 	int i;
 
-	if (*num_msdus > HAL_RX_NUM_MSDU_DESC)
-		*num_msdus = HAL_RX_NUM_MSDU_DESC;
+	if (num_msdus > HAL_RX_NUM_MSDU_DESC)
+		num_msdus = HAL_RX_NUM_MSDU_DESC;
 
 	msdu_details = HAL_RX_LINK_DESC_MSDU0_PTR(msdu_link);
 
@@ -1577,8 +1582,7 @@ static inline void hal_rx_msdu_list_get(void *msdu_link_desc,
 		"[%s][%d] msdu_link=%p msdu_details=%p\n",
 		__func__, __LINE__, msdu_link, msdu_details);
 
-
-	for (i = 0; i < *num_msdus; i++) {
+	for (i = 0; i < num_msdus; i++) {
 		msdu_desc_info = HAL_RX_MSDU_DESC_INFO_GET(&msdu_details[i]);
 		msdu_list->msdu_info[i].msdu_flags =
 			 HAL_RX_MSDU_FLAGS_GET(msdu_desc_info);
@@ -1592,7 +1596,6 @@ static inline void hal_rx_msdu_list_get(void *msdu_link_desc,
 			"[%s][%d] i=%d sw_cookie=%d\n",
 			__func__, __LINE__, i, msdu_list->sw_cookie[i]);
 	}
-
 }
 
 /**
@@ -1919,7 +1922,7 @@ enum hal_rx_wbm_rxdma_push_reason {
 
 /**
  * hal_rx_dump_rx_attention_tlv: dump RX attention TLV in structured
- * 				 humman readable format.
+ *				 humman readable format.
  * @ rx_attn: pointer the rx_attention TLV in pkt.
  * @ dbg_level: log level.
  *
@@ -2527,6 +2530,389 @@ static inline uint8_t hal_srng_ring_id_get(void *hal_ring)
 	return ((struct hal_srng *)hal_ring)->ring_id;
 }
 
-#endif /* _HAL_RX_H */
+/* Rx MSDU link pointer info */
+struct hal_rx_msdu_link_ptr_info {
+	struct rx_msdu_link msdu_link;
+	struct hal_buf_info msdu_link_buf_info;
+};
+
+/**
+ * hal_rx_get_pkt_tlvs(): Function to retrieve pkt tlvs from the rx buffer
+ *
+ * @rx_buf_start: Pointer to the start of the rx buffer
+ * Returns: pointer to rx_pkt_tlvs
+ */
+static inline
+struct rx_pkt_tlvs *hal_rx_get_pkt_tlvs(uint8_t *rx_buf_start)
+{
+	return (struct rx_pkt_tlvs *)rx_buf_start;
+}
+
+/**
+ * hal_rx_get_mpdu_info(): Function to retrieve mpdu info from pkt tlvs
+ *
+ * @pkt_tlvs: Pointer to pkt_tlvs
+ * Returns: pointer to rx_mpdu_info structure
+ */
+static inline
+struct rx_mpdu_info *hal_rx_get_mpdu_info(struct rx_pkt_tlvs *pkt_tlvs)
+{
+	return &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start.rx_mpdu_info_details;
+}
+
+/**
+ * hal_rx_get_rx_sequence(): Function to retrieve rx sequence number
+ *
+ * @nbuf: Network buffer
+ * Returns: rx sequence number
+ */
+#define DOT11_SEQ_FRAG_MASK		0x000f
+#define DOT11_FC1_MORE_FRAG_OFFSET	0x04
+
+#define HAL_RX_MPDU_GET_SEQUENCE_NUMBER(_rx_mpdu_info)	\
+	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info,	\
+		RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_OFFSET)),	\
+		RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_MASK,	\
+		RX_MPDU_INFO_2_MPDU_SEQUENCE_NUMBER_LSB))
+static inline
+uint16_t hal_rx_get_rx_sequence(uint8_t *buf)
+{
+	struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf);
+	struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs);
+	uint16_t seq_number = 0;
+
+	seq_number =
+		HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info) >> 4;
+
+	/* Skip first 4-bits for fragment number */
+	return seq_number;
+}
+
+/**
+ * hal_rx_get_rx_fragment_number(): Function to retrieve rx fragment number
+ *
+ * @buf: pointer to the start of the rx buffer (pkt TLVs)
+ * Returns: rx fragment number
+ */
+static inline
+uint8_t hal_rx_get_rx_fragment_number(uint8_t *buf)
+{
+	struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf);
+	struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs);
+	uint8_t frag_number = 0;
+
+	frag_number = HAL_RX_MPDU_GET_SEQUENCE_NUMBER(rx_mpdu_info) &
+		DOT11_SEQ_FRAG_MASK;
+
+	/* Return first 4 bits as fragment number */
+	return frag_number;
+}
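The two helpers above split the standard 802.11 sequence-control field: the lower 4 bits carry the fragment number and the upper 12 bits carry the sequence number. A minimal sketch of the same split on a plain 16-bit value, illustrative only and not part of this change, reusing the DOT11_SEQ_FRAG_MASK definition above:

/* Illustrative sketch (not part of this change): 802.11 sequence control
 * packs the fragment number in bits [3:0] and the sequence number in
 * bits [15:4], the same split performed by the two helpers above.
 */
static inline void dot11_split_seq_ctrl(uint16_t seq_ctrl,
					uint16_t *seq_num, uint8_t *frag_num)
{
	*frag_num = seq_ctrl & DOT11_SEQ_FRAG_MASK;	/* lower 4 bits */
	*seq_num = seq_ctrl >> 4;			/* upper 12 bits */
}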
+
+#define HAL_RX_MPDU_GET_FRAME_CONTROL_FIELD(_rx_mpdu_info)	\
+	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info,	\
+		RX_MPDU_INFO_14_MPDU_FRAME_CONTROL_FIELD_OFFSET)),	\
+		RX_MPDU_INFO_14_MPDU_FRAME_CONTROL_FIELD_MASK,	\
+		RX_MPDU_INFO_14_MPDU_FRAME_CONTROL_FIELD_LSB))
+/**
+ * hal_rx_get_rx_more_frag_bit(): Function to retrieve more fragment bit
+ *
+ * @buf: pointer to the start of the rx buffer (pkt TLVs)
+ * Returns: rx more fragment bit
+ */
+static inline
+uint8_t hal_rx_get_rx_more_frag_bit(uint8_t *buf)
+{
+	struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf);
+	struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs);
+	uint16_t frame_ctrl = 0;
+
+	frame_ctrl = HAL_RX_MPDU_GET_FRAME_CONTROL_FIELD(rx_mpdu_info) >>
+		DOT11_FC1_MORE_FRAG_OFFSET;
+
+	/* more fragment bit is at offset bit 4 */
+	return frame_ctrl;
+}
+
+/**
+ * hal_rx_get_frame_ctrl_field(): Function to retrieve frame control field
+ *
+ * @buf: pointer to the start of the rx buffer (pkt TLVs)
+ * Returns: rx frame control field
+ *
+ */
+static inline
+uint8_t hal_rx_get_frame_ctrl_field(uint8_t *buf)
+{
+	struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf);
+	struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs);
+	uint16_t frame_ctrl = 0;
+
+	frame_ctrl = HAL_RX_MPDU_GET_FRAME_CONTROL_FIELD(rx_mpdu_info);
+
+	return frame_ctrl;
+}
+
+/*
+ * hal_rx_msdu_is_wlan_mcast(): Check if the buffer is for multicast address
+ *
+ * @nbuf: Network buffer
+ * Returns: flag to indicate whether the nbuf has MC/BC address
+ */
+static inline
+uint32_t hal_rx_msdu_is_wlan_mcast(qdf_nbuf_t nbuf)
+{
+	uint8_t *buf = qdf_nbuf_data(nbuf);
+
+	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
+	struct rx_attention *rx_attn = &pkt_tlvs->attn_tlv.rx_attn;
+
+	return rx_attn->mcast_bcast;
+}
+
+#define HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(_rx_mpdu_info)	\
+	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info,	\
+		RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_OFFSET)),	\
+		RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_MASK,	\
+		RX_MPDU_INFO_2_MPDU_SEQUENCE_CONTROL_VALID_LSB))
+/*
+ * hal_rx_get_mpdu_sequence_control_valid(): Get mpdu sequence control valid
+ *
+ * @buf: pointer to the start of the rx buffer (pkt TLVs)
+ * Returns: value of sequence control valid field
+ */
+static inline
+uint8_t hal_rx_get_mpdu_sequence_control_valid(uint8_t *buf)
+{
+	struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf);
+	struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs);
+	uint8_t seq_ctrl_valid = 0;
+
+	seq_ctrl_valid =
+		HAL_RX_MPDU_GET_SEQUENCE_CONTROL_VALID(rx_mpdu_info);
+
+	return seq_ctrl_valid;
+}
+
+#define HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(_rx_mpdu_info)	\
+	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info,	\
+		RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_OFFSET)),	\
+		RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_MASK,	\
+		RX_MPDU_INFO_2_MPDU_FRAME_CONTROL_VALID_LSB))
+/*
+ * hal_rx_get_mpdu_frame_control_valid(): Retrieves mpdu frame control valid
+ *
+ * @buf: pointer to the start of the rx buffer (pkt TLVs)
+ * Returns: value of frame control valid field
+ */
+static inline
+uint8_t hal_rx_get_mpdu_frame_control_valid(uint8_t *buf)
+{
+	struct rx_pkt_tlvs *pkt_tlvs = hal_rx_get_pkt_tlvs(buf);
+	struct rx_mpdu_info *rx_mpdu_info = hal_rx_get_mpdu_info(pkt_tlvs);
+	uint8_t frm_ctrl_valid = 0;
+
+	frm_ctrl_valid =
+		HAL_RX_MPDU_GET_FRAME_CONTROL_VALID(rx_mpdu_info);
+
+	return frm_ctrl_valid;
+}
+
+/*
+ * hal_rx_clear_mpdu_desc_info(): Clears mpdu_desc_info
+ *
+ * @rx_mpdu_desc_info: HAL view of rx mpdu desc info
+ * Returns: None
+ */
+static inline
+void hal_rx_clear_mpdu_desc_info(
+		struct hal_rx_mpdu_desc_info *rx_mpdu_desc_info)
+{
+	qdf_mem_zero(rx_mpdu_desc_info,
+		sizeof(*rx_mpdu_desc_info));
+}
+
+/*
+ * hal_rx_clear_msdu_link_ptr(): Clears msdu_link_ptr
+ *
+ * @msdu_link_ptr: HAL view of msdu link ptr
+ * @size: number of msdu link pointers
+ * Returns: None
+ */
+static inline
+void hal_rx_clear_msdu_link_ptr(struct hal_rx_msdu_link_ptr_info *msdu_link_ptr,
+				int size)
+{
+	qdf_mem_zero(msdu_link_ptr,
+		(sizeof(*msdu_link_ptr) * size));
+}
 
+/*
+ * hal_rx_chain_msdu_links() - Chains msdu link pointers
+ * @msdu: head network buffer of the MPDU
+ * @msdu_link_ptr_info: msdu link pointer info
+ * @mpdu_desc_info: mpdu descriptor info
+ *
+ * Build a list of MSDUs using the MSDU link pointer. If there are more
+ * MSDUs than one link descriptor can hold, chain the descriptors together.
+ *
+ * Returns: Number of processed msdus
+ */
+static inline
+int hal_rx_chain_msdu_links(qdf_nbuf_t msdu,
+	struct hal_rx_msdu_link_ptr_info *msdu_link_ptr_info,
+	struct hal_rx_mpdu_desc_info *mpdu_desc_info)
+{
+	int j;
+	struct rx_msdu_link *msdu_link_ptr =
+		&msdu_link_ptr_info->msdu_link;
+	struct rx_msdu_link *prev_msdu_link_ptr = NULL;
+	struct rx_msdu_details *msdu_details =
+		HAL_RX_LINK_DESC_MSDU0_PTR(msdu_link_ptr);
+	uint8_t num_msdus = mpdu_desc_info->msdu_count;
+	struct rx_msdu_desc_info *msdu_desc_info;
+	uint8_t fragno, more_frag;
+	uint8_t *rx_desc_info;
+	struct hal_rx_msdu_list msdu_list;
+
+	for (j = 0; j < num_msdus; j++) {
+		msdu_desc_info =
+			HAL_RX_MSDU_DESC_INFO_GET(&msdu_details[j]);
+		msdu_list.msdu_info[j].msdu_flags =
+			HAL_RX_MSDU_FLAGS_GET(msdu_desc_info);
+		msdu_list.msdu_info[j].msdu_len =
+			HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info);
+		msdu_list.sw_cookie[j] = HAL_RX_BUF_COOKIE_GET(
+				&msdu_details[j].buffer_addr_info_details);
+	}
+
+	/* Chain msdu links together */
+	if (prev_msdu_link_ptr) {
+		/* 31-0 bits of the physical address */
+		prev_msdu_link_ptr->
+			next_msdu_link_desc_addr_info.buffer_addr_31_0 =
+			msdu_link_ptr_info->msdu_link_buf_info.paddr &
+			BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK;
+		/* 39-32 bits of the physical address */
+		prev_msdu_link_ptr->
+			next_msdu_link_desc_addr_info.buffer_addr_39_32
+			= ((msdu_link_ptr_info->msdu_link_buf_info.paddr
+						>> 32) &
+				BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK);
+		prev_msdu_link_ptr->
+			next_msdu_link_desc_addr_info.sw_buffer_cookie =
+			msdu_link_ptr_info->msdu_link_buf_info.sw_cookie;
+	}
 
+	/* There is space for only 6 MSDUs in a MSDU link descriptor */
+	if (num_msdus < HAL_RX_NUM_MSDU_DESC) {
+		/* mark first and last MSDUs */
+		rx_desc_info = qdf_nbuf_data(msdu);
+		fragno = hal_rx_get_rx_fragment_number(rx_desc_info);
+		more_frag = hal_rx_get_rx_more_frag_bit(rx_desc_info);
+
+		/* TODO: create skb->fragslist[] */
+
+		if (more_frag == 0) {
+			msdu_list.msdu_info[num_msdus].msdu_flags |=
+				RX_MSDU_DESC_INFO_0_LAST_MSDU_IN_MPDU_FLAG_MASK;
+		} else if (fragno == 1) {
+			msdu_list.msdu_info[num_msdus].msdu_flags |=
+			RX_MSDU_DESC_INFO_0_FIRST_MSDU_IN_MPDU_FLAG_MASK;
+
+			msdu_list.msdu_info[num_msdus].msdu_flags |=
+				RX_MSDU_DESC_INFO_0_MSDU_CONTINUATION_MASK;
+		}
+
+		num_msdus++;
+
+		/* Number of MSDUs per mpdu descriptor is updated */
+		mpdu_desc_info->msdu_count += num_msdus;
+	} else {
+		num_msdus = 0;
+		prev_msdu_link_ptr = msdu_link_ptr;
+	}
+
+	return num_msdus;
+}
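The chaining step above writes a 40-bit link-descriptor DMA address into two buffer-address-info fields. A short sketch of that split, illustrative only (field widths assumed from the masks used above):

/* Illustrative sketch (not part of this change): split a 40-bit physical
 * address into the 32-bit low word and 8-bit high field, mirroring the
 * bitwise-AND masking done in hal_rx_chain_msdu_links().
 */
static inline void split_link_desc_paddr(uint64_t paddr,
					 uint32_t *addr_31_0,
					 uint32_t *addr_39_32)
{
	*addr_31_0 = (uint32_t)(paddr & 0xffffffffULL);
	*addr_39_32 = (uint32_t)((paddr >> 32) & 0xff);
}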
+
+/*
+ * hal_rx_defrag_update_src_ring_desc(): updates reo src ring desc
+ *
+ * @ring_desc: HAL view of ring descriptor
+ * @saved_mpdu_desc_info: saved mpdu desc info
+ * @saved_msdu_link_ptr: saved msdu link ptr
+ *
+ * API used explicitly by rx defrag to update the ring desc with
+ * mpdu desc info and msdu link ptr before reinjecting the
+ * packet back to REO
+ *
+ * Returns: None
+ */
+static inline
+void hal_rx_defrag_update_src_ring_desc(void *ring_desc,
+	void *saved_mpdu_desc_info,
+	struct hal_rx_msdu_link_ptr_info *saved_msdu_link_ptr)
+{
+	struct reo_entrance_ring *reo_ent_ring;
+	struct rx_mpdu_desc_info *reo_ring_mpdu_desc_info;
+	struct hal_buf_info buf_info;
+
+	reo_ent_ring = (struct reo_entrance_ring *)ring_desc;
+	reo_ring_mpdu_desc_info = &reo_ent_ring->
+		reo_level_mpdu_frame_info.rx_mpdu_desc_info_details;
+
+	qdf_mem_copy(reo_ring_mpdu_desc_info, saved_mpdu_desc_info,
+		sizeof(*reo_ring_mpdu_desc_info));
+
+	/*
+	 * TODO: Check for additional fields that need configuration in
+	 * reo_ring_mpdu_desc_info
+	 */
+
+	/* Update msdu_link_ptr in the reo entrance ring */
+	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);
+	buf_info.paddr = saved_msdu_link_ptr->msdu_link_buf_info.paddr;
+	buf_info.sw_cookie =
+		saved_msdu_link_ptr->msdu_link_buf_info.sw_cookie;
+}
+
+/*
+ * hal_rx_defrag_save_info_from_ring_desc(): Saves info from ring desc
+ *
+ * @msdu_link_desc_va: msdu link descriptor handle
+ * @msdu_link_ptr_info: HAL view of msdu link pointer info
+ * @hbi: hal buffer info holding the physical address and sw cookie
+ *
+ * API used to save msdu link information along with the physical
+ * address. The API also copies the sw cookie.
+ *
+ * Returns: None
+ */
+static inline
+void hal_rx_defrag_save_info_from_ring_desc(void *msdu_link_desc_va,
+	struct hal_rx_msdu_link_ptr_info *msdu_link_ptr_info,
+	struct hal_buf_info *hbi)
+{
+	struct rx_msdu_link *msdu_link_ptr =
+		(struct rx_msdu_link *)msdu_link_desc_va;
+
+	qdf_mem_copy(&msdu_link_ptr_info->msdu_link, msdu_link_ptr,
+		sizeof(struct rx_msdu_link));
+
+	msdu_link_ptr_info->msdu_link_buf_info.paddr = hbi->paddr;
+	msdu_link_ptr_info->msdu_link_buf_info.sw_cookie = hbi->sw_cookie;
+}
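Taken together, the two helpers above form the save/restore half of the defrag reinjection path. A hedged usage sketch, with hypothetical local names, not part of this change:

/* Illustrative sketch (names hypothetical): save the MSDU link descriptor
 * while the fragments are processed, then write it back into the REO
 * entrance ring descriptor before reinjecting the MPDU.
 */
static inline void defrag_reinject_sketch(void *msdu_link_desc_va,
	void *ring_desc, struct hal_buf_info *hbi,
	struct hal_rx_mpdu_desc_info *saved_mpdu_desc_info)
{
	struct hal_rx_msdu_link_ptr_info saved_link_ptr;

	hal_rx_defrag_save_info_from_ring_desc(msdu_link_desc_va,
					       &saved_link_ptr, hbi);
	/* ... defragment and reassemble the MSDUs here ... */
	hal_rx_defrag_update_src_ring_desc(ring_desc, saved_mpdu_desc_info,
					   &saved_link_ptr);
}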
+
+/*
+ * hal_rx_get_desc_len(): Returns rx descriptor length
+ *
+ * Returns the size of rx_pkt_tlvs which precedes the
+ * data in the nbuf
+ *
+ * Returns: Length of rx descriptor
+ */
+static inline
+uint16_t hal_rx_get_desc_len(void)
+{
+	return sizeof(struct rx_pkt_tlvs);
+}
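A typical use of hal_rx_get_desc_len() is to step past the packet TLVs at the head of the rx buffer to reach the frame payload. A minimal sketch, assuming the TLV block sits at the start of the buffer as implied by hal_rx_get_pkt_tlvs():

/* Illustrative sketch (not part of this change), assuming rx_pkt_tlvs sits
 * at the start of the rx buffer: advance past the TLVs to the payload.
 */
static inline uint8_t *rx_payload_sketch(qdf_nbuf_t nbuf)
{
	return qdf_nbuf_data(nbuf) + hal_rx_get_desc_len();
}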
+
+#endif /* _HAL_RX_H */

+ 2 - 1
hal/wifi3.0/hal_api.h

@@ -947,4 +947,5 @@ static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
  */
 extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
 	struct hal_srng_params *ring_params);
-#endif /* _HAL_API_H_ */
+
+#endif /* _HAL_API_H_ */

+ 3 - 0
hal/wifi3.0/hal_internal.h

@@ -33,6 +33,7 @@
 #include "qdf_types.h"
 #include "qdf_lock.h"
 #include "qdf_mem.h"
+#include "qdf_nbuf.h"
 #include "wcss_seq_hwiobase.h"
 #include "tlv_hdr.h"
 #include "tlv_tag_def.h"
@@ -62,9 +63,11 @@
 #include "rx_ppdu_start_user_info.h"
 #include "rx_ppdu_end_user_stats.h"
 #include "rx_ppdu_end_user_stats_ext.h"
+#include "rx_mpdu_desc_info.h"
 #include "tx_msdu_extension.h"
 #include "wcss_version.h"
 #include "pld_common.h"
+#include "rx_msdu_link.h"
 
 /* TBD: This should be moved to a shared HW header file */
 enum hal_srng_ring_id {

+ 8 - 8
hif/src/ce/ce_main.c

@@ -1113,7 +1113,7 @@ void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
 		HIF_INFO("%s, srng rings do not support fastpath", __func__);
 		return;
 	}
-	HIF_INFO("%s, Enabling fastpath mode", __func__);
+	HIF_DBG("%s, Enabling fastpath mode", __func__);
 	scn->fastpath_mode_on = true;
 }
 
@@ -1172,7 +1172,7 @@ void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
 		return;
 
 	if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
-		HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
+		HIF_DBG("%s %d Fastpath mode ON, Cleaning up HTT Tx CE",
 			 __func__, __LINE__);
 		sw_index = src_ring->sw_index;
 		write_index = src_ring->sw_index;
@@ -1666,7 +1666,7 @@ static int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
 		attr = hif_state->host_ce_config[pipe_num];
 		if (attr.src_nentries) {
 			/* pipe used to send to target */
-			HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
+			HIF_DBG("%s: pipe_num:%d pipe_info:0x%p",
 					 __func__, pipe_num, pipe_info);
 			ce_send_cb_register(pipe_info->ce_hdl,
 					    hif_pci_ce_send_done, pipe_info,
@@ -2499,17 +2499,17 @@ int hif_config_ce(struct hif_softc *scn)
 	}
 	scn->athdiag_procfs_inited = true;
 
-	HIF_INFO_MED("%s: ce_init done", __func__);
+	HIF_DBG("%s: ce_init done", __func__);
 
 	init_tasklet_workers(hif_hdl);
 	hif_fake_apps_init_ctx(scn);
 
-	HIF_TRACE("%s: X, ret = %d", __func__, rv);
+	HIF_DBG("%s: X, ret = %d", __func__, rv);
 
 #ifdef ADRASTEA_SHADOW_REGISTERS
-	HIF_INFO("%s, Using Shadow Registers instead of CE Registers", __func__);
+	HIF_DBG("%s, Using Shadow Registers instead of CE Registers", __func__);
 	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
-		HIF_INFO("%s Shadow Register%d is mapped to address %x",
+		HIF_DBG("%s Shadow Register%d is mapped to address %x",
 			  __func__, i,
 			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
 	}
@@ -3010,7 +3010,7 @@ static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
 	low_paddr  = BITS0_TO_31(paddr_rri_on_ddr);
 	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
 
-	HIF_INFO("%s using srri and drri from DDR", __func__);
+	HIF_DBG("%s using srri and drri from DDR", __func__);
 
 	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
 	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

+ 1 - 1
hif/src/dispatcher/dummy.c

@@ -39,7 +39,7 @@
  */
 void hif_dummy_bus_prevent_linkdown(struct hif_softc *scn, bool flag)
 {
-	HIF_ERROR("wlan: %s pcie power collapse ignored",
+	HIF_DBG("wlan: %s pcie power collapse ignored",
 			(flag ? "disable" : "enable"));
 }
 

+ 2 - 2
hif/src/hif_main.c

@@ -544,7 +544,7 @@ QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
 
 	scn->hif_init_done = true;
 
-	HIF_TRACE("%s: OK", __func__);
+	HIF_DBG("%s: OK", __func__);
 
 	return QDF_STATUS_SUCCESS;
 }
@@ -568,7 +568,7 @@ void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
 
 	scn->notice_send = false;
 
-	HIF_INFO("%s: X", __func__);
+	HIF_DBG("%s: X", __func__);
 }
 
 void hif_display_stats(struct hif_opaque_softc *hif_ctx)

+ 14 - 14
hif/src/hif_napi.c

@@ -140,7 +140,7 @@ int hif_napi_create(struct hif_opaque_softc   *hif_ctx,
 			goto hnc_err;
 		}
 
-		HIF_INFO("%s: NAPI structures initialized, rc=%d",
+		HIF_DBG("%s: NAPI structures initialized, rc=%d",
 			 __func__, rc);
 	}
 	for (i = 0; i < hif->ce_count; i++) {
@@ -186,7 +186,7 @@ int hif_napi_create(struct hif_opaque_softc   *hif_ctx,
 		 * protection as there should be no-one around yet
 		 */
 		napid->ce_map |= (0x01 << i);
-		HIF_INFO("%s: NAPI id %d created for pipe %d", __func__,
+		HIF_DBG("%s: NAPI id %d created for pipe %d", __func__,
 			 napii->id, i);
 	}
 	NAPI_DEBUG("NAPI ids created for all applicable pipes");
@@ -242,7 +242,7 @@ int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
 		if (hif->napi_data.state == HIF_NAPI_CONF_UP) {
 			if (force) {
 				napi_disable(&(napii->napi));
-				HIF_INFO("%s: NAPI entry %d force disabled",
+				HIF_DBG("%s: NAPI entry %d force disabled",
 					 __func__, id);
 				NAPI_DEBUG("NAPI %d force disabled", id);
 			} else {
@@ -265,7 +265,7 @@ int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
 
 			napid->ce_map &= ~(0x01 << ce);
 			napii->scale  = 0;
-			HIF_INFO("%s: NAPI %d destroyed\n", __func__, id);
+			HIF_DBG("%s: NAPI %d destroyed\n", __func__, id);
 
 			/* if there are no active instances and
 			 * if they are all destroyed,
@@ -278,7 +278,7 @@ int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
 				qdf_spinlock_destroy(&(napid->lock));
 				memset(napid,
 				       0, sizeof(struct qca_napi_data));
-				HIF_INFO("%s: no NAPI instances. Zapped.",
+				HIF_DBG("%s: no NAPI instances. Zapped.",
 					 __func__);
 			}
 		}
@@ -323,7 +323,7 @@ int hif_napi_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
 				napii = &(napid->napis[i]);
 				napii->lro_flush_cb = lro_flush_handler;
 				napii->lro_ctx = data;
-				HIF_ERROR("Registering LRO for ce_id %d NAPI callback for %d flush_cb %p, lro_data %p\n",
+				HIF_DBG("Registering LRO for ce_id %d NAPI callback for %d flush_cb %p, lro_data %p\n",
 					i, napii->id, napii->lro_flush_cb,
 					napii->lro_ctx);
 				rc++;
@@ -359,7 +359,7 @@ void hif_napi_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl,
 			ce_state = scn->ce_id_to_state[i];
 			if ((ce_state != NULL) && (ce_state->htt_rx_data)) {
 				napii = &(napid->napis[i]);
-				HIF_ERROR("deRegistering LRO for ce_id %d NAPI callback for %d flush_cb %p, lro_data %p\n",
+				HIF_DBG("deRegistering LRO for ce_id %d NAPI callback for %d flush_cb %p, lro_data %p\n",
 					i, napii->id, napii->lro_flush_cb,
 					napii->lro_ctx);
 				qdf_spin_lock_bh(&napii->lro_unloading_lock);
@@ -492,25 +492,25 @@ int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
 	case NAPI_EVT_INT_STATE: {
 		int on = (data != ((void *)0));
 
-		HIF_INFO("%s: recved evnt: STATE_CMD %d; v = %d (state=0x%0x)",
+		HIF_DBG("%s: recved evnt: STATE_CMD %d; v = %d (state=0x%0x)",
 			 __func__, event,
 			 on, prev_state);
 		if (on)
 			if (prev_state & HIF_NAPI_CONF_UP) {
-				HIF_INFO("%s: duplicate NAPI conf ON msg",
+				HIF_DBG("%s: duplicate NAPI conf ON msg",
 					 __func__);
 			} else {
-				HIF_INFO("%s: setting state to ON",
+				HIF_DBG("%s: setting state to ON",
 					 __func__);
 				napid->state |= HIF_NAPI_CONF_UP;
 			}
 		else /* off request */
 			if (prev_state & HIF_NAPI_CONF_UP) {
-				HIF_INFO("%s: setting state to OFF",
+				HIF_DBG("%s: setting state to OFF",
 				 __func__);
 				napid->state &= ~HIF_NAPI_CONF_UP;
 			} else {
-				HIF_INFO("%s: duplicate NAPI conf OFF msg",
+				HIF_DBG("%s: duplicate NAPI conf OFF msg",
 					 __func__);
 			}
 		break;
@@ -640,7 +640,7 @@ int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
 				}
 		}
 	} else {
-		HIF_INFO("%s: no change in hif napi state (still %d)",
+		HIF_DBG("%s: no change in hif napi state (still %d)",
 			 __func__, prev_state);
 	}
 
@@ -1520,7 +1520,7 @@ static inline void hif_napi_bl_irq(struct qca_napi_data *napid, bool bl_flag)
 		else
 			irq_modify_status(napid->napis[i].irq,
 					  IRQ_NO_BALANCING, 0);
-		HIF_INFO("%s: bl_flag %d CE %d", __func__, bl_flag, i);
+		HIF_DBG("%s: bl_flag %d CE %d", __func__, bl_flag, i);
 	}
 }
 

+ 2 - 2
hif/src/pcie/if_pci.c

@@ -2723,7 +2723,7 @@ static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
  */
 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
 {
-	HIF_ERROR("wlan: %s pcie power collapse",
+	HIF_DBG("wlan: %s pcie power collapse",
 			(flag ? "disable" : "enable"));
 	hif_runtime_prevent_linkdown(scn, flag);
 	pld_wlan_pm_control(scn->qdf_dev->dev, flag);
@@ -2731,7 +2731,7 @@ void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
 #else
 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
 {
-	HIF_ERROR("wlan: %s pcie power collapse",
+	HIF_DBG("wlan: %s pcie power collapse",
 			(flag ? "disable" : "enable"));
 	hif_runtime_prevent_linkdown(scn, flag);
 }

+ 1 - 1
hif/src/snoc/if_snoc.c

@@ -289,7 +289,7 @@ QDF_STATUS hif_snoc_enable_bus(struct hif_softc *ol_sc,
 	/* the bus should remain on during suspend for snoc */
 	hif_vote_link_up(GET_HIF_OPAQUE_HDL(ol_sc));
 
-	HIF_TRACE("%s: X - hif_type = 0x%x, target_type = 0x%x",
+	HIF_DBG("%s: X - hif_type = 0x%x, target_type = 0x%x",
 		  __func__, hif_type, target_type);
 
 	return QDF_STATUS_SUCCESS;

File diff suppressed because it is too large
+ 602 - 377
os_if/linux/qca_vendor.h


+ 6 - 1
os_if/linux/scan/src/wlan_cfg80211_scan.c

@@ -34,7 +34,9 @@
 #include <wlan_cfg80211_scan.h>
 #include <qdf_mem.h>
 #include <wlan_utility.h>
+#ifdef WLAN_POLICY_MGR_ENABLE
 #include <wlan_policy_mgr_api.h>
+#endif
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
 static uint32_t hdd_config_sched_scan_start_delay(
@@ -973,17 +975,19 @@ int wlan_cfg80211_scan(struct wlan_objmgr_pdev *pdev,
 	if (request->n_channels) {
 		char chl[(request->n_channels * 5) + 1];
 		int len = 0;
+#ifdef WLAN_POLICY_MGR_ENABLE
 		bool ap_or_go_present =
 			policy_mgr_mode_specific_connection_count(
 			     psoc, QDF_SAP_MODE, NULL) ||
 			     policy_mgr_mode_specific_connection_count(
 			     psoc, QDF_P2P_GO_MODE, NULL);
+#endif
 
 		for (i = 0; i < request->n_channels; i++) {
 			channel = request->channels[i]->hw_value;
 			if (wlan_is_dsrc_channel(wlan_chan_to_freq(channel)))
 				continue;
-
+#ifdef WLAN_POLICY_MGR_ENABLE
 			if (ap_or_go_present) {
 				bool ok;
 				int ret;
@@ -1001,6 +1005,7 @@ int wlan_cfg80211_scan(struct wlan_objmgr_pdev *pdev,
 				if (!ok)
 					continue;
 			}
+#endif
 
 			len += snprintf(chl + len, 5, "%d ", channel);
 			req->scan_req.chan_list[i] = wlan_chan_to_freq(channel);

+ 15 - 0
qdf/inc/qdf_defer.h

@@ -140,6 +140,21 @@ static inline qdf_workqueue_t *qdf_create_workqueue(char *name)
 	return  __qdf_create_workqueue(name);
 }
 
+/**
+ * qdf_create_singlethread_workqueue() - create a single threaded workqueue
+ * @name: string
+ *
+ * This API creates a dedicated work queue with a single worker thread, to
+ * avoid wasting resources when the work items submitted to this queue are
+ * neither very critical nor frequent.
+ *
+ * Return: pointer of type qdf_workqueue_t
+ */
+static inline qdf_workqueue_t *qdf_create_singlethread_workqueue(char *name)
+{
+	return  __qdf_create_singlethread_workqueue(name);
+}
+
 /**
  * qdf_queue_work - Queue the work/task
  * @hdl: OS handle

+ 1 - 0
qdf/inc/qdf_status.h

@@ -139,6 +139,7 @@ typedef enum {
 	QDF_STATUS_CRYPTO_MIC_FAILURE,
 	QDF_STATUS_CRYPTO_ENCRYPT_FAILED,
 	QDF_STATUS_CRYPTO_DECRYPT_FAILED,
+	QDF_STATUS_E_DEFRAG_ERROR,
 	QDF_STATUS_MAX
 } QDF_STATUS;
 

+ 16 - 1
qdf/linux/src/i_qdf_defer.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -308,6 +308,21 @@ static inline __qdf_workqueue_t *__qdf_create_workqueue(char *name)
 	return create_workqueue(name);
 }
 
+/**
+ * __qdf_create_singlethread_workqueue() - create a single threaded workqueue
+ * @name: string
+ *
+ * This API creates a dedicated work queue with a single worker thread, to
+ * avoid wasting resources when the work items submitted to this queue are
+ * neither very critical nor frequent.
+ *
+ * Return: pointer of type qdf_workqueue_t
+ */
+static inline __qdf_workqueue_t *__qdf_create_singlethread_workqueue(char *name)
+{
+	return create_singlethread_workqueue(name);
+}
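For context, a minimal sketch of the underlying Linux pattern this wrapper exposes (names are illustrative, not from this change): every item queued to a single-threaded workqueue is serialized on one worker thread.

/* Illustrative sketch (names hypothetical): create a dedicated
 * single-threaded workqueue and queue one work item on it.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* deferred, serialized work runs here */
}

static int example_init(void)
{
	my_wq = create_singlethread_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	INIT_WORK(&my_work, my_work_fn);
	queue_work(my_wq, &my_work);
	return 0;
}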
+
 /**
  * __qdf_flush_workqueue - flush the workqueue
  * @hdl: OS handle

+ 1 - 1
umac/cmn_services/policy_mgr/inc/wlan_policy_mgr_public_struct.h

@@ -35,7 +35,7 @@
  */
 
 /* Include files */
-#include "wmi_unified.h"
+#include <wmi_unified_api.h>
 
 #define POLICY_MGR_MAX_CHANNEL_LIST 128
 

+ 22 - 15
umac/cmn_services/policy_mgr/src/wlan_policy_mgr_init_deinit.c

@@ -318,7 +318,21 @@ QDF_STATUS policy_mgr_deinit(void)
 
 QDF_STATUS policy_mgr_psoc_open(struct wlan_objmgr_psoc *psoc)
 {
-	/* placeholder for now */
+	struct policy_mgr_psoc_priv_obj *pm_ctx;
+
+	pm_ctx = policy_mgr_get_context(psoc);
+	if (!pm_ctx) {
+		policy_mgr_err("Invalid Context");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (!QDF_IS_STATUS_SUCCESS(qdf_mutex_create(
+		&pm_ctx->qdf_conc_list_lock))) {
+		policy_mgr_err("Failed to init qdf_conc_list_lock");
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAILURE;
+	}
+
 	return QDF_STATUS_SUCCESS;
 }
 
@@ -332,6 +346,13 @@ QDF_STATUS policy_mgr_psoc_close(struct wlan_objmgr_psoc *psoc)
 		return QDF_STATUS_E_FAILURE;
 	}
 
+	if (!QDF_IS_STATUS_SUCCESS(qdf_mutex_destroy(
+		&pm_ctx->qdf_conc_list_lock))) {
+		policy_mgr_err("Failed to destroy qdf_conc_list_lock");
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAILURE;
+	}
+
 	if (pm_ctx->hw_mode.hw_mode_list) {
 		qdf_mem_free(pm_ctx->hw_mode.hw_mode_list);
 		pm_ctx->hw_mode.hw_mode_list = NULL;
@@ -374,13 +395,6 @@ QDF_STATUS policy_mgr_psoc_enable(struct wlan_objmgr_psoc *psoc)
 		return status;
 	}
 
-	if (!QDF_IS_STATUS_SUCCESS(qdf_mutex_create(
-		&pm_ctx->qdf_conc_list_lock))) {
-		policy_mgr_err("Failed to init qdf_conc_list_lock");
-		QDF_ASSERT(0);
-		return status;
-	}
-
 	pm_ctx->do_hw_mode_change = false;
 	pm_ctx->hw_mode_change_in_progress = POLICY_MGR_HW_MODE_NOT_IN_PROGRESS;
 	/* reset sap mandatory channels */
@@ -440,13 +454,6 @@ QDF_STATUS policy_mgr_psoc_disable(struct wlan_objmgr_psoc *psoc)
 		return QDF_STATUS_E_FAILURE;
 	}
 
-	if (!QDF_IS_STATUS_SUCCESS(qdf_mutex_destroy(
-		&pm_ctx->qdf_conc_list_lock))) {
-		policy_mgr_err("Failed to destroy qdf_conc_list_lock");
-		status = QDF_STATUS_E_FAILURE;
-		QDF_ASSERT(0);
-	}
-
 	/* destroy connection_update_done_evt */
 	if (!QDF_IS_STATUS_SUCCESS(qdf_event_destroy
 		(&pm_ctx->connection_update_done_evt))) {

+ 75 - 54
umac/regulatory/core/src/reg_db.c

@@ -312,7 +312,7 @@ enum reg_domain {
 	WORLD_6C = 0x6C,
 };
 
-static const struct country_code_to_reg_domain g_all_countries[] = {
+const struct country_code_to_reg_domain g_all_countries[] = {
 	{CTRY_AFGHANISTAN, ETSI1_WORLD, "AF", "AF" , 40, 160, 0},
 	{CTRY_ALBANIA, ETSI1_WORLD, "AL", "AL", 40, 160, 0},
 	{CTRY_ALGERIA, APL13_WORLD, "DZ", "DZ", 40, 160, 0},
@@ -571,7 +571,7 @@ enum reg_domains_5g {
 };
 
 
-static const struct reg_domain_pair g_reg_dmn_pairs[] = {
+const struct reg_domain_pair g_reg_dmn_pairs[] = {
 	{NULL1_WORLD, NULL1, WORLD},
 
 	{FCC1_FCCA, FCC1, FCCA},
@@ -623,18 +623,18 @@ static const struct reg_domain_pair g_reg_dmn_pairs[] = {
 	{MKK11_FCCA, MKK11, FCCA},
 	{MKK11_MKKC, MKK11, MKKC},
 
-	{WORLD_60, WORLD_2G_3, WORLD_5G_2},
-	{WORLD_61, WORLD_2G_3, WORLD_5G_2},
-	{WORLD_62, WORLD_2G_3, WORLD_5G_2},
-	{WORLD_63, WORLD_2G_2, WORLD_5G_1},
-	{WORLD_65, WORLD_2G_2, WORLD_5G_1},
-	{WORLD_64, WORLD_2G_1, WORLD_5G_1},
-	{WORLD_66, WORLD_2G_1, WORLD_5G_2},
-	{WORLD_69, WORLD_2G_1, WORLD_5G_2},
-	{WORLD_67, WORLD_2G_2, WORLD_5G_2},
-	{WORLD_68, WORLD_2G_2, WORLD_5G_2},
-	{WORLD_6A, WORLD_2G_2, WORLD_5G_2},
-	{WORLD_6C, WORLD_2G_2, WORLD_5G_2},
+	{WORLD_60, WORLD_5G_2, WORLD_2G_3},
+	{WORLD_61, WORLD_5G_2, WORLD_2G_3},
+	{WORLD_62, WORLD_5G_2, WORLD_2G_3},
+	{WORLD_63, WORLD_5G_1, WORLD_2G_2},
+	{WORLD_65, WORLD_5G_1, WORLD_2G_2},
+	{WORLD_64, WORLD_5G_1, WORLD_2G_1},
+	{WORLD_66, WORLD_5G_2, WORLD_2G_1},
+	{WORLD_69, WORLD_5G_2, WORLD_2G_1},
+	{WORLD_67, WORLD_5G_2, WORLD_2G_2},
+	{WORLD_68, WORLD_5G_2, WORLD_2G_2},
+	{WORLD_6A, WORLD_5G_2, WORLD_2G_2},
+	{WORLD_6C, WORLD_5G_2, WORLD_2G_2},
 };
 
 enum reg_rules_2g {
@@ -648,7 +648,7 @@ enum reg_rules_2g {
 	CHAN_14_2,
 };
 
-static const struct regulatory_rule reg_rules_2g[] = {
+const struct regulatory_rule reg_rules_2g[] = {
 
 	[CHAN_1_11_1] = {2402, 2472, 40, 30, 0},
 	[CHAN_1_11_2] = {2402, 2472, 40, 20, 0},
@@ -661,17 +661,17 @@ static const struct regulatory_rule reg_rules_2g[] = {
 };
 
 
-static const struct regdomain regdomains_2g[] = {
+const struct regdomain regdomains_2g[] = {
 
-	[FCCA] = {CTL_FCC, DFS_UNINIT_REG, 0, 1, {CHAN_1_11_1} },
-	[WORLD] = {CTL_ETSI, DFS_UNINIT_REG, 0, 1, {CHAN_1_13_1} },
-	[MKKA] = {CTL_MKK, DFS_UNINIT_REG, 0, 2, {CHAN_1_13_1, CHAN_14_1} },
-	[MKKC] = {CTL_MKK, DFS_UNINIT_REG, 0, 1, {CHAN_1_13_1} },
-	[ETSIC] = {CTL_ETSI, DFS_UNINIT_REG, 0, 1, {CHAN_1_13_2} },
-	[WORLD_2G_1] = {CTL_FCC, DFS_UNINIT_REG, 0, 1, {CHAN_1_11_2} },
-	[WORLD_2G_2] = {CTL_FCC, DFS_UNINIT_REG, 0, 2,
+	[FCCA] = {CTL_FCC, DFS_UNINIT_REG, 0, 6, 1, {CHAN_1_11_1} },
+	[WORLD] = {CTL_ETSI, DFS_UNINIT_REG, 0, 0, 1, {CHAN_1_13_1} },
+	[MKKA] = {CTL_MKK, DFS_UNINIT_REG, 0, 0, 2, {CHAN_1_13_1, CHAN_14_1} },
+	[MKKC] = {CTL_MKK, DFS_UNINIT_REG, 0, 0, 1, {CHAN_1_13_1} },
+	[ETSIC] = {CTL_ETSI, DFS_UNINIT_REG, 0, 0, 1, {CHAN_1_13_2} },
+	[WORLD_2G_1] = {CTL_FCC, DFS_UNINIT_REG, 0, 0, 1, {CHAN_1_11_2} },
+	[WORLD_2G_2] = {CTL_FCC, DFS_UNINIT_REG, 0, 0, 2,
 			{CHAN_1_11_2, CHAN_12_13_1} },
-	[WORLD_2G_3] = {CTL_FCC, DFS_UNINIT_REG, 0, 3,
+	[WORLD_2G_3] = {CTL_FCC, DFS_UNINIT_REG, 0, 0, 3,
 			{CHAN_1_11_2, CHAN_12_13_1, CHAN_14_2} },
 };
 
@@ -709,7 +709,7 @@ enum reg_rules_5g {
 	CHAN_5735_5775_1,
 };
 
-static const struct regulatory_rule reg_rules_5g[] = {
+const struct regulatory_rule reg_rules_5g[] = {
 
 	[CHAN_4910_4990_1] = {4910, 4990, 20, 20, 0},
 	[CHAN_4940_4990_1] = {4940, 4990, 20, 33, 0},
@@ -743,117 +743,138 @@ static const struct regulatory_rule reg_rules_5g[] = {
 };
 
 
-static const struct regdomain regdomains_5g[] = {
+const struct regdomain regdomains_5g[] = {
 
-	[FCC1] = {CTL_FCC, DFS_FCC_REG, 2, 3, {CHAN_5170_5250_1,
+	[FCC1] = {CTL_FCC, DFS_FCC_REG, 2, 6, 3, {CHAN_5170_5250_1,
 					      CHAN_5250_5330_1,
 					      CHAN_5735_5835_1} },
 
-	[FCC2] = {CTL_FCC, DFS_CN_REG, 2, 3, {CHAN_5170_5250_2,
+	[FCC2] = {CTL_FCC, DFS_CN_REG, 2, 6, 3, {CHAN_5170_5250_2,
 					     CHAN_5250_5330_1,
 					     CHAN_5735_5835_1} },
 
-	[FCC3] = {CTL_FCC, DFS_FCC_REG, 2, 4, {CHAN_5170_5250_2,
+	[FCC3] = {CTL_FCC, DFS_FCC_REG, 2, 6, 4, {CHAN_5170_5250_2,
 					      CHAN_5250_5330_1,
 					      CHAN_5490_5730_1,
 					      CHAN_5735_5835_1} },
 
-	[FCC4] = {CTL_FCC, DFS_FCC_REG, 2, 4, {CHAN_4940_4990_1,
+	[FCC4] = {CTL_FCC, DFS_FCC_REG, 2, 6, 4, {CHAN_4940_4990_1,
 					      CHAN_5170_5250_1,
 					      CHAN_5250_5330_1,
 					      CHAN_5735_5835_1} },
 
-	[FCC6] = {CTL_FCC, DFS_FCC_REG, 2, 5, {CHAN_5170_5250_2,
+	[FCC6] = {CTL_FCC, DFS_FCC_REG, 2, 6, 5, {CHAN_5170_5250_2,
 					      CHAN_5250_5330_1,
 					      CHAN_5490_5590_1,
 					      CHAN_5650_5730_1,
 					      CHAN_5735_5835_1} },
 
-	[FCC8] = {CTL_FCC, DFS_FCC_REG, 2, 4, {CHAN_5170_5250_4,
+	[FCC8] = {CTL_FCC, DFS_FCC_REG, 2, 6, 4, {CHAN_5170_5250_4,
 					      CHAN_5250_5330_1,
 					      CHAN_5490_5730_1,
 					      CHAN_5735_5835_1} },
 
-	[ETSI1] = {CTL_ETSI, DFS_ETSI_REG, 5, 3, {CHAN_5170_5250_2,
+	[ETSI1] = {CTL_ETSI, DFS_ETSI_REG, 5, 0, 3, {CHAN_5170_5250_2,
 						 CHAN_5250_5330_1,
 						 CHAN_5490_5710_1} },
 
-	[ETSI3] = {CTL_ETSI, DFS_ETSI_REG, 5, 2, {CHAN_5170_5250_3,
+	[ETSI3] = {CTL_ETSI, DFS_ETSI_REG, 5, 0, 2, {CHAN_5170_5250_3,
 						 CHAN_5250_5330_2} },
 
-	[ETSI4] = {CTL_ETSI, DFS_ETSI_REG, 5, 2, {CHAN_5170_5250_1,
+	[ETSI4] = {CTL_ETSI, DFS_ETSI_REG, 5, 0, 2, {CHAN_5170_5250_1,
 						 CHAN_5250_5330_3} },
 
-	[ETSI8] = {CTL_ETSI, DFS_ETSI_REG, 20, 4, {CHAN_5170_5250_3,
+	[ETSI8] = {CTL_ETSI, DFS_ETSI_REG, 20, 0, 4, {CHAN_5170_5250_3,
 						  CHAN_5250_5330_2,
 						  CHAN_5490_5730_2,
 						  CHAN_5735_5835_2} },
 
-	[ETSI9] = {CTL_ETSI, DFS_ETSI_REG, 20, 4, {CHAN_5170_5250_3,
+	[ETSI9] = {CTL_ETSI, DFS_ETSI_REG, 20, 0, 4, {CHAN_5170_5250_3,
 						  CHAN_5250_5330_2,
 						  CHAN_5490_5670_1,
 						  CHAN_5735_5835_3} },
 
-	[APL1] = {CTL_ETSI, DFS_UNINIT_REG, 2, 1, {CHAN_5735_5835_2} },
+	[APL1] = {CTL_ETSI, DFS_UNINIT_REG, 2, 0, 1, {CHAN_5735_5835_2} },
 
-	[APL2] = {CTL_ETSI, DFS_UNINIT_REG, 2, 1, {CHAN_5735_5815_1} },
+	[APL2] = {CTL_ETSI, DFS_UNINIT_REG, 2, 0, 1, {CHAN_5735_5815_1} },
 
-	[APL4] = {CTL_ETSI, DFS_UNINIT_REG, 2, 2, {CHAN_5170_5250_2,
+	[APL4] = {CTL_ETSI, DFS_UNINIT_REG, 2, 0, 2, {CHAN_5170_5250_2,
 						  CHAN_5735_5835_1} },
 
-	[APL6] = {CTL_ETSI, DFS_ETSI_REG, 2, 3, {CHAN_5170_5250_3,
+	[APL6] = {CTL_ETSI, DFS_ETSI_REG, 2, 0, 3, {CHAN_5170_5250_3,
 						CHAN_5250_5330_2,
 						CHAN_5735_5835_3} },
 
-	[APL8] = {CTL_ETSI, DFS_ETSI_REG, 2, 2, {CHAN_5250_5330_4,
+	[APL8] = {CTL_ETSI, DFS_ETSI_REG, 2, 0, 2, {CHAN_5250_5330_4,
 						CHAN_5735_5835_2} },
 
-	[APL9] = {CTL_ETSI, DFS_KR_REG, 2, 4, {CHAN_5170_5250_3,
+	[APL9] = {CTL_ETSI, DFS_KR_REG, 2, 6, 4, {CHAN_5170_5250_3,
 					      CHAN_5250_5330_2,
 					      CHAN_5490_5630_1,
 					      CHAN_5735_5815_1} },
 
-	[APL10] = {CTL_ETSI, DFS_ETSI_REG, 2, 4, {CHAN_5170_5250_3,
+	[APL10] = {CTL_ETSI, DFS_ETSI_REG, 2, 6, 4, {CHAN_5170_5250_3,
 						 CHAN_5250_5330_2,
 						 CHAN_5490_5710_1,
 						 CHAN_5735_5815_1} },
 
-	[APL12] = {CTL_ETSI, DFS_ETSI_REG, 2, 3, {CHAN_5170_5250_1,
+	[APL12] = {CTL_ETSI, DFS_ETSI_REG, 2, 0, 3, {CHAN_5170_5250_1,
 						 CHAN_5490_5570_1,
 						 CHAN_5735_5775_1} },
 
-	[APL14] = {CTL_FCC, DFS_CN_REG, 2, 3, {CHAN_5170_5250_2,
+	[APL14] = {CTL_FCC, DFS_CN_REG, 2, 0, 3, {CHAN_5170_5250_2,
 					      CHAN_5250_5330_1,
 					      CHAN_5735_5835_4} },
 
-	[MKK3] = {CTL_MKK, DFS_UNINIT_REG, 2, 1, {CHAN_5170_5250_3} },
+	[MKK3] = {CTL_MKK, DFS_UNINIT_REG, 2, 0, 1, {CHAN_5170_5250_3} },
 
-	[MKK4] = {CTL_MKK, DFS_MKK_REG, 2, 2, {CHAN_5170_5250_3,
+	[MKK4] = {CTL_MKK, DFS_MKK_REG, 2, 0, 2, {CHAN_5170_5250_3,
 					      CHAN_5250_5330_2} },
 
-	[MKK5] = {CTL_MKK, DFS_MKK_REG, 2, 3, {CHAN_5170_5250_3,
+	[MKK5] = {CTL_MKK, DFS_MKK_REG, 2, 0, 3, {CHAN_5170_5250_3,
 					      CHAN_5250_5330_2,
 					      CHAN_5490_5710_2} },
 
-	[MKK9] = {CTL_MKK, DFS_UNINIT_REG, 2, 3, {CHAN_5170_5250_3,
+	[MKK9] = {CTL_MKK, DFS_UNINIT_REG, 2, 0, 3, {CHAN_5170_5250_3,
 						 CHAN_4910_4990_1,
 						 CHAN_5030_5090_1} },
 
-	[MKK10] = {CTL_MKK, DFS_MKK_REG, 2, 4, {CHAN_5170_5250_3,
+	[MKK10] = {CTL_MKK, DFS_MKK_REG, 2, 0, 4, {CHAN_5170_5250_3,
 					       CHAN_5250_5330_2,
 					       CHAN_4910_4990_1,
 					       CHAN_5030_5090_1} },
 
-	[MKK11] = {CTL_MKK, DFS_MKK_REG, 2, 5, {CHAN_5170_5250_3,
+	[MKK11] = {CTL_MKK, DFS_MKK_REG, 2, 0, 5, {CHAN_5170_5250_3,
 					       CHAN_5250_5330_2,
 					       CHAN_5490_5710_2,
 					       CHAN_4910_4990_1,
 					       CHAN_5030_5090_1} },
 
-	[WORLD_5G_1] = {CTL_FCC, DFS_UNINIT_REG, 2, 2, {CHAN_5170_5330_1,
+	[WORLD_5G_1] = {CTL_FCC, DFS_UNINIT_REG, 2, 0, 2, {CHAN_5170_5330_1,
 						       CHAN_5735_5835_5} },
 
-	[WORLD_5G_2] = {CTL_FCC, DFS_UNINIT_REG, 2, 3, {CHAN_5170_5330_1,
+	[WORLD_5G_2] = {CTL_FCC, DFS_UNINIT_REG, 2, 0, 3, {CHAN_5170_5330_1,
 						       CHAN_5490_5730_3,
 						       CHAN_5735_5835_5} },
 };
+
+QDF_STATUS reg_get_num_countries(int *num_countries)
+{
+	*num_countries = QDF_ARRAY_SIZE(g_all_countries);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS reg_get_num_reg_dmn_pairs(int *num_reg_dmn)
+{
+	*num_reg_dmn = QDF_ARRAY_SIZE(g_reg_dmn_pairs);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS reg_get_default_country(uint16_t *default_country)
+{
+	*default_country = CTRY_UNITED_STATES;
+
+	return QDF_STATUS_SUCCESS;
+}
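A small, illustrative sketch of how the three new helpers can be consumed together (not part of this change):

/* Illustrative sketch: query the sizes of the static regulatory tables and
 * the built-in default country code.
 */
static void reg_tables_sketch(void)
{
	int num_countries, num_reg_dmn;
	uint16_t def_country;

	reg_get_num_countries(&num_countries);
	reg_get_num_reg_dmn_pairs(&num_reg_dmn);
	reg_get_default_country(&def_country);	/* CTRY_UNITED_STATES */
}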

+ 7 - 0
umac/regulatory/core/src/reg_db.h

@@ -95,6 +95,7 @@ struct regdomain   {
 	uint8_t ctl_val;
 	enum dfs_reg dfs_region;
 	uint16_t min_bw;
+	uint8_t ant_gain;
 	uint8_t num_reg_rules;
 	uint8_t reg_rule_id[MAX_REG_RULES];
 };
@@ -145,4 +146,10 @@ enum ctl_value {
 	CTL_NONE = 0xff
 };
 
+QDF_STATUS reg_get_num_countries(int *num_countries);
+
+QDF_STATUS reg_get_num_reg_dmn_pairs(int *num_reg_dmn);
+
+QDF_STATUS reg_get_default_country(uint16_t *default_country);
+
 #endif

+ 339 - 0
umac/regulatory/core/src/reg_db_parser.c

@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: reg_db_parser.c
+ * This file provides regulatory data base parser functions.
+ */
+
+#include <qdf_types.h>
+#include "reg_db_parser.h"
+#include <qdf_mem.h>
+#include "reg_priv.h"
+
+QDF_STATUS reg_is_country_code_valid(uint8_t alpha[3])
+{
+	uint16_t i;
+	int num_countries;
+
+	reg_get_num_countries(&num_countries);
+
+	for (i = 0; i < num_countries; i++) {
+		if ((g_all_countries[i].alpha2[0] == alpha[0]) &&
+				(g_all_countries[i].alpha2[1] == alpha[1]) &&
+				(g_all_countries[i].alpha2[2] == alpha[2]))
+			return QDF_STATUS_SUCCESS;
+		else
+
+	return QDF_STATUS_E_FAILURE;
+}
+
+QDF_STATUS reg_regrules_assign(uint8_t dmn_id_2g,
+	uint8_t dmn_id_5g,
+	uint8_t ant_gain_2g,
+	uint8_t ant_gain_5g,
+	struct cur_regulatory_info *reg_info)
+
+{
+	uint8_t k;
+	uint8_t rule_index;
+	struct cur_reg_rule *r_r_2g = reg_info->reg_rules_2g_ptr;
+	struct cur_reg_rule *r_r_5g = reg_info->reg_rules_5g_ptr;
+
+	for (k = 0; k < reg_info->num_2g_reg_rules; k++) {
+		rule_index = regdomains_2g[dmn_id_2g].reg_rule_id[k];
+		r_r_2g->start_freq = reg_rules_2g[rule_index].start_freq;
+		r_r_2g->end_freq = reg_rules_2g[rule_index].end_freq;
+		r_r_2g->max_bw = reg_rules_2g[rule_index].max_bw;
+		r_r_2g->reg_power = reg_rules_2g[rule_index].reg_power;
+		r_r_2g->flags = reg_rules_2g[rule_index].flags;
+		r_r_2g->ant_gain = ant_gain_2g;
+		r_r_2g++;
+	}
+
+	for (k = 0; k < reg_info->num_5g_reg_rules; k++) {
+		rule_index = regdomains_5g[dmn_id_5g].reg_rule_id[k];
+		r_r_5g->start_freq = reg_rules_5g[rule_index].start_freq;
+		r_r_5g->end_freq = reg_rules_5g[rule_index].end_freq;
+		r_r_5g->max_bw = reg_rules_5g[rule_index].max_bw;
+		r_r_5g->reg_power = reg_rules_5g[rule_index].reg_power;
+		r_r_5g->flags = reg_rules_5g[rule_index].flags;
+		r_r_5g->ant_gain = ant_gain_5g;
+		r_r_5g++;
+	}
+
+	if ((r_r_2g == reg_info->reg_rules_2g_ptr) &&
+			(r_r_5g == reg_info->reg_rules_5g_ptr))
+		return QDF_STATUS_E_FAILURE;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS reg_get_rdpair_from_country_iso(uint8_t *alpha,
+	uint16_t *country_index,
+	uint16_t *regdmn_pair)
+{
+	uint16_t i, j;
+	int num_countries;
+	int num_reg_dmn;
+
+	reg_get_num_countries(&num_countries);
+	reg_get_num_reg_dmn_pairs(&num_reg_dmn);
+
+	for (i = 0; i < num_countries; i++) {
+		if ((g_all_countries[i].alpha2[0] == alpha[0]) &&
+				(g_all_countries[i].alpha2[1] == alpha[1]) &&
+				(g_all_countries[i].alpha2[2] == alpha[2]))
+			break;
+	}
+
+	if (i == num_countries) {
+		*country_index = -1;
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	for (j = 0; j < num_reg_dmn; j++) {
+		if (g_reg_dmn_pairs[j].reg_dmn_pair_id ==
+				g_all_countries[i].reg_dmn_pair_id)
+			break;
+	}
+
+	if (j == num_reg_dmn) {
+		*regdmn_pair = -1;
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	*country_index = i;
+	*regdmn_pair = j;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS reg_get_rdpair_from_regdmn_id(uint16_t regdmn_id,
+		uint16_t *regdmn_pair)
+{
+	uint16_t j;
+	int num_reg_dmn;
+
+	reg_get_num_reg_dmn_pairs(&num_reg_dmn);
+
+	for (j = 0; j < num_reg_dmn; j++) {
+		if (g_reg_dmn_pairs[j].reg_dmn_pair_id == regdmn_id)
+			break;
+	}
+
+	if (j == num_reg_dmn) {
+		*regdmn_pair = -1;
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	*regdmn_pair = j;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS reg_get_rdpair_from_country_code(uint16_t cc,
+		uint16_t *country_index,
+		uint16_t *regdmn_pair)
+{
+	uint16_t i, j;
+	int num_countries;
+	int num_reg_dmn;
+
+	reg_get_num_countries(&num_countries);
+	reg_get_num_reg_dmn_pairs(&num_reg_dmn);
+
+	for (i = 0; i < num_countries; i++) {
+		if (g_all_countries[i].country_code == cc)
+			break;
+	}
+
+	if (i == num_countries) {
+		*country_index = -1;
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	for (j = 0; j < num_reg_dmn; j++) {
+		if (g_reg_dmn_pairs[j].reg_dmn_pair_id ==
+				g_all_countries[i].reg_dmn_pair_id)
+			break;
+	}
+
+	if (j == num_reg_dmn) {
+		*regdmn_pair = -1;
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	*country_index = i;
+	*regdmn_pair = j;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+static inline QDF_STATUS reg_get_reginfo_from_country_code_and_regdmn_pair(
+		struct cur_regulatory_info *reg_info,
+		uint16_t country_index,
+		uint16_t regdmn_pair)
+{
+	uint8_t rule_size_2g, rule_size_5g;
+	uint8_t dmn_id_5g, dmn_id_2g;
+	uint8_t ant_gain_2g, ant_gain_5g;
+	QDF_STATUS err;
+
+	dmn_id_5g = g_reg_dmn_pairs[regdmn_pair].dmn_id_5g;
+	dmn_id_2g = g_reg_dmn_pairs[regdmn_pair].dmn_id_2g;
+
+	rule_size_2g = QDF_ARRAY_SIZE(regdomains_2g[dmn_id_2g].reg_rule_id);
+	rule_size_5g = QDF_ARRAY_SIZE(regdomains_5g[dmn_id_5g].reg_rule_id);
+
+	if (((rule_size_2g + rule_size_5g) >=
+				regdomains_2g[dmn_id_2g].num_reg_rules +
+				regdomains_5g[dmn_id_5g].num_reg_rules)) {
+
+		qdf_mem_copy(reg_info->alpha2,
+				g_all_countries[country_index].alpha2,
+				sizeof(g_all_countries[country_index].alpha2));
+
+		reg_info->dfs_region = regdomains_5g[dmn_id_5g].dfs_region;
+		reg_info->phybitmap =
+			g_all_countries[country_index].phymode_bitmap;
+
+		reg_info->max_bw_2g = g_all_countries[country_index].max_bw_2g;
+		reg_info->max_bw_5g = g_all_countries[country_index].max_bw_5g;
+
+		reg_info->min_bw_2g = regdomains_2g[dmn_id_2g].min_bw;
+		reg_info->min_bw_5g = regdomains_5g[dmn_id_5g].min_bw;
+
+		ant_gain_2g = regdomains_2g[dmn_id_2g].ant_gain;
+		ant_gain_5g = regdomains_5g[dmn_id_5g].ant_gain;
+
+		reg_info->num_2g_reg_rules =
+			regdomains_2g[dmn_id_2g].num_reg_rules;
+		reg_info->num_5g_reg_rules =
+			regdomains_5g[dmn_id_5g].num_reg_rules;
+
+		reg_info->reg_rules_2g_ptr = (struct cur_reg_rule *)
+			qdf_mem_malloc((reg_info->num_2g_reg_rules) *
+					sizeof(struct cur_reg_rule));
+		reg_info->reg_rules_5g_ptr = (struct cur_reg_rule *)
+			qdf_mem_malloc((reg_info->num_5g_reg_rules) *
+					sizeof(struct cur_reg_rule));
+
+		err = reg_regrules_assign(dmn_id_2g, dmn_id_5g,
+				ant_gain_2g, ant_gain_5g, reg_info);
+
+		if (err == QDF_STATUS_E_FAILURE) {
+			reg_err("%s : No rule found for country index = %d regdmn_pair = %d\n",
+					__func__, country_index, regdmn_pair);
+			return QDF_STATUS_E_FAILURE;
+		}
+
+		return QDF_STATUS_SUCCESS;
+	}
+
+	return QDF_STATUS_E_NOMEM;
+}
+
+static inline QDF_STATUS reg_get_reginfo_from_regdmn_pair(
+		struct cur_regulatory_info *reg_info,
+		uint16_t regdmn_pair)
+{
+	uint8_t rule_size_2g, rule_size_5g;
+	uint8_t dmn_id_5g, dmn_id_2g;
+	uint8_t ant_gain_2g, ant_gain_5g;
+	QDF_STATUS err;
+
+	dmn_id_5g = g_reg_dmn_pairs[regdmn_pair].dmn_id_5g;
+	dmn_id_2g = g_reg_dmn_pairs[regdmn_pair].dmn_id_2g;
+
+	rule_size_2g = QDF_ARRAY_SIZE(regdomains_2g[dmn_id_2g].reg_rule_id);
+	rule_size_5g = QDF_ARRAY_SIZE(regdomains_5g[dmn_id_5g].reg_rule_id);
+
+	if (((rule_size_2g + rule_size_5g) >=
+		    regdomains_2g[dmn_id_2g].num_reg_rules +
+		    regdomains_5g[dmn_id_5g].num_reg_rules)) {
+
+		qdf_mem_zero(reg_info->alpha2, sizeof(reg_info->alpha2));
+
+		reg_info->dfs_region = regdomains_5g[dmn_id_5g].dfs_region;
+		reg_info->phybitmap = 0;
+
+		reg_info->max_bw_2g = 40;
+		reg_info->max_bw_5g = 160;
+
+		reg_info->min_bw_2g = regdomains_2g[dmn_id_2g].min_bw;
+		reg_info->min_bw_5g = regdomains_5g[dmn_id_5g].min_bw;
+
+		ant_gain_2g = regdomains_2g[dmn_id_2g].ant_gain;
+		ant_gain_5g = regdomains_5g[dmn_id_5g].ant_gain;
+
+		reg_info->num_2g_reg_rules =
+			regdomains_2g[dmn_id_2g].num_reg_rules;
+		reg_info->num_5g_reg_rules =
+			regdomains_5g[dmn_id_5g].num_reg_rules;
+
+		reg_info->reg_rules_2g_ptr = (struct cur_reg_rule *)
+			qdf_mem_malloc((reg_info->num_2g_reg_rules) *
+					sizeof(struct cur_reg_rule));
+		reg_info->reg_rules_5g_ptr = (struct cur_reg_rule *)
+			qdf_mem_malloc((reg_info->num_5g_reg_rules) *
+					sizeof(struct cur_reg_rule));
+
+		err = reg_regrules_assign(dmn_id_2g, dmn_id_5g,
+			ant_gain_2g, ant_gain_5g, reg_info);
+		if (err == QDF_STATUS_E_FAILURE) {
+			reg_err("%s : No rule found for regdmn_pair = %d\n",
+					__func__, regdmn_pair);
+			return QDF_STATUS_E_FAILURE;
+		}
+
+		return QDF_STATUS_SUCCESS;
+	}
+
+	return QDF_STATUS_E_NOMEM;
+}
+
+/* Given a country code, the function finds the current regulatory information */
+QDF_STATUS reg_get_cur_reginfo(struct cur_regulatory_info *reg_info,
+		uint16_t country_index,
+		uint16_t regdmn_pair)
+{
+	if ((country_index != (uint16_t)(-1)) &&
+			(regdmn_pair != (uint16_t)(-1)))
+		return reg_get_reginfo_from_country_code_and_regdmn_pair(
+				reg_info,
+				country_index,
+				regdmn_pair);
+	else if (regdmn_pair != (uint16_t)(-1))
+		return reg_get_reginfo_from_regdmn_pair(
+				reg_info,
+				regdmn_pair);
+	else
+		return QDF_STATUS_E_FAILURE;
+}
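A hedged sketch of the overall lookup flow built from the parser APIs above (the alpha-2 value and error handling are illustrative, not part of this change):

/* Illustrative sketch: resolve an ISO alpha-2 country string to table
 * indices, then build the current regulatory info from the static tables.
 */
static QDF_STATUS reg_lookup_sketch(struct cur_regulatory_info *reg_info)
{
	uint8_t alpha[3] = {'U', 'S', '\0'};
	uint16_t country_index, regdmn_pair;

	if (reg_get_rdpair_from_country_iso(alpha, &country_index,
					    &regdmn_pair) !=
	    QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	return reg_get_cur_reginfo(reg_info, country_index, regdmn_pair);
}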

+ 55 - 0
umac/regulatory/core/src/reg_db_parser.h

@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/**
+ * DOC: reg_db_parser.h
+ * This file contains regulatory data base parser function declarations
+ */
+
+#include <reg_services_public_struct.h>
+
+extern const struct country_code_to_reg_domain g_all_countries[];
+extern const struct reg_domain_pair g_reg_dmn_pairs[];
+extern const struct regulatory_rule reg_rules_2g[];
+extern const struct regdomain regdomains_2g[];
+extern const struct regulatory_rule reg_rules_5g[];
+extern const struct regdomain regdomains_5g[];
+
+QDF_STATUS reg_is_country_code_valid(uint8_t alpha[3]);
+
+QDF_STATUS reg_regrules_assign(uint8_t dmn_id_2g,
+		uint8_t dmn_id_5g,
+		uint8_t ant_gain_2g,
+		uint8_t ant_gain_5g,
+		struct cur_regulatory_info *reg_info);
+
+QDF_STATUS reg_get_cur_reginfo(struct cur_regulatory_info *reg_info,
+		uint16_t country_index,
+		uint16_t regdmn_pair);
+
+QDF_STATUS  reg_get_rdpair_from_country_iso(uint8_t *alpha,
+		uint16_t *country_index,
+		uint16_t *regdmn_pair);
+
+QDF_STATUS reg_get_rdpair_from_country_code(uint16_t cc,
+		uint16_t *country_index,
+		uint16_t *regdmn_pair);
+
+QDF_STATUS reg_get_rdpair_from_regdmn_id(uint16_t regdmn_id,
+		uint16_t *regdmn_pair);

+ 3 - 0
umac/regulatory/core/src/reg_priv.h

@@ -48,6 +48,9 @@
 struct wlan_regulatory_psoc_priv_obj {
 	struct regulatory_channel mas_chan_list[NUM_CHANNELS];
 	bool offload_enabled;
+	uint8_t num_phy;
+	uint16_t reg_dmn_pair;
+	uint16_t ctry_code;
 	bool nol_chan[NUM_CHANNELS];
 	char default_country[REG_ALPHA2_LEN + 1];
 	char current_country[REG_ALPHA2_LEN + 1];

+ 480 - 278
umac/regulatory/core/src/reg_services.c

@@ -32,6 +32,7 @@
 
 #include "reg_services.h"
 #include "reg_priv.h"
+#include "reg_db_parser.h"
 
 #define MAX_PWR_FCC_CHAN_12 8
 #define MAX_PWR_FCC_CHAN_13 2
@@ -1042,7 +1043,7 @@ static void reg_fill_channel_info(enum channel_enum chan_enum,
 			REGULATORY_CHAN_NO_IR;
 
 		master_list[chan_enum].state =
-			CHANNEL_STATE_PASSIVE;
+			CHANNEL_STATE_DFS;
 	}
 
 	if (reg_rule->flags & REGULATORY_CHAN_RADAR) {
@@ -1143,6 +1144,234 @@ static void do_auto_bw_correction(uint32_t num_reg_rules,
 }
 
 
+static void
+modify_chan_list_for_dfs_channels(struct regulatory_channel *chan_list,
+				  bool dfs_enabled)
+{
+	enum channel_enum chan_enum;
+
+	if (dfs_enabled)
+		return;
+
+	for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) {
+		if (CHANNEL_STATE_DFS ==
+		    chan_list[chan_enum].state) {
+			chan_list[chan_enum].state =
+				CHANNEL_STATE_DISABLE;
+			chan_list[chan_enum].chan_flags |=
+				REGULATORY_CHAN_DISABLED;
+		}
+	}
+}
+
+static void
+modify_chan_list_for_indoor_channels(struct regulatory_channel
+				     *chan_list,
+				     bool indoor_chan_enabled)
+{
+	enum channel_enum chan_enum;
+
+	if (indoor_chan_enabled)
+		return;
+
+	for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) {
+		if (REGULATORY_CHAN_INDOOR_ONLY &
+		    chan_list[chan_enum].chan_flags) {
+			chan_list[chan_enum].state =
+				CHANNEL_STATE_PASSIVE;
+			chan_list[chan_enum].chan_flags |=
+				REGULATORY_CHAN_NO_IR;
+		}
+	}
+}
+
+static void
+modify_chan_list_for_band(struct regulatory_channel
+			  *chan_list,
+			  enum band_info band_val)
+{
+	enum channel_enum chan_enum;
+
+	if (BAND_2G == band_val) {
+		for (chan_enum = MIN_5GHZ_CHANNEL;
+		     chan_enum <= MAX_5GHZ_CHANNEL;
+		     chan_enum++) {
+			chan_list[chan_enum].chan_flags |=
+				REGULATORY_CHAN_DISABLED;
+			chan_list[chan_enum].state =
+				CHANNEL_STATE_DISABLE;
+		}
+	}
+
+	if (BAND_5G == band_val) {
+		for (chan_enum = MIN_24GHZ_CHANNEL;
+		     chan_enum <= MAX_24GHZ_CHANNEL;
+		     chan_enum++) {
+			chan_list[chan_enum].chan_flags |=
+				REGULATORY_CHAN_DISABLED;
+			chan_list[chan_enum].state =
+				CHANNEL_STATE_DISABLE;
+		}
+	}
+}
+
+static void
+modify_chan_list_for_fcc_channel(struct regulatory_channel
+				 *chan_list,
+				 bool set_fcc_channel)
+{
+	if (set_fcc_channel) {
+		chan_list[CHAN_ENUM_12].tx_power = MAX_PWR_FCC_CHAN_12;
+		chan_list[CHAN_ENUM_13].tx_power = MAX_PWR_FCC_CHAN_13;
+	}
+}
+
+static void
+modify_chan_list_for_nol_list(struct regulatory_channel
+			      *chan_list)
+{
+	enum channel_enum chan_enum;
+
+	for (chan_enum = 0; chan_enum < NUM_CHANNELS;
+	     chan_enum++) {
+		if (chan_list[chan_enum].nol_chan) {
+			chan_list[chan_enum].state =
+				CHANNEL_STATE_DISABLE;
+			chan_list[chan_enum].chan_flags |=
+				REGULATORY_CHAN_DISABLED;
+		}
+	}
+}
+
+static void
+modify_chan_list_for_freq_range(struct regulatory_channel
+				*chan_list,
+				uint32_t low_freq_2g,
+				uint32_t high_freq_2g,
+				uint32_t low_freq_5g,
+				uint32_t high_freq_5g)
+{
+	uint32_t low_limit_2g = NUM_CHANNELS;
+	uint32_t high_limit_2g = NUM_CHANNELS;
+	uint32_t low_limit_5g = NUM_CHANNELS;
+	uint32_t high_limit_5g = NUM_CHANNELS;
+	enum channel_enum chan_enum;
+	bool chan_in_range;
+
+	for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) {
+		if ((chan_list[chan_enum].center_freq - 10) >= low_freq_2g) {
+			low_limit_2g = chan_enum;
+			break;
+		}
+	}
+
+	for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) {
+		if ((chan_list[chan_enum].center_freq - 10) >= low_freq_5g) {
+			low_limit_5g = chan_enum;
+			break;
+		}
+	}
+
+	for (chan_enum = NUM_CHANNELS - 1; chan_enum >= 0; chan_enum--) {
+		if (chan_list[chan_enum].center_freq + 10 <= high_freq_2g) {
+			high_limit_2g = chan_enum;
+			break;
+		}
+		if (chan_enum == 0)
+			break;
+	}
+
+	for (chan_enum = NUM_CHANNELS - 1; chan_enum >= 0; chan_enum--) {
+		if (chan_list[chan_enum].center_freq + 10 <= high_freq_5g) {
+			high_limit_5g = chan_enum;
+			break;
+		}
+		if (chan_enum == 0)
+			break;
+	}
+
+	for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) {
+		chan_in_range = false;
+		if  ((low_limit_2g <= chan_enum) &&
+		     (high_limit_2g >= chan_enum) &&
+		     (low_limit_2g != NUM_CHANNELS) &&
+		     (high_limit_2g != NUM_CHANNELS))
+			chan_in_range = true;
+		if  ((low_limit_5g <= chan_enum) &&
+		     (high_limit_5g >= chan_enum) &&
+		     (low_limit_5g != NUM_CHANNELS) &&
+		     (high_limit_5g != NUM_CHANNELS))
+			chan_in_range = true;
+		if (!chan_in_range) {
+			chan_list[chan_enum].chan_flags |=
+				REGULATORY_CHAN_DISABLED;
+			chan_list[chan_enum].state =
+				CHANNEL_STATE_DISABLE;
+		}
+	}
+}
+
+static void reg_change_pdev_for_new_mas_chan_list(struct wlan_objmgr_psoc *psoc,
+						  void *object, void *arg)
+{
+	struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)object;
+	struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj;
+	struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj;
+	enum channel_enum chan_enum;
+
+	wlan_psoc_obj_lock(psoc);
+	psoc_priv_obj = (struct wlan_regulatory_psoc_priv_obj *)
+		wlan_objmgr_psoc_get_comp_private_obj(psoc,
+				       WLAN_UMAC_COMP_REGULATORY);
+	wlan_psoc_obj_unlock(psoc);
+
+	if (NULL == psoc_priv_obj) {
+		reg_err("psoc priv obj is NULL");
+		return;
+	}
+
+	wlan_pdev_obj_lock(pdev);
+	pdev_priv_obj = (struct wlan_regulatory_pdev_priv_obj *)
+		wlan_objmgr_pdev_get_comp_private_obj(pdev,
+					       WLAN_UMAC_COMP_REGULATORY);
+	wlan_pdev_obj_unlock(pdev);
+
+	if (NULL == pdev_priv_obj) {
+		reg_err("pdev priv obj is NULL");
+		return;
+	}
+
+	qdf_mem_copy(pdev_priv_obj->cur_chan_list,
+		     psoc_priv_obj->mas_chan_list,
+		     NUM_CHANNELS * sizeof(struct regulatory_channel));
+
+	for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++)
+		pdev_priv_obj->cur_chan_list[chan_enum].nol_chan =
+			psoc_priv_obj->nol_chan[chan_enum];
+
+	modify_chan_list_for_freq_range(pdev_priv_obj->cur_chan_list,
+					pdev_priv_obj->range_2g_low,
+					pdev_priv_obj->range_2g_high,
+					pdev_priv_obj->range_5g_low,
+					pdev_priv_obj->range_5g_high);
+
+	modify_chan_list_for_dfs_channels(pdev_priv_obj->cur_chan_list,
+					  pdev_priv_obj->dfs_enabled);
+
+	modify_chan_list_for_indoor_channels(pdev_priv_obj->cur_chan_list,
+					 pdev_priv_obj->indoor_chan_enabled);
+
+	modify_chan_list_for_band(pdev_priv_obj->cur_chan_list,
+				  pdev_priv_obj->band_capability);
+
+	modify_chan_list_for_nol_list(pdev_priv_obj->cur_chan_list);
+
+	modify_chan_list_for_fcc_channel(pdev_priv_obj->cur_chan_list,
+					 pdev_priv_obj->set_fcc_channel);
+
+}
+
+
 QDF_STATUS reg_process_master_chan_list(struct cur_regulatory_info
 					*regulat_info)
 {
@@ -1151,6 +1380,8 @@ QDF_STATUS reg_process_master_chan_list(struct cur_regulatory_info
 	struct cur_reg_rule *reg_rule_2g, *reg_rule_5g;
 	uint16_t min_bw_2g, max_bw_2g, min_bw_5g, max_bw_5g;
 	struct regulatory_channel *mas_chan_list;
+	QDF_STATUS status;
+	enum channel_enum chan_enum;
 
 	reg_debug("process reg master chan list");
 
@@ -1165,8 +1396,23 @@ QDF_STATUS reg_process_master_chan_list(struct cur_regulatory_info
 		return QDF_STATUS_E_FAILURE;
 	}
 
-	soc_reg->offload_enabled = true;
+	mas_chan_list = soc_reg->mas_chan_list;
+
+	for (chan_enum = 0; chan_enum < NUM_CHANNELS;
+	     chan_enum++) {
+		mas_chan_list[chan_enum].chan_num =
+			channel_map[chan_enum].chan_num;
+		mas_chan_list[chan_enum].center_freq =
+			channel_map[chan_enum].center_freq;
+		mas_chan_list[chan_enum].chan_flags |=
+			REGULATORY_CHAN_DISABLED;
+		mas_chan_list[chan_enum].state =
+			CHANNEL_STATE_DISABLE;
+		mas_chan_list[chan_enum].nol_chan = false;
+		soc_reg->nol_chan[chan_enum] = false;
+	}
 
+	soc_reg->offload_enabled = regulat_info->offload_enabled;
 	soc_reg->phybitmap = regulat_info->phybitmap;
 	soc_reg->dfs_region = regulat_info->dfs_region;
 	qdf_mem_copy(soc_reg->default_country, regulat_info->alpha2,
@@ -1174,8 +1420,6 @@ QDF_STATUS reg_process_master_chan_list(struct cur_regulatory_info
 	qdf_mem_copy(soc_reg->current_country, regulat_info->alpha2,
 		     REG_ALPHA2_LEN);
 
-	mas_chan_list = soc_reg->mas_chan_list;
-
 	min_bw_2g = regulat_info->min_bw_2g;
 	max_bw_2g = regulat_info->max_bw_2g;
 	reg_rule_2g = regulat_info->reg_rules_2g_ptr;
@@ -1190,18 +1434,26 @@ QDF_STATUS reg_process_master_chan_list(struct cur_regulatory_info
 	update_max_bw_per_rule(num_5g_reg_rules,
 			       reg_rule_5g, max_bw_5g);
 
-	do_auto_bw_correction(num_5g_reg_rules, reg_rule_5g, max_bw_5g);
+	if (num_5g_reg_rules != 0)
+		do_auto_bw_correction(num_5g_reg_rules, reg_rule_5g,
+				      max_bw_5g);
 
-	populate_band_channels(CHAN_ENUM_1, CHAN_ENUM_14,
-			       reg_rule_2g, num_2g_reg_rules,
-			       min_bw_2g, mas_chan_list);
+	if (num_2g_reg_rules != 0)
+		populate_band_channels(CHAN_ENUM_1, CHAN_ENUM_14,
+				       reg_rule_2g, num_2g_reg_rules,
+				       min_bw_2g, mas_chan_list);
 
-	populate_band_channels(CHAN_ENUM_36, CHAN_ENUM_165,
-			       reg_rule_5g,
-			       num_5g_reg_rules,
-			       min_bw_5g, mas_chan_list);
+	if (num_5g_reg_rules != 0)
+		populate_band_channels(CHAN_ENUM_36, CHAN_ENUM_165,
+				       reg_rule_5g,
+				       num_5g_reg_rules,
+				       min_bw_5g, mas_chan_list);
 
-	return QDF_STATUS_SUCCESS;
+	status = wlan_objmgr_iterate_obj_list(regulat_info->psoc, WLAN_PDEV_OP,
+					      reg_change_pdev_for_new_mas_chan_list,
+					      NULL, 1, WLAN_REGULATORY_SB_ID);
+
+	return status;
 }
 
 /**
@@ -1253,221 +1505,54 @@ QDF_STATUS wlan_regulatory_psoc_obj_created_notification(
 		soc_reg_obj->nol_chan[chan_enum] = false;
 	}
 
-	status = wlan_objmgr_psoc_component_obj_attach(psoc,
-			WLAN_UMAC_COMP_REGULATORY, soc_reg_obj,
-			QDF_STATUS_SUCCESS);
-
-	reg_debug("reg psoc obj created with status %d", status);
-
-	return status;
-}
-
-/**
- * wlan_regulatory_psoc_obj_destroyed_notification() - PSOC obj delete callback
- * @psoc: PSOC object
- * @arg_list: Variable argument list
- *
- * This callback is registered with object manager during initialization to
- * get notified when the object is deleted.
- *
- * Return: Success or Failure
- */
-QDF_STATUS wlan_regulatory_psoc_obj_destroyed_notification(
-		struct wlan_objmgr_psoc *psoc, void *arg_list)
-{
-	QDF_STATUS status;
-	struct wlan_regulatory_psoc_priv_obj *soc_reg;
-
-	wlan_psoc_obj_lock(psoc);
-	soc_reg = wlan_objmgr_psoc_get_comp_private_obj(psoc,
-					    WLAN_UMAC_COMP_REGULATORY);
-	wlan_psoc_obj_unlock(psoc);
-
-	if (NULL == soc_reg) {
-		reg_err("reg psoc private obj is NULL");
-		return QDF_STATUS_E_FAULT;
-	}
-
-	soc_reg->psoc_ptr = NULL;
-
-	status = wlan_objmgr_psoc_component_obj_detach(psoc,
-			WLAN_UMAC_COMP_REGULATORY,
-			soc_reg);
-	if (status != QDF_STATUS_SUCCESS)
-		reg_err("soc_reg private obj detach failed");
-
-	reg_debug("reg psoc obj detached with status %d", status);
-
-	qdf_mem_free(soc_reg);
-
-	return status;
-}
-
-static void
-modify_chan_list_for_dfs_channels(struct regulatory_channel *chan_list,
-				  bool dfs_enabled)
-{
-	enum channel_enum chan_enum;
-
-	if (dfs_enabled)
-		return;
-
-	for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) {
-		if (CHANNEL_STATE_DFS ==
-		    chan_list[chan_enum].state) {
-			chan_list[chan_enum].state =
-				CHANNEL_STATE_DISABLE;
-			chan_list[chan_enum].chan_flags |=
-				REGULATORY_CHAN_DISABLED;
-		}
-	}
-}
-
-static void
-modify_chan_list_for_indoor_channels(struct regulatory_channel
-				     *chan_list,
-				     bool indoor_chan_enabled)
-{
-	enum channel_enum chan_enum;
-
-	if (indoor_chan_enabled)
-		return;
-
-	for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) {
-		if (REGULATORY_CHAN_INDOOR_ONLY &
-		    chan_list[chan_enum].chan_flags) {
-			chan_list[chan_enum].state =
-				CHANNEL_STATE_PASSIVE;
-			chan_list[chan_enum].chan_flags |=
-				REGULATORY_CHAN_NO_IR;
-		}
-	}
-}
-
-static void
-modify_chan_list_for_band(struct regulatory_channel
-			  *chan_list,
-			  enum band_info band_val)
-{
-	enum channel_enum chan_enum;
-
-	if (BAND_2G == band_val) {
-		for (chan_enum = MIN_5GHZ_CHANNEL;
-		     chan_enum <= MAX_5GHZ_CHANNEL;
-		     chan_enum++) {
-			chan_list[chan_enum].chan_flags |=
-				REGULATORY_CHAN_DISABLED;
-			chan_list[chan_enum].state =
-				CHANNEL_STATE_DISABLE;
-		}
-	}
-
-	if (BAND_5G == band_val) {
-		for (chan_enum = MIN_24GHZ_CHANNEL;
-		     chan_enum <= MAX_24GHZ_CHANNEL;
-		     chan_enum++) {
-			chan_list[chan_enum].chan_flags |=
-				REGULATORY_CHAN_DISABLED;
-			chan_list[chan_enum].state =
-				CHANNEL_STATE_DISABLE;
-		}
-	}
-}
-
-static void
-modify_chan_list_for_fcc_channel(struct regulatory_channel
-				 *chan_list,
-				 bool set_fcc_channel)
-{
-	if (set_fcc_channel) {
-		chan_list[CHAN_ENUM_12].tx_power = MAX_PWR_FCC_CHAN_12;
-		chan_list[CHAN_ENUM_13].tx_power = MAX_PWR_FCC_CHAN_13;
-	}
-}
-
-static void
-modify_chan_list_for_nol_list(struct regulatory_channel
-			      *chan_list)
-{
-	enum channel_enum chan_enum;
+	status = wlan_objmgr_psoc_component_obj_attach(psoc,
+			WLAN_UMAC_COMP_REGULATORY, soc_reg_obj,
+			QDF_STATUS_SUCCESS);
 
-	for (chan_enum = 0; chan_enum < NUM_CHANNELS;
-	     chan_enum++) {
-		if (chan_list[chan_enum].nol_chan) {
-			chan_list[chan_enum].state =
-				CHANNEL_STATE_DISABLE;
-			chan_list[chan_enum].chan_flags |=
-				REGULATORY_CHAN_DISABLED;
-		}
-	}
+	reg_debug("reg psoc obj created with status %d", status);
+
+	return status;
 }
 
-static void
-modify_chan_list_for_freq_range(struct regulatory_channel
-				*chan_list,
-				uint32_t low_freq_2g,
-				uint32_t high_freq_2g,
-				uint32_t low_freq_5g,
-				uint32_t high_freq_5g)
+/**
+ * wlan_regulatory_psoc_obj_destroyed_notification() - PSOC obj delete callback
+ * @psoc: PSOC object
+ * @arg_list: Variable argument list
+ *
+ * This callback is registered with object manager during initialization to
+ * get notified when the object is deleted.
+ *
+ * Return: Success or Failure
+ */
+QDF_STATUS wlan_regulatory_psoc_obj_destroyed_notification(
+		struct wlan_objmgr_psoc *psoc, void *arg_list)
 {
-	uint32_t low_limit_2g = NUM_CHANNELS;
-	uint32_t high_limit_2g = NUM_CHANNELS;
-	uint32_t low_limit_5g = NUM_CHANNELS;
-	uint32_t high_limit_5g = NUM_CHANNELS;
-	enum channel_enum chan_enum;
-	bool chan_in_range;
+	QDF_STATUS status;
+	struct wlan_regulatory_psoc_priv_obj *soc_reg;
 
-	for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) {
-		if ((chan_list[chan_enum].center_freq - 10) >= low_freq_2g) {
-			low_limit_2g = chan_enum;
-			break;
-		}
-	}
+	wlan_psoc_obj_lock(psoc);
+	soc_reg = wlan_objmgr_psoc_get_comp_private_obj(psoc,
+					    WLAN_UMAC_COMP_REGULATORY);
+	wlan_psoc_obj_unlock(psoc);
 
-	for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) {
-		if ((chan_list[chan_enum].center_freq - 10) >= low_freq_5g) {
-			low_limit_5g = chan_enum;
-			break;
-		}
+	if (NULL == soc_reg) {
+		reg_err("reg psoc private obj is NULL");
+		return QDF_STATUS_E_FAULT;
 	}
 
-	for (chan_enum = NUM_CHANNELS - 1; chan_enum >= 0; chan_enum--) {
-		if (chan_list[chan_enum].center_freq + 10 <= high_freq_2g) {
-			high_limit_2g = chan_enum;
-			break;
-		}
-		if (chan_enum == 0)
-			break;
-	}
+	soc_reg->psoc_ptr = NULL;
 
-	for (chan_enum = NUM_CHANNELS - 1; chan_enum >= 0; chan_enum--) {
-		if (chan_list[chan_enum].center_freq + 10 <= high_freq_5g) {
-			high_limit_5g = chan_enum;
-			break;
-		}
-		if (chan_enum == 0)
-			break;
-	}
+	status = wlan_objmgr_psoc_component_obj_detach(psoc,
+			WLAN_UMAC_COMP_REGULATORY,
+			soc_reg);
+	if (status != QDF_STATUS_SUCCESS)
+		reg_err("soc_reg private obj detach failed");
 
-	for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++) {
-		chan_in_range = false;
-		if  ((low_limit_2g <= chan_enum) &&
-		     (high_limit_2g >= chan_enum) &&
-		     (low_limit_2g != NUM_CHANNELS) &&
-		     (high_limit_2g != NUM_CHANNELS))
-			chan_in_range = true;
-		if  ((low_limit_5g <= chan_enum) &&
-		     (high_limit_5g >= chan_enum) &&
-		     (low_limit_5g != NUM_CHANNELS) &&
-		     (high_limit_5g != NUM_CHANNELS))
-			chan_in_range = true;
-		if (!chan_in_range) {
-			chan_list[chan_enum].chan_flags |=
-				REGULATORY_CHAN_DISABLED;
-			chan_list[chan_enum].state =
-				CHANNEL_STATE_DISABLE;
-		}
-	}
+	reg_debug("reg psoc obj detached with status %d", status);
+
+	qdf_mem_free(soc_reg);
+
+	return status;
 }
 
 /**
@@ -1891,66 +1976,6 @@ bool reg_is_regdb_offloaded(struct wlan_objmgr_psoc *psoc)
 }
 
 
-static void reg_change_pdev_for_new_mas_chan_list(struct wlan_objmgr_psoc *psoc,
-						  void *object, void *arg)
-{
-	struct wlan_objmgr_pdev *pdev = (struct wlan_objmgr_pdev *)object;
-	struct wlan_regulatory_psoc_priv_obj *psoc_priv_obj;
-	struct wlan_regulatory_pdev_priv_obj *pdev_priv_obj;
-	enum channel_enum chan_enum;
-
-	wlan_psoc_obj_lock(psoc);
-	psoc_priv_obj = (struct wlan_regulatory_psoc_priv_obj *)
-		wlan_objmgr_psoc_get_comp_private_obj(psoc,
-				       WLAN_UMAC_COMP_REGULATORY);
-	wlan_psoc_obj_unlock(psoc);
-
-	if (NULL == psoc_priv_obj) {
-		reg_err("psoc priv obj is NULL");
-		return;
-	}
-
-	wlan_pdev_obj_lock(pdev);
-	pdev_priv_obj = (struct wlan_regulatory_pdev_priv_obj *)
-		wlan_objmgr_pdev_get_comp_private_obj(pdev,
-					       WLAN_UMAC_COMP_REGULATORY);
-	wlan_pdev_obj_unlock(pdev);
-
-	if (NULL == pdev_priv_obj) {
-		reg_err("pdev priv obj is NULL");
-		return;
-	}
-
-	qdf_mem_copy(pdev_priv_obj->cur_chan_list,
-		     psoc_priv_obj->mas_chan_list,
-		     NUM_CHANNELS * sizeof(struct regulatory_channel));
-
-	for (chan_enum = 0; chan_enum < NUM_CHANNELS; chan_enum++)
-		pdev_priv_obj->cur_chan_list[chan_enum].nol_chan =
-			psoc_priv_obj->nol_chan[chan_enum];
-
-	modify_chan_list_for_freq_range(pdev_priv_obj->cur_chan_list,
-					pdev_priv_obj->range_2g_low,
-					pdev_priv_obj->range_2g_high,
-					pdev_priv_obj->range_5g_low,
-					pdev_priv_obj->range_5g_high);
-
-	modify_chan_list_for_dfs_channels(pdev_priv_obj->cur_chan_list,
-					  pdev_priv_obj->dfs_enabled);
-
-	modify_chan_list_for_indoor_channels(pdev_priv_obj->cur_chan_list,
-					 pdev_priv_obj->indoor_chan_enabled);
-
-	modify_chan_list_for_band(pdev_priv_obj->cur_chan_list,
-				  pdev_priv_obj->band_capability);
-
-	modify_chan_list_for_nol_list(pdev_priv_obj->cur_chan_list);
-
-	modify_chan_list_for_fcc_channel(pdev_priv_obj->cur_chan_list,
-					 pdev_priv_obj->set_fcc_channel);
-
-}
-
 void reg_program_mas_chan_list(struct wlan_objmgr_psoc *psoc,
 				    struct regulatory_channel *reg_channels,
 				    uint8_t *alpha2,
@@ -1991,3 +2016,180 @@ void reg_program_mas_chan_list(struct wlan_objmgr_psoc *psoc,
 				     NULL, 1, WLAN_REGULATORY_SB_ID);
 
 }
+
+QDF_STATUS reg_program_default_cc(struct wlan_objmgr_psoc *psoc,
+		uint16_t regdmn)
+{
+	struct wlan_regulatory_psoc_priv_obj *soc_reg;
+	struct cur_regulatory_info *reg_info;
+	uint16_t cc = -1;
+	uint16_t country_index = -1, regdmn_pair = -1;
+	QDF_STATUS err;
+
+	soc_reg = (struct wlan_regulatory_psoc_priv_obj *)
+		wlan_objmgr_psoc_get_comp_private_obj(psoc,
+				WLAN_UMAC_COMP_REGULATORY);
+
+	if (NULL == soc_reg) {
+		reg_err("reg soc is NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc_reg->offload_enabled)
+		return QDF_STATUS_E_FAILURE;
+
+	reg_info = (struct cur_regulatory_info *)qdf_mem_malloc
+		(sizeof(struct cur_regulatory_info));
+	if (reg_info == NULL) {
+		reg_err("reg info is NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	reg_info->psoc = psoc;
+
+	if (regdmn == 0) {
+		reg_get_default_country(&regdmn);
+		regdmn |= COUNTRY_ERD_FLAG;
+	}
+
+	if (regdmn & COUNTRY_ERD_FLAG) {
+		cc = regdmn & ~COUNTRY_ERD_FLAG;
+
+		reg_get_rdpair_from_country_code(cc,
+				&country_index,
+				&regdmn_pair);
+
+		err = reg_get_cur_reginfo(reg_info, country_index, regdmn_pair);
+		if (err == QDF_STATUS_E_FAILURE) {
+			reg_err("%s : Unable to set country code\n", __func__);
+			qdf_mem_free(reg_info->reg_rules_2g_ptr);
+			qdf_mem_free(reg_info->reg_rules_5g_ptr);
+			qdf_mem_free(reg_info);
+			return QDF_STATUS_E_FAILURE;
+		}
+
+		soc_reg->ctry_code = cc;
+
+	} else {
+		reg_get_rdpair_from_regdmn_id(regdmn,
+				&regdmn_pair);
+
+		err = reg_get_cur_reginfo(reg_info, country_index, regdmn_pair);
+		if (err == QDF_STATUS_E_FAILURE) {
+			reg_err("%s : Unable to set country code\n", __func__);
+			qdf_mem_free(reg_info->reg_rules_2g_ptr);
+			qdf_mem_free(reg_info->reg_rules_5g_ptr);
+			qdf_mem_free(reg_info);
+			return QDF_STATUS_E_FAILURE;
+		}
+
+		soc_reg->reg_dmn_pair = regdmn;
+	}
+
+	reg_info->offload_enabled = false;
+	reg_process_master_chan_list(reg_info);
+
+	qdf_mem_free(reg_info->reg_rules_2g_ptr);
+	qdf_mem_free(reg_info->reg_rules_5g_ptr);
+	qdf_mem_free(reg_info);
+
+	return QDF_STATUS_SUCCESS;
+}
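
reg_program_default_cc() interprets its 16-bit regdmn argument in two ways: when COUNTRY_ERD_FLAG (0x8000, defined in reg_services.h below) is set, the low bits carry a country code, otherwise the value is a regdomain pair ID. A self-contained sketch of just that decode step; the example input value is illustrative, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

#define COUNTRY_ERD_FLAG 0x8000 /* from reg_services.h in this patch */

int main(void)
{
	uint16_t regdmn = 0x8188; /* illustrative: flag set, country code in low bits */

	if (regdmn & COUNTRY_ERD_FLAG) {
		uint16_t cc = regdmn & ~COUNTRY_ERD_FLAG;
		printf("EEPROM carries a country code: %u\n", cc);
	} else {
		printf("EEPROM carries a regdomain pair id: %u\n", regdmn);
	}
	return 0;
}
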
+
+QDF_STATUS reg_program_chan_list(struct wlan_objmgr_psoc *psoc,
+		struct cc_regdmn_s *rd)
+{
+	struct cur_regulatory_info *reg_info;
+	struct wlan_regulatory_psoc_priv_obj *soc_reg;
+	uint16_t country_index = -1, regdmn_pair = -1;
+	QDF_STATUS err;
+
+	soc_reg = (struct wlan_regulatory_psoc_priv_obj *)
+		wlan_objmgr_psoc_get_comp_private_obj(psoc,
+				WLAN_UMAC_COMP_REGULATORY);
+
+	if (NULL == soc_reg) {
+		reg_err("soc_reg is NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc_reg->offload_enabled)
+		return QDF_STATUS_E_FAILURE;
+
+	reg_info = (struct cur_regulatory_info *)qdf_mem_malloc
+		(sizeof(struct cur_regulatory_info));
+	if (reg_info == NULL) {
+		reg_err("reg info is NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	reg_info->psoc = psoc;
+
+	if (rd->flags == CC_IS_SET) {
+		reg_get_rdpair_from_country_code(rd->cc.country_code,
+				&country_index,
+				&regdmn_pair);
+	} else if (rd->flags == ALPHA_IS_SET) {
+		reg_get_rdpair_from_country_iso(rd->cc.alpha,
+				&country_index,
+				&regdmn_pair);
+	} else if (rd->flags == REGDMN_IS_SET) {
+		reg_get_rdpair_from_regdmn_id(rd->cc.regdmn_id,
+				&regdmn_pair);
+	}
+
+	err = reg_get_cur_reginfo(reg_info, country_index, regdmn_pair);
+	if (err == QDF_STATUS_E_FAILURE) {
+		reg_err("%s : Unable to set country code\n", __func__);
+		qdf_mem_free(reg_info->reg_rules_2g_ptr);
+		qdf_mem_free(reg_info->reg_rules_5g_ptr);
+		qdf_mem_free(reg_info);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (rd->flags == CC_IS_SET ||
+			rd->flags == ALPHA_IS_SET) {
+		soc_reg->ctry_code =
+			g_all_countries[country_index].country_code;
+	}
+
+	if (rd->flags == REGDMN_IS_SET)
+		soc_reg->reg_dmn_pair = rd->cc.regdmn_id;
+
+	reg_info->offload_enabled = false;
+	reg_process_master_chan_list(reg_info);
+	qdf_mem_free(reg_info->reg_rules_2g_ptr);
+	qdf_mem_free(reg_info->reg_rules_5g_ptr);
+	qdf_mem_free(reg_info);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS reg_get_current_cc(struct wlan_objmgr_psoc *psoc,
+		struct cc_regdmn_s *rd)
+{
+	struct wlan_regulatory_psoc_priv_obj *soc_reg;
+
+	soc_reg = (struct wlan_regulatory_psoc_priv_obj *)
+		wlan_objmgr_psoc_get_comp_private_obj(psoc,
+				WLAN_UMAC_COMP_REGULATORY);
+
+	if (NULL == soc_reg) {
+		reg_err("reg soc is NULL");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc_reg->offload_enabled)
+		return QDF_STATUS_E_FAILURE;
+
+	if (rd->flags == CC_IS_SET) {
+		rd->cc.country_code = soc_reg->ctry_code;
+	} else if (rd->flags == ALPHA_IS_SET) {
+		qdf_mem_copy(rd->cc.alpha, soc_reg->current_country,
+				sizeof(rd->cc.alpha));
+	} else if (rd->flags == REGDMN_IS_SET) {
+		rd->cc.regdmn_id = soc_reg->reg_dmn_pair;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}

+ 34 - 0
umac/regulatory/core/src/reg_services.h

@@ -66,6 +66,31 @@
 	 - REG_CH_TO_FREQ(reg_get_chan_enum(curchan))	\
 	 > REG_SBS_SEPARATION_THRESHOLD)
 
+/* EEPROM setting is a country code */
+#define    COUNTRY_ERD_FLAG     0x8000
+
+/**
+ * enum cc_regdmn_flag: Regdomain flags
+ * @INVALID:       Invalid flag
+ * @CC_IS_SET:     Country code is set
+ * @REGDMN_IS_SET: Regdomain ID is set
+ * @ALPHA_IS_SET:  Country ISO is set
+ */
+enum cc_regdmn_flag {
+	INVALID,
+	CC_IS_SET,
+	REGDMN_IS_SET,
+	ALPHA_IS_SET,
+};
+
+struct cc_regdmn_s {
+	union {
+		uint16_t country_code;
+		uint16_t regdmn_id;
+		uint8_t alpha[REG_ALPHA2_LEN + 1];
+	} cc;
+	uint8_t flags;
+};
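
Callers of reg_program_chan_list() and reg_get_current_cc() select which member of the cc union is meaningful via the flags field. A minimal sketch of populating the structure for each mode; the enum and struct are copied from the hunk above, REG_ALPHA2_LEN is assumed to be 2 as in the driver's alpha2 handling, and the numeric values are illustrative only:

#include <stdint.h>
#include <string.h>

#define REG_ALPHA2_LEN 2 /* assumed, matching the driver's alpha2 strings */

enum cc_regdmn_flag { INVALID, CC_IS_SET, REGDMN_IS_SET, ALPHA_IS_SET };

struct cc_regdmn_s {
	union {
		uint16_t country_code;
		uint16_t regdmn_id;
		uint8_t alpha[REG_ALPHA2_LEN + 1];
	} cc;
	uint8_t flags;
};

int main(void)
{
	struct cc_regdmn_s rd = { 0 };

	/* Select by ISO alpha2 string ... */
	rd.flags = ALPHA_IS_SET;
	memcpy(rd.cc.alpha, "US", REG_ALPHA2_LEN + 1);

	/* ... or by numeric country code ... */
	rd.flags = CC_IS_SET;
	rd.cc.country_code = 840;

	/* ... or by regdomain pair id. */
	rd.flags = REGDMN_IS_SET;
	rd.cc.regdmn_id = 0x3a;

	return 0;
}
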
 
 extern const struct chan_map channel_map[NUM_CHANNELS];
 
@@ -162,6 +187,9 @@ QDF_STATUS reg_get_current_chan_list(struct wlan_objmgr_pdev *pdev,
 				     struct regulatory_channel
 				     *chan_list);
 
+QDF_STATUS reg_program_chan_list(struct wlan_objmgr_psoc *psoc,
+		struct cc_regdmn_s *rd);
+
 void reg_update_nol_ch(struct wlan_objmgr_pdev *pdev, uint8_t *ch_list,
 		       uint8_t num_ch, bool nol_ch);
 
@@ -199,4 +227,10 @@ void reg_program_mas_chan_list(struct wlan_objmgr_psoc *psoc,
 			       struct regulatory_channel *reg_channels,
 			       uint8_t *alpha2,
 			       enum dfs_reg dfs_region);
+
+QDF_STATUS reg_program_default_cc(struct wlan_objmgr_psoc *psoc,
+		uint16_t regdmn);
+
+QDF_STATUS reg_get_current_cc(struct wlan_objmgr_psoc *psoc,
+		struct cc_regdmn_s *rd);
 #endif

+ 14 - 0
umac/regulatory/dispatcher/inc/reg_services_public_struct.h

@@ -393,6 +393,14 @@ enum ht_sec_ch_offset {
 	HIGH_PRIMARY_CH = 3,
 };
 
+enum cc_setting_code {
+	REG_SET_CC_STATUS_PASS = 0,
+	REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+	REG_INIT_ALPHA2_NOT_FOUND = 2,
+	REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+	REG_SET_CC_STATUS_NO_MEMORY = 4,
+	REG_SET_CC_STATUS_FAIL = 5,
+};
 
 /**
  * struct cur_reg_rule
@@ -415,6 +423,7 @@ struct cur_reg_rule {
 /**
  * struct cur_regulatory_info
  * @psoc: psoc ptr
+ * @status_code: cc setting status code reported by the regulatory database
  * @alpha2: country alpha2
  * @dfs_reg: dfs region
  * @phybitmap: phy bit map
@@ -429,7 +438,12 @@ struct cur_reg_rule {
  */
 struct cur_regulatory_info {
 	struct wlan_objmgr_psoc *psoc;
+	enum cc_setting_code status_code;
+	uint8_t num_phy;
+	uint16_t reg_dmn_pair;
+	uint16_t ctry_code;
 	uint8_t alpha2[REG_ALPHA2_LEN + 1];
+	bool offload_enabled;
 	enum dfs_reg dfs_region;
 	uint32_t phybitmap;
 	uint32_t min_bw_2g;

+ 9 - 0
umac/regulatory/dispatcher/inc/wlan_reg_ucfg_api.h

@@ -46,6 +46,15 @@ QDF_STATUS ucfg_reg_unregister_event_handler(uint8_t vdev_id, reg_event_cb cb,
 		void *arg);
 QDF_STATUS ucfg_reg_init_handler(uint8_t pdev_id);
 
+QDF_STATUS ucfg_reg_program_default_cc(struct wlan_objmgr_psoc *psoc,
+		uint16_t regdmn);
+
+QDF_STATUS ucfg_reg_program_cc(struct wlan_objmgr_psoc *psoc,
+		struct cc_regdmn_s *rd);
+
+QDF_STATUS ucfg_reg_get_current_cc(struct wlan_objmgr_psoc *psoc,
+		struct cc_regdmn_s *rd);
+
 /**
  * ucfg_reg_set_config_vars () - Set the config vars in reg component
  * @psoc: psoc ptr

+ 4 - 2
umac/regulatory/dispatcher/src/wlan_reg_services_api.c

@@ -347,7 +347,8 @@ QDF_STATUS regulatory_psoc_open(struct wlan_objmgr_psoc *psoc)
 	struct wlan_lmac_if_reg_tx_ops *tx_ops;
 
 	tx_ops = get_reg_psoc_tx_ops(psoc);
-	tx_ops->register_master_handler(psoc, NULL);
+	if (tx_ops->register_master_handler)
+		tx_ops->register_master_handler(psoc, NULL);
 
 	return QDF_STATUS_SUCCESS;
 };
@@ -357,7 +358,8 @@ QDF_STATUS regulatory_psoc_close(struct wlan_objmgr_psoc *psoc)
 	struct wlan_lmac_if_reg_tx_ops *tx_ops;
 
 	tx_ops = get_reg_psoc_tx_ops(psoc);
-	tx_ops->unregister_master_handler(psoc, NULL);
+	if (tx_ops->unregister_master_handler)
+		tx_ops->unregister_master_handler(psoc, NULL);
 
 	return QDF_STATUS_SUCCESS;
 };
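
Both regulatory_psoc_open() and regulatory_psoc_close() now call into tx_ops only when the master-handler callback is actually populated, so targets that never register one do not dereference a NULL function pointer. A generic sketch of that optional-callback guard, using a hypothetical ops table rather than the driver's wlan_lmac_if_reg_tx_ops:

#include <stdio.h>

/* Hypothetical ops table; mirrors only the optional-callback pattern. */
struct reg_tx_ops {
	void (*register_master_handler)(void *psoc, void *arg);
};

static void psoc_open(struct reg_tx_ops *ops, void *psoc)
{
	/* The callback may legitimately be absent on some targets. */
	if (ops->register_master_handler)
		ops->register_master_handler(psoc, NULL);
}

int main(void)
{
	struct reg_tx_ops ops = { 0 }; /* no handler registered */

	psoc_open(&ops, NULL); /* safe: the NULL callback is skipped */
	printf("open completed without master handler\n");
	return 0;
}
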

+ 18 - 0
umac/regulatory/dispatcher/src/wlan_reg_ucfg_api.c

@@ -113,3 +113,21 @@ void ucfg_reg_program_mas_chan_list(struct wlan_objmgr_psoc *psoc,
 {
 	reg_program_mas_chan_list(psoc, reg_channels, alpha2, dfs_region);
 }
+
+QDF_STATUS ucfg_reg_program_default_cc(struct wlan_objmgr_psoc *psoc,
+		uint16_t regdmn)
+{
+	return reg_program_default_cc(psoc, regdmn);
+}
+
+QDF_STATUS ucfg_reg_program_cc(struct wlan_objmgr_psoc *psoc,
+		struct cc_regdmn_s *rd)
+{
+	return reg_program_chan_list(psoc, rd);
+}
+
+QDF_STATUS ucfg_reg_get_current_cc(struct wlan_objmgr_psoc *psoc,
+		struct cc_regdmn_s *rd)
+{
+	return reg_get_current_cc(psoc, rd);
+}

+ 1 - 0
wmi/src/wmi_unified_tlv.c

@@ -17174,6 +17174,7 @@ static QDF_STATUS extract_reg_chan_list_update_event_tlv(
 			REG_ALPHA2_LEN);
 	reg_info->dfs_region = chan_list_event_hdr->dfs_region;
 	reg_info->phybitmap = chan_list_event_hdr->phybitmap;
+	reg_info->offload_enabled = true;
 	reg_info->min_bw_2g = chan_list_event_hdr->min_bw_2g;
 	reg_info->max_bw_2g = chan_list_event_hdr->max_bw_2g;
 	reg_info->min_bw_5g = chan_list_event_hdr->min_bw_5g;
