
Merge "qcacmn: Add changes for PN check in REO"

Linux Build Service Account 7 years ago
parent
commit
86230a188b

+ 17 - 0
dp/inc/cdp_txrx_cmn.h

@@ -542,4 +542,21 @@ cdp_display_stats(ol_txrx_soc_handle soc, uint16_t value)
 
 	return 0;
 }
+
+
+/**
+ * cdp_set_pn_check() - set PN check for the given vdev/peer
+ * @soc: soc handle
+ * @sec_type: security type
+ * @rx_pn: receive PN starting value
+ */
+static inline int cdp_set_pn_check(ol_txrx_soc_handle soc,
+	struct cdp_vdev *vdev, struct cdp_peer *peer_handle, enum cdp_sec_type sec_type, uint32_t *rx_pn)
+{
+	if (soc->ops->cmn_drv_ops->set_pn_check)
+		soc->ops->cmn_drv_ops->set_pn_check(vdev, peer_handle,
+			sec_type, rx_pn);
+	return 0;
+}
+
 #endif /* _CDP_TXRX_CMN_H_ */
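
For reference, a minimal sketch of how a control-path caller might use this wrapper when installing a key; the function name, the handles and the PN words below are illustrative placeholders, not part of this change. In the wifi3.0 implementation further down, the starting PN is only programmed for WAPI; for the other ciphers only the PN check itself is enabled.

/* Hypothetical caller: enable PN check and seed the starting receive PN
 * for a WAPI peer. soc, vdev and peer are assumed to come from the
 * caller's context; the rx_pn words are example values only
 * (least-significant 32-bit word first).
 */
static void example_set_peer_pn_check(ol_txrx_soc_handle soc,
				      struct cdp_vdev *vdev,
				      struct cdp_peer *peer)
{
	uint32_t rx_pn[4] = { 0x5c365c37, 0x5c365c36, 0x5c365c36, 0x5c365c36 };

	cdp_set_pn_check(soc, vdev, peer, cdp_sec_type_wapi, rx_pn);
}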

+ 21 - 0
dp/inc/cdp_txrx_cmn_struct.h

@@ -210,6 +210,27 @@ enum ol_txrx_peer_state {
 	OL_TXRX_PEER_STATE_AUTH,    /* authentication successful */
 };
 
+
+/**
+ * enum cdp_sec_type - security type information
+ */
+enum cdp_sec_type {
+    cdp_sec_type_none,
+    cdp_sec_type_wep128,
+    cdp_sec_type_wep104,
+    cdp_sec_type_wep40,
+    cdp_sec_type_tkip,
+    cdp_sec_type_tkip_nomic,
+    cdp_sec_type_aes_ccmp,
+    cdp_sec_type_wapi,
+    cdp_sec_type_aes_ccmp_256,
+    cdp_sec_type_aes_gcmp,
+    cdp_sec_type_aes_gcmp_256,
+
+    /* keep this last! */
+    cdp_num_sec_types
+};
+
 typedef struct cdp_soc_t *ol_txrx_soc_handle;
 
 /**
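
The new cdp_sec_type values appear to track the existing htt_sec_type values one-for-one (the dp_peer.c hunk below replaces htt_sec_type_* cases with the corresponding cdp_sec_type_* cases), so code that still receives HTT security indications can convert with a direct cast. A sketch under that ordering assumption:

/* Sketch only: map an HTT security type to the new CDP enum.
 * Assumes both enums keep the same value ordering; if they ever
 * diverge, replace this cast with an explicit switch statement.
 */
static inline enum cdp_sec_type
example_htt_to_cdp_sec_type(enum htt_sec_type htt_type)
{
	return (enum cdp_sec_type)htt_type;
}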

+ 1 - 0
dp/inc/cdp_txrx_handle.h

@@ -29,6 +29,7 @@
 struct cdp_cfg;
 struct cdp_pdev;
 struct cdp_vdev;
+struct cdp_peer;
 struct cdp_raw_ast;
 
 #endif

+ 3 - 0
dp/inc/cdp_txrx_ops.h

@@ -208,6 +208,9 @@ struct cdp_cmn_ops {
 	int(*txrx_soc_get_nss_cfg)(ol_txrx_soc_handle soc);
 	QDF_STATUS (*txrx_intr_attach)(void *soc);
 	void (*txrx_intr_detach)(void *soc);
+	void (*set_pn_check)(struct cdp_vdev *vdev,
+		struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
+		uint32_t *rx_pn);
 };
 
 struct cdp_ctrl_ops {

+ 3 - 0
dp/wifi3.0/dp_internal.h

@@ -285,4 +285,7 @@ QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
 		uint32_t config_param_3);
 void dp_htt_stats_print_tag(uint8_t tag_type, uint32_t *tag_buf);
 int dp_peer_rxtid_stats(struct dp_peer *peer);
+void dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle,
+	struct cdp_peer *peer_handle, enum cdp_sec_type sec_type,
+	uint32_t *rx_pn);
 #endif /* #ifndef _DP_INTERNAL_H_ */

+ 1 - 0
dp/wifi3.0/dp_main.c

@@ -3813,6 +3813,7 @@ static struct cdp_cmn_ops dp_ops_cmn = {
 	.txrx_soc_get_nss_cfg = dp_soc_get_nss_cfg_wifi3,
 	.txrx_intr_attach = dp_soc_interrupt_attach,
 	.txrx_intr_detach = dp_soc_interrupt_detach,
+	.set_pn_check = dp_set_pn_check_wifi3,
 	/* TODO: Add other functions */
 };
 

+ 103 - 7
dp/wifi3.0/dp_peer.c

@@ -806,14 +806,14 @@ try_desc_alloc:
 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
 	 */
 	switch (peer->security[dp_sec_ucast].sec_type) {
-	case htt_sec_type_tkip_nomic:
-	case htt_sec_type_aes_ccmp:
-	case htt_sec_type_aes_ccmp_256:
-	case htt_sec_type_aes_gcmp:
-	case htt_sec_type_aes_gcmp_256:
+	case cdp_sec_type_tkip_nomic:
+	case cdp_sec_type_aes_ccmp:
+	case cdp_sec_type_aes_ccmp_256:
+	case cdp_sec_type_aes_gcmp:
+	case cdp_sec_type_aes_gcmp_256:
 		hal_pn_type = HAL_PN_WPA;
 		break;
-	case htt_sec_type_wapi:
+	case cdp_sec_type_wapi:
 		if (vdev->opmode == wlan_op_mode_ap)
 			hal_pn_type = HAL_PN_WAPI_EVEN;
 		else
@@ -990,7 +990,7 @@ void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
 	 * send a HTT SEC_IND message to overwrite these defaults.
 	 */
 	peer->security[dp_sec_ucast].sec_type =
-		peer->security[dp_sec_mcast].sec_type = htt_sec_type_none;
+		peer->security[dp_sec_mcast].sec_type = cdp_sec_type_none;
 }
 
 /*
@@ -1140,6 +1140,102 @@ void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
 	}
 }
 
+
+/**
+ * dp_set_pn_check_wifi3() - enable PN check in REO for the given peer
+ * @vdev_handle: Datapath vdev handle
+ * @peer_handle: Datapath peer handle
+ * @sec_type: security type
+ * @rx_pn: receive PN starting number
+ *
+ * Return: void
+ */
+void
+dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle,
+	enum cdp_sec_type sec_type, uint32_t *rx_pn)
+{
+	struct dp_peer *peer = (struct dp_peer *)peer_handle;
+	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
+	struct dp_pdev *pdev;
+	struct dp_soc *soc;
+	int i;
+	struct hal_reo_cmd_params params;
+
+	/* preconditions */
+	qdf_assert(vdev);
+
+	pdev = vdev->pdev;
+	soc = pdev->soc;
+
+
+	qdf_mem_zero(&params, sizeof(params));
+
+	params.std.need_status = 1;
+	params.u.upd_queue_params.update_pn_valid = 1;
+	params.u.upd_queue_params.update_pn_size = 1;
+	params.u.upd_queue_params.update_pn = 1;
+	params.u.upd_queue_params.update_pn_check_needed = 1;
+
+	peer->security[dp_sec_ucast].sec_type = sec_type;
+
+	switch (sec_type) {
+	case cdp_sec_type_tkip_nomic:
+	case cdp_sec_type_aes_ccmp:
+	case cdp_sec_type_aes_ccmp_256:
+	case cdp_sec_type_aes_gcmp:
+	case cdp_sec_type_aes_gcmp_256:
+		params.u.upd_queue_params.pn_check_needed = 1;
+		params.u.upd_queue_params.pn_size = 48;
+		break;
+	case cdp_sec_type_wapi:
+		params.u.upd_queue_params.pn_check_needed = 1;
+		params.u.upd_queue_params.pn_size = 128;
+		if (vdev->opmode == wlan_op_mode_ap) {
+			params.u.upd_queue_params.pn_even = 1;
+			params.u.upd_queue_params.update_pn_even = 1;
+		} else {
+			params.u.upd_queue_params.pn_uneven = 1;
+			params.u.upd_queue_params.update_pn_uneven = 1;
+		}
+		break;
+	default:
+		params.u.upd_queue_params.pn_check_needed = 0;
+		break;
+	}
+
+
+	for (i = 0; i < DP_MAX_TIDS; i++) {
+		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
+		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
+			params.std.addr_lo =
+				rx_tid->hw_qdesc_paddr & 0xffffffff;
+			params.std.addr_hi =
+				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
+
+			if (sec_type != cdp_sec_type_wapi) {
+				params.u.upd_queue_params.update_pn_valid = 0;
+			} else {
+				/*
+				 * Setting PN valid bit for WAPI sec_type,
+				 * since WAPI PN has to be started with
+				 * predefined value
+				 */
+				params.u.upd_queue_params.update_pn_valid = 1;
+				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
+				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
+				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
+				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
+			}
+			dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
+				dp_rx_tid_update_cb, rx_tid);
+		} else {
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+				"PN check not set up for TID %d\n", i);
+		}
+	}
+}
+
+
 void
 dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
 	enum htt_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
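
To summarize the REO programming above: the WPA-family ciphers enable a 48-bit PN check, WAPI enables a 128-bit check with even (AP) or odd (STA) PN increment, and only WAPI seeds the queue with a caller-supplied starting PN. A minimal sketch of that seeding step, assuming rx_pn[] carries the least-significant 32-bit word first:

/* Sketch: how a 128-bit starting PN maps into the REO update-queue
 * parameters used by dp_set_pn_check_wifi3() above. The pn[] word
 * order is an assumption about the caller's convention.
 */
static void example_fill_wapi_start_pn(struct hal_reo_cmd_params *params,
				       const uint32_t pn[4])
{
	params->u.upd_queue_params.update_pn_valid = 1;
	params->u.upd_queue_params.pn_31_0 = pn[0];
	params->u.upd_queue_params.pn_63_32 = pn[1];
	params->u.upd_queue_params.pn_95_64 = pn[2];
	params->u.upd_queue_params.pn_127_96 = pn[3];
}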

+ 163 - 32
dp/wifi3.0/dp_rx.c

@@ -399,6 +399,7 @@ dp_rx_intrabss_fwd(struct dp_soc *soc,
  *
  * @vdev: DP Virtual device handle
  * @nbuf: Buffer pointer
+ * @rx_tlv_hdr: start of rx tlv header
  *
 * This function allocates memory for mesh receive stats and fills the
  * required stats. Stores the memory address in skb cb.
@@ -406,13 +407,13 @@ dp_rx_intrabss_fwd(struct dp_soc *soc,
  * Return: void
  */
 static
-void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
+void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
+				uint8_t *rx_tlv_hdr)
 {
 	struct mesh_recv_hdr_s *rx_info = NULL;
 	uint32_t pkt_type;
 	uint32_t nss;
 	uint32_t rate_mcs;
-	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
 
 	/* fill recv mesh stats */
 	rx_info = qdf_mem_malloc(sizeof(struct mesh_recv_hdr_s));
@@ -462,6 +463,7 @@ void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  *
  * @vdev: DP Virtual device handle
  * @nbuf: Buffer pointer
+ * @rx_tlv_hdr: start of rx tlv header
  *
 * This checks if the received packet matches any filter-out
 * category and drops the packet if it matches.
@@ -470,9 +472,9 @@ void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
  */
 
 static inline
-QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
+QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
+					uint8_t *rx_tlv_hdr)
 {
-	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
 	union dp_align_mac_addr mac_addr;
 
 	if (qdf_unlikely(vdev->mesh_rx_filter)) {
@@ -517,12 +519,14 @@ QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
 
 #else
 static
-void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
+void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
+				uint8_t *rx_tlv_hdr)
 {
 }
 
 static inline
-QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
+QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
+					uint8_t *rx_tlv_hdr)
 {
 	return QDF_STATUS_E_FAILURE;
 }
@@ -736,6 +740,77 @@ static void dp_rx_lro(uint8_t *rx_tlv, struct dp_peer *peer,
 }
 #endif
 
+static inline void dp_rx_adjust_nbuf_len(qdf_nbuf_t nbuf, uint16_t *mpdu_len)
+{
+	if (*mpdu_len >= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN))
+		qdf_nbuf_set_pktlen(nbuf, RX_BUFFER_SIZE);
+	else
+		qdf_nbuf_set_pktlen(nbuf, (*mpdu_len + RX_PKT_TLVS_LEN));
+
+	*mpdu_len -= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN);
+}
+
+/**
+ * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
+ *		     multiple nbufs.
+ * @nbuf: nbuf which may be part of a frag_list.
+ * @rx_tlv_hdr: pointer to the start of RX TLV headers.
+ * @mpdu_len: mpdu length.
+ * @is_first_frag: is this the first nbuf in the fragmented MSDU.
+ * @frag_list_len: length of all the fragments combined.
+ * @head_frag_nbuf: parent nbuf
+ * @frag_list_head: pointer to the first nbuf in the frag_list.
+ * @frag_list_tail: pointer to the last nbuf in the frag_list.
+ *
+ * This function implements the creation of RX frag_list for cases
+ * where an MSDU is spread across multiple nbufs.
+ *
+ */
+void dp_rx_sg_create(qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
+			uint16_t *mpdu_len, bool *is_first_frag,
+			uint16_t *frag_list_len, qdf_nbuf_t *head_frag_nbuf,
+			qdf_nbuf_t *frag_list_head, qdf_nbuf_t *frag_list_tail)
+{
+	if (qdf_unlikely(qdf_nbuf_is_chfrag_cont(nbuf))) {
+		if (!(*is_first_frag)) {
+			*is_first_frag = 1;
+			qdf_nbuf_set_chfrag_start(nbuf, 1);
+			*mpdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
+
+			dp_rx_adjust_nbuf_len(nbuf, mpdu_len);
+			*head_frag_nbuf = nbuf;
+		} else {
+			dp_rx_adjust_nbuf_len(nbuf, mpdu_len);
+			qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
+			*frag_list_len += qdf_nbuf_len(nbuf);
+
+			DP_RX_LIST_APPEND(*frag_list_head,
+						*frag_list_tail,
+						nbuf);
+		}
+	} else {
+		if (qdf_unlikely(*is_first_frag)) {
+			qdf_nbuf_set_chfrag_start(nbuf, 0);
+			dp_rx_adjust_nbuf_len(nbuf, mpdu_len);
+			qdf_nbuf_pull_head(nbuf,
+					RX_PKT_TLVS_LEN);
+			*frag_list_len += qdf_nbuf_len(nbuf);
+
+			DP_RX_LIST_APPEND(*frag_list_head,
+						*frag_list_tail,
+						nbuf);
+
+			qdf_nbuf_append_ext_list(*head_frag_nbuf,
+						*frag_list_head,
+						*frag_list_len);
+
+			*is_first_frag = 0;
+			return;
+		}
+		*head_frag_nbuf = nbuf;
+	}
+}
+
 /**
  * dp_rx_process() - Brain of the Rx processing functionality
  *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
@@ -782,6 +857,12 @@ dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota)
 	struct dp_soc *soc = int_ctx->soc;
 	uint8_t ring_id;
 	uint8_t core_id;
+	bool is_first_frag = 0;
+	uint16_t mpdu_len = 0;
+	qdf_nbuf_t head_frag_nbuf = NULL;
+	qdf_nbuf_t frag_list_head = NULL;
+	qdf_nbuf_t frag_list_tail = NULL;
+	uint16_t frag_list_len = 0;
 
 	DP_HIST_INIT();
 	/* Debug -- Remove later */
@@ -812,7 +893,7 @@ dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota)
 	 */
 	while (qdf_likely((ring_desc =
 				hal_srng_dst_get_next(hal_soc, hal_ring))
-				&& quota--)) {
+				&& quota)) {
 
 		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
 		ring_id = hal_srng_ring_id_get(hal_ring);
@@ -907,6 +988,15 @@ dp_rx_process(struct dp_intr *int_ctx, void *hal_ring, uint32_t quota)
 		DP_HIST_PACKET_COUNT_INC(vdev->pdev->pdev_id);
 		qdf_nbuf_queue_add(&vdev->rxq, rx_desc->nbuf);
 fail:
+		/*
+		 * if continuation bit is set then we have MSDU spread
+		 * across multiple buffers, let us not decrement quota
+		 * till we reap all buffers of that MSDU.
+		 */
+		if (qdf_likely(!qdf_nbuf_is_chfrag_cont(rx_desc->nbuf)))
+			quota -= 1;
+
+
 		dp_rx_add_to_free_desc_list(&head[rx_desc->pool_id],
 						&tail[rx_desc->pool_id],
 						rx_desc);
@@ -961,8 +1051,44 @@ done:
 				qdf_assert(0);
 			}
 
-			if (qdf_nbuf_is_chfrag_start(nbuf))
-				peer_mdata = hal_rx_mpdu_peer_meta_data_get(rx_tlv_hdr);
+			/*
+			 * The below condition happens when an MSDU is spread
+			 * across multiple buffers. This can happen in two cases:
+			 * 1. The nbuf size is smaller than the received msdu.
+			 *    ex: we have set the nbuf size to 2048 during
+			 *        nbuf_alloc, but we received an msdu which is
+			 *        2304 bytes in size; this msdu is then spread
+			 *        across 2 nbufs.
+			 *
+			 * 2. AMSDUs when RAW mode is enabled.
+			 *    ex: 1st MSDU is in 1st nbuf and 2nd MSDU is spread
+			 *        across 1st nbuf and 2nd nbuf and last MSDU is
+			 *        spread across 2nd nbuf and 3rd nbuf.
+			 *
+			 * For these scenarios, create an skb frag_list and
+			 * append these buffers till the last MSDU of the AMSDU.
+			 */
+			if (qdf_unlikely(vdev->rx_decap_type ==
+					htt_cmn_pkt_type_raw)) {
+
+				dp_rx_sg_create(nbuf, rx_tlv_hdr, &mpdu_len,
+						&is_first_frag, &frag_list_len,
+						&head_frag_nbuf,
+						&frag_list_head,
+						&frag_list_tail);
+
+				if (is_first_frag)
+					continue;
+				else {
+					nbuf = head_frag_nbuf;
+					rx_tlv_hdr = qdf_nbuf_data(nbuf);
+				}
+			}
+
+			if (qdf_nbuf_is_chfrag_start(nbuf)) {
+				peer_mdata = hal_rx_mpdu_peer_meta_data_get
+								(rx_tlv_hdr);
+			}
 
 			peer_id = DP_PEER_METADATA_PEER_ID_GET(peer_mdata);
 			peer = dp_peer_find_by_id(soc, peer_id);
@@ -1089,11 +1215,18 @@ done:
 			msdu_len = hal_rx_msdu_start_msdu_len_get(rx_tlv_hdr);
 			pkt_len = msdu_len + l2_hdr_offset + RX_PKT_TLVS_LEN;
 
-			/* Set length in nbuf */
-			qdf_nbuf_set_pktlen(nbuf, pkt_len);
+			if (unlikely(qdf_nbuf_get_ext_list(nbuf)))
+				qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
+			else {
+				qdf_nbuf_set_pktlen(nbuf, pkt_len);
+				qdf_nbuf_pull_head(nbuf,
+						RX_PKT_TLVS_LEN +
+						l2_hdr_offset);
+			}
 
 			if (qdf_unlikely(vdev->mesh_vdev)) {
-				if (dp_rx_filter_mesh_packets(vdev, nbuf)
+				if (dp_rx_filter_mesh_packets(vdev, nbuf,
+								rx_tlv_hdr)
 						== QDF_STATUS_SUCCESS) {
 					QDF_TRACE(QDF_MODULE_ID_DP,
 						QDF_TRACE_LEVEL_INFO_MED,
@@ -1104,16 +1237,9 @@ done:
 					qdf_nbuf_free(nbuf);
 					continue;
 				}
-				dp_rx_fill_mesh_stats(vdev, nbuf);
+				dp_rx_fill_mesh_stats(vdev, nbuf, rx_tlv_hdr);
 			}
 
-			/*
-			 * Advance the packet start pointer by total size of
-			 * pre-header TLV's
-			 */
-			qdf_nbuf_pull_head(nbuf,
-					   RX_PKT_TLVS_LEN + l2_hdr_offset);
-
 #ifdef QCA_WIFI_NAPIER_EMULATION_DBG /* Debug code, remove later */
 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 				"p_id %d msdu_len %d hdr_off %d",
@@ -1124,18 +1250,23 @@ done:
 					qdf_nbuf_data(nbuf), 128, false);
 #endif /* NAPIER_EMULATION */
 
-			/* WDS Source Port Learning */
-			if (qdf_likely((vdev->wds_enabled) &&
-						(vdev->rx_decap_type ==
-						htt_cmn_pkt_type_ethernet)))
-				dp_rx_wds_srcport_learn(soc, rx_tlv_hdr, peer,
-						nbuf);
-
-			/* Intrabss-fwd */
-			if (vdev->opmode != wlan_op_mode_sta)
-				if (dp_rx_intrabss_fwd(soc, peer, rx_tlv_hdr,
-									nbuf))
-					continue; /* Get next descriptor */
+			if (qdf_likely(vdev->rx_decap_type ==
+						htt_cmn_pkt_type_ethernet)) {
+				/* WDS Source Port Learning */
+				if (qdf_likely(vdev->wds_enabled))
+					dp_rx_wds_srcport_learn(soc,
+								rx_tlv_hdr,
+								peer,
+								nbuf);
+
+				/* Intrabss-fwd */
+				if (vdev->opmode != wlan_op_mode_sta)
+					if (dp_rx_intrabss_fwd(soc,
+								peer,
+								rx_tlv_hdr,
+								nbuf))
+						continue; /* Get next desc */
+			}
 
 			rx_bufs_used++;
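
To make the buffer-chaining arithmetic concrete, here is a standalone sketch of the dp_rx_adjust_nbuf_len() logic applied to the 2304-byte example from the comment above. The 2048-byte buffer size matches that example; the 384-byte TLV length is only a placeholder for RX_PKT_TLVS_LEN, which is target-specific.

/* Standalone illustration of the nbuf length adjustment above.
 * EX_RX_PKT_TLVS_LEN is a placeholder value used for the example only.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_RX_BUFFER_SIZE	2048
#define EX_RX_PKT_TLVS_LEN	384	/* placeholder for RX_PKT_TLVS_LEN */

static void example_adjust(uint16_t *mpdu_len, uint16_t *pktlen)
{
	if (*mpdu_len >= (EX_RX_BUFFER_SIZE - EX_RX_PKT_TLVS_LEN))
		*pktlen = EX_RX_BUFFER_SIZE;	/* this buffer is fully used */
	else
		*pktlen = *mpdu_len + EX_RX_PKT_TLVS_LEN; /* last, partial buffer */

	*mpdu_len -= (EX_RX_BUFFER_SIZE - EX_RX_PKT_TLVS_LEN);
}

int main(void)
{
	uint16_t mpdu_len = 2304;	/* MSDU larger than one 2048-byte buffer */
	uint16_t pktlen;

	example_adjust(&mpdu_len, &pktlen);
	printf("first nbuf pktlen %u, payload left %u\n", pktlen, mpdu_len);
	/* prints: first nbuf pktlen 2048, payload left 640 */

	example_adjust(&mpdu_len, &pktlen);
	printf("second nbuf pktlen %u\n", pktlen);
	/* prints: second nbuf pktlen 1024 (640 bytes + TLV prefix) */
	return 0;
}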
 

+ 10 - 0
dp/wifi3.0/dp_rx.h

@@ -284,6 +284,16 @@ uint32_t dp_rx_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);
 uint32_t
 dp_rx_wbm_err_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);
 
+void
+dp_rx_sg_create(qdf_nbuf_t nbuf,
+		uint8_t *rx_tlv_hdr,
+		uint16_t *mpdu_len,
+		bool *is_first_frag,
+		uint16_t *frag_list_len,
+		qdf_nbuf_t *head_frag_nbuf,
+		qdf_nbuf_t *frag_list_head,
+		qdf_nbuf_t *frag_list_tail);
+
 QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
 				uint32_t pool_id,
 				uint32_t pool_size,

+ 20 - 1
dp/wifi3.0/dp_rx_err.c

@@ -72,6 +72,8 @@ static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
 	struct hal_buf_info buf_info;
 	struct hal_rx_msdu_list msdu_list; /* MSDU's per MPDU */
 	int i;
+	uint8_t *rx_tlv_hdr;
+	uint32_t tid;
 
 	hal_rx_reo_buf_paddr_get(ring_desc, &buf_info);
 
@@ -96,6 +98,13 @@ static uint32_t dp_rx_msdus_drop(struct dp_soc *soc, void *ring_desc,
 		}
 
 		rx_bufs_used++;
+		tid = hal_rx_mpdu_start_tid_get(rx_desc->rx_buf_start);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Packet received with PN error for tid: %d", tid);
+
+		rx_tlv_hdr = qdf_nbuf_data(rx_desc->nbuf);
+		if (hal_rx_encryption_info_valid(rx_tlv_hdr))
+			hal_rx_print_pn(rx_tlv_hdr);
 
 		/* Just free the buffers */
 		qdf_nbuf_free(rx_desc->nbuf);
@@ -139,6 +148,7 @@ dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
 	peer_id = DP_PEER_METADATA_PEER_ID_GET(
 				mpdu_desc_info->peer_meta_data);
 
+
 	peer = dp_peer_find_by_id(soc, peer_id);
 
 	if (qdf_likely(peer)) {
@@ -146,7 +156,16 @@ dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
 		 * TODO: Check for peer specific policies & set peer_pn_policy
 		 */
 	}
-
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		"Packet received with PN error");
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		"discard rx due to PN error for peer %p "
+		"(%02x:%02x:%02x:%02x:%02x:%02x)\n",
+		peer,
+		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
+		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
+		peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
 
 	/* No peer PN policy -- definitely drop */
 	if (!peer_pn_policy)

+ 81 - 0
dp/wifi3.0/hal_rx.h

@@ -338,6 +338,46 @@ enum hal_rx_ret_buf_manager {
 	_OFFSET_TO_BYTE_PTR(msdu_details_ptr,		\
 RX_MSDU_DETAILS_2_RX_MSDU_DESC_INFO_RX_MSDU_DESC_INFO_DETAILS_OFFSET))
 
+
+#define HAL_RX_MPDU_PN_31_0_GET(_rx_mpdu_info)		\
+	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info,	\
+	RX_MPDU_INFO_4_PN_31_0_OFFSET)),		\
+	RX_MPDU_INFO_4_PN_31_0_MASK,			\
+	RX_MPDU_INFO_4_PN_31_0_LSB))
+
+#define HAL_RX_MPDU_PN_63_32_GET(_rx_mpdu_info)		\
+	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info,	\
+	RX_MPDU_INFO_5_PN_63_32_OFFSET)),		\
+	RX_MPDU_INFO_5_PN_63_32_MASK,			\
+	RX_MPDU_INFO_5_PN_63_32_LSB))
+
+#define HAL_RX_MPDU_PN_95_64_GET(_rx_mpdu_info)		\
+	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info,	\
+	RX_MPDU_INFO_6_PN_95_64_OFFSET)),		\
+	RX_MPDU_INFO_6_PN_95_64_MASK,			\
+	RX_MPDU_INFO_6_PN_95_64_LSB))
+
+#define HAL_RX_MPDU_PN_127_96_GET(_rx_mpdu_info)	\
+	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info,	\
+	RX_MPDU_INFO_7_PN_127_96_OFFSET)),		\
+	RX_MPDU_INFO_7_PN_127_96_MASK,			\
+	RX_MPDU_INFO_7_PN_127_96_LSB))
+
+#define HAL_RX_MPDU_ENCRYPT_TYPE_GET(_rx_mpdu_info)	\
+	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info,	\
+	RX_MPDU_INFO_3_ENCRYPT_TYPE_OFFSET)),		\
+	RX_MPDU_INFO_3_ENCRYPT_TYPE_MASK,		\
+	RX_MPDU_INFO_3_ENCRYPT_TYPE_LSB))
+
+#define HAL_RX_MPDU_ENCRYPTION_INFO_VALID(_rx_mpdu_info)	\
+	(_HAL_MS((*_OFFSET_TO_WORD_PTR(_rx_mpdu_info,		\
+	RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_OFFSET)),	\
+	RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_MASK,	\
+	RX_MPDU_INFO_2_FRAME_ENCRYPTION_INFO_VALID_LSB))
+
+
+
+
 static inline void hal_rx_mpdu_desc_info_get(void *desc_addr,
 				struct hal_rx_mpdu_desc_info *mpdu_desc_info)
 {
@@ -537,6 +577,47 @@ static inline uint8_t
 
 }
 
+/*
+ * hal_rx_encryption_info_valid(): check if the MPDU encryption info is valid
+ *
+ * @buf: rx_tlv_hdr of the received packet
+ * Return: non-zero if the frame encryption info is valid
+ */
+static inline uint32_t
+hal_rx_encryption_info_valid(uint8_t *buf)
+{
+	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
+	struct rx_mpdu_start *mpdu_start =
+				 &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start;
+	struct rx_mpdu_info *mpdu_info = &(mpdu_start->rx_mpdu_info_details);
+	uint32_t encryption_info = HAL_RX_MPDU_ENCRYPTION_INFO_VALID(mpdu_info);
+
+	return encryption_info;
+}
+
+/*
+ * hal_rx_print_pn(): print the PN of the received packet
+ *
+ * @buf: rx_tlv_hdr of the received packet
+ * Return: void
+ */
+static inline void
+hal_rx_print_pn(uint8_t *buf)
+{
+	struct rx_pkt_tlvs *pkt_tlvs = (struct rx_pkt_tlvs *)buf;
+	struct rx_mpdu_start *mpdu_start =
+				 &pkt_tlvs->mpdu_start_tlv.rx_mpdu_start;
+	struct rx_mpdu_info *mpdu_info = &(mpdu_start->rx_mpdu_info_details);
+
+	uint32_t pn_31_0 = HAL_RX_MPDU_PN_31_0_GET(mpdu_info);
+	uint32_t pn_63_32 = HAL_RX_MPDU_PN_63_32_GET(mpdu_info);
+	uint32_t pn_95_64 = HAL_RX_MPDU_PN_95_64_GET(mpdu_info);
+	uint32_t pn_127_96 = HAL_RX_MPDU_PN_127_96_GET(mpdu_info);
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		"PN number pn_127_96 0x%x pn_95_64 0x%x pn_63_32 0x%x pn_31_0 0x%x \n",
+			pn_127_96, pn_95_64, pn_63_32, pn_31_0);
+}
+
 /*
  * Get msdu_done bit from the RX_ATTENTION TLV
  */

+ 4 - 0
hal/wifi3.0/hal_api.h

@@ -170,6 +170,10 @@ enum hal_ring_type {
 #define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
 #define HAL_SRNG_MSI_INTR				0x00020000
 
+#define PN_SIZE_24 0
+#define PN_SIZE_48 1
+#define PN_SIZE_128 2
+
 /**
  * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
  * used by callers for calculating the size of memory to be allocated before

+ 2 - 2
hal/wifi3.0/hal_internal.h

@@ -181,8 +181,8 @@ enum hal_srng_dir {
 /* Lock wrappers for SRNG */
 #define hal_srng_lock_t qdf_spinlock_t
 #define SRNG_LOCK_INIT(_lock) qdf_spinlock_create(_lock)
-#define SRNG_LOCK(_lock) qdf_spinlock_acquire(_lock)
-#define SRNG_UNLOCK(_lock) qdf_spinlock_release(_lock)
+#define SRNG_LOCK(_lock) qdf_spin_lock_bh(_lock)
+#define SRNG_UNLOCK(_lock) qdf_spin_unlock_bh(_lock)
 #define SRNG_LOCK_DESTROY(_lock) qdf_spinlock_destroy(_lock)
 
 #define MAX_SRNG_REG_GROUPS 2

+ 7 - 0
hal/wifi3.0/hal_reo.c

@@ -491,6 +491,13 @@ inline int hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc,
 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
 		BA_WINDOW_SIZE, p->ba_window_size - 1);
 
+	if (p->pn_size == 24)
+		p->pn_size = PN_SIZE_24;
+	else if (p->pn_size == 48)
+		p->pn_size = PN_SIZE_48;
+	else if (p->pn_size == 128)
+		p->pn_size = PN_SIZE_128;
+
 	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
 		PN_SIZE, p->pn_size);
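
The hunk above lets callers pass pn_size in bits (24, 48 or 128, as programmed by dp_set_pn_check_wifi3) and converts it to the descriptor encoding PN_SIZE_24/48/128 before the field is written; this is also why pn_size in hal_reo.h below is widened from 2 to 8 bits, so it can carry the value 128 prior to conversion. A minimal equivalent of the mapping, for illustration only:

/* Sketch: PN length in bits -> REO descriptor encoding, mirroring the
 * if/else chain in hal_reo_cmd_update_rx_queue() above. Not part of
 * the change itself; the default case is an assumption.
 */
static inline uint32_t example_pn_size_encoding(uint32_t pn_bits)
{
	switch (pn_bits) {
	case 24:
		return PN_SIZE_24;	/* encoded as 0 */
	case 48:
		return PN_SIZE_48;	/* encoded as 1 */
	case 128:
		return PN_SIZE_128;	/* encoded as 2 */
	default:
		return PN_SIZE_24;	/* assumed fallback */
	}
}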
 

+ 1 - 1
hal/wifi3.0/hal_reo.h

@@ -322,7 +322,7 @@ struct hal_reo_cmd_update_queue_params {
 		pn_hand_enab:1,
 		ignore_ampdu:1;
 	uint32_t ba_window_size:8,
-		pn_size:2,
+		pn_size:8,
 		svld:1,
 		ssn:12,
 		seq_2k_err_detect:1,

+ 0 - 4
hal/wifi3.0/hal_rx.c

@@ -23,10 +23,6 @@
 #define HAL_REO_QUEUE_DESC 8
 #define HAL_REO_QUEUE_EXT_DESC 9
 
-#define PN_SIZE_24 0
-#define PN_SIZE_48 1
-#define PN_SIZE_128 2
-
 /* TODO: Using associated link desc counter 1 for Rx. Check with FW on
  * how these counters are assigned
  */

+ 0 - 1
umac/dfs/core/src/misc/dfs_nol.c

@@ -424,7 +424,6 @@ void dfs_nol_timer_cleanup(struct wlan_dfs *dfs)
 	}
 	dfs->dfs_nol = NULL;
 	dfs_nol_update(dfs);
-	dfs_mlme_set_no_chans_available(dfs->dfs_pdev_obj, 0);
 }
 
 int dfs_get_rn_use_nol(struct wlan_dfs *dfs)

+ 3 - 0
wmi/inc/wmi_unified_param.h

@@ -6880,6 +6880,8 @@ struct wmi_adaptive_dwelltime_params {
  *     for PER based roam in tx path
  * @rx_per_mon_time: Minimum time required to be considered as valid scenario
  *     for PER based roam in rx path
+ * @min_candidate_rssi: Minimum RSSI threshold for candidate AP to be used for
+ *     PER based roaming
  */
 struct wmi_per_roam_config {
 	uint32_t enable;
@@ -6892,6 +6894,7 @@ struct wmi_per_roam_config {
 	uint32_t per_rest_time;
 	uint32_t tx_per_mon_time;
 	uint32_t rx_per_mon_time;
+	uint32_t min_candidate_rssi;
 };
 
 /**

+ 2 - 0
wmi/src/wmi_unified_tlv.c

@@ -14419,6 +14419,8 @@ static QDF_STATUS send_per_roam_config_cmd_tlv(wmi_unified_t wmi_handle,
 	wmi_per_config->pkt_err_rate_mon_time =
 			(req_buf->per_config.tx_per_mon_time << 16) |
 			(req_buf->per_config.rx_per_mon_time & 0x0000ffff);
+	wmi_per_config->min_candidate_rssi =
+			req_buf->per_config.min_candidate_rssi;
 
 	/* Send per roam config parameters */
 	status = wmi_unified_cmd_send(wmi_handle, buf,