Browse Source

qcacld-3.0: add logic to configure FST in CMEM

Unlike QCA6490, where the flow search table (FST) is maintained in DDR,
in QCA6750 the flow search table used for RX FISA is placed in CMEM.
CMEM is an on-chip memory; accessing it is faster than accessing DDR.

Placing FST in CMEM will bring down the latencies for the HW to access the
flow search table and this latency is critical when FISA is enabled in
QCA6750 to avoid packet drops.

Adding the logic to add/delete FST entries placed in CMEM.

Change-Id: Icbfbb7e00cec813ed3de07a640d203db556da444
CRs-Fixed: 2771183
Manikanta Pubbisetty 4 years ago
parent
commit
90194f2a71
3 changed files with 499 additions and 15 deletions
  1. 465 14
      core/dp/txrx3.0/dp_fisa_rx.c
  2. 8 0
      core/dp/txrx3.0/dp_fisa_rx.h
  3. 26 1
      core/dp/txrx3.0/dp_rx_fst.c

+ 465 - 14
core/dp/txrx3.0/dp_fisa_rx.c

@@ -21,6 +21,7 @@
 #include "dp_internal.h"
 #include "dp_internal.h"
 #include <enet.h>
 #include <enet.h>
 #include <linux/skbuff.h>
 #include <linux/skbuff.h>
+#include "hif.h"
 
 
 static void dp_rx_fisa_flush_flow_wrap(struct dp_fisa_rx_sw_ft *sw_ft);
 static void dp_rx_fisa_flush_flow_wrap(struct dp_fisa_rx_sw_ft *sw_ft);
 
 
@@ -234,6 +235,76 @@ dp_rx_fisa_setup_hw_fse(struct dp_rx_fst *fisa_hdl,
 	return hw_fse;
 	return hw_fse;
 }
 }
 
 
+/**
+ * dp_rx_fisa_acquire_ft_lock() - Acquire lock which protects SW FT entries
+ * @fisa_hdl: Handle to fisa context
+ * @reo_id: REO ID
+ *
+ * Return: None
+ */
+static inline void
+dp_rx_fisa_acquire_ft_lock(struct dp_rx_fst *fisa_hdl, uint8_t reo_id)
+{
+	if (fisa_hdl->flow_deletion_supported)
+		qdf_spin_lock_bh(&fisa_hdl->dp_rx_sw_ft_lock[reo_id]);
+}
+
+/**
+ * dp_rx_fisa_acquire_ft_lock() - Release lock which protects SW FT entries
+ * @fisa_hdl: Handle to fisa context
+ * @reo_id: REO ID
+ *
+ * Return: None
+ */
+static inline void
+dp_rx_fisa_release_ft_lock(struct dp_rx_fst *fisa_hdl, uint8_t reo_id)
+{
+	if (fisa_hdl->flow_deletion_supported)
+		qdf_spin_unlock_bh(&fisa_hdl->dp_rx_sw_ft_lock[reo_id]);
+}
+
+/**
+ * dp_rx_fisa_setup_cmem_fse() - Setup the flow search entry in HW CMEM
+ * @fisa_hdl: Handle to fisa context
+ * @hashed_flow_idx: Index to flow table
+ * @rx_flow_info: tuple to be populated in flow table
+ * @flow_steer_info: REO index to which flow to be steered
+ *
+ * Return: Offset to the FSE entry in CMEM
+ */
+static uint32_t
+dp_rx_fisa_setup_cmem_fse(struct dp_rx_fst *fisa_hdl, uint32_t hashed_flow_idx,
+			  struct cdp_rx_flow_tuple_info *rx_flow_info,
+			  uint32_t flow_steer_info)
+{
+	struct dp_fisa_rx_sw_ft *sw_ft_entry;
+	struct hal_rx_flow flow;
+
+	sw_ft_entry = &(((struct dp_fisa_rx_sw_ft *)
+				fisa_hdl->base)[hashed_flow_idx]);
+	sw_ft_entry->metadata = ++fisa_hdl->meta_counter;
+
+	/* REO destination index starts from 1 */
+	flow.reo_destination_indication = flow_steer_info + 1;
+	flow.fse_metadata = sw_ft_entry->metadata;
+	flow.tuple_info.dest_ip_127_96 = rx_flow_info->dest_ip_127_96;
+	flow.tuple_info.dest_ip_95_64 = rx_flow_info->dest_ip_95_64;
+	flow.tuple_info.dest_ip_63_32 =	rx_flow_info->dest_ip_63_32;
+	flow.tuple_info.dest_ip_31_0 = rx_flow_info->dest_ip_31_0;
+	flow.tuple_info.src_ip_127_96 =	rx_flow_info->src_ip_127_96;
+	flow.tuple_info.src_ip_95_64 = rx_flow_info->src_ip_95_64;
+	flow.tuple_info.src_ip_63_32 = rx_flow_info->src_ip_63_32;
+	flow.tuple_info.src_ip_31_0 = rx_flow_info->src_ip_31_0;
+	flow.tuple_info.dest_port = rx_flow_info->dest_port;
+	flow.tuple_info.src_port = rx_flow_info->src_port;
+	flow.tuple_info.l4_protocol = rx_flow_info->l4_protocol;
+	flow.reo_destination_handler = HAL_RX_FSE_REO_DEST_FT;
+
+	return hal_rx_flow_setup_cmem_fse(fisa_hdl->soc_hdl->hal_soc,
+					  fisa_hdl->cmem_ba, hashed_flow_idx,
+					  &flow);
+}
+
 /**
 /**
  * dp_rx_fisa_update_sw_ft_entry() - Helper function to update few SW FT entry
  * dp_rx_fisa_update_sw_ft_entry() - Helper function to update few SW FT entry
  * @sw_ft_entry: Pointer to softerware flow tabel entry
  * @sw_ft_entry: Pointer to softerware flow tabel entry
@@ -443,6 +514,377 @@ static bool is_flow_idx_valid(bool flow_invalid, bool flow_timeout)
 		return false;
 		return false;
 }
 }
 
 
/**
 * dp_fisa_rx_delete_flow() - Delete a flow from SW and HW FST, currently
 * only applicable when FST is in CMEM
 * @fisa_hdl: handle to FISA context
 * @elem: details of the flow which is being added
 * @hashed_flow_idx: hashed flow idx of the deleting flow
 *
 * The victim entry is flushed and then immediately recycled for the new
 * flow described by @elem, i.e. this is a delete-plus-add in one step.
 *
 * Return: None
 */
static void
dp_fisa_rx_delete_flow(struct dp_rx_fst *fisa_hdl,
		       struct dp_fisa_rx_fst_update_elem *elem,
		       uint32_t hashed_flow_idx)
{
	struct dp_fisa_rx_sw_ft *sw_ft_entry;
	u8 reo_id;

	sw_ft_entry = &(((struct dp_fisa_rx_sw_ft *)
				fisa_hdl->base)[hashed_flow_idx]);
	/* Lock on the REO the victim flow was steered to, so the RX path
	 * cannot aggregate into this entry while it is being recycled.
	 */
	reo_id = sw_ft_entry->napi_id;

	dp_rx_fisa_acquire_ft_lock(fisa_hdl, reo_id);

	/* Flush the flow before deletion */
	dp_rx_fisa_flush_flow_wrap(sw_ft_entry);

	memset(sw_ft_entry, 0, sizeof(*sw_ft_entry));

	dp_rx_fisa_update_sw_ft_entry(sw_ft_entry, elem->flow_idx, elem->vdev,
				      fisa_hdl->soc_hdl, hashed_flow_idx);

	/* Add HW FT entry */
	sw_ft_entry->cmem_offset = dp_rx_fisa_setup_cmem_fse(
					fisa_hdl, hashed_flow_idx,
					&elem->flow_tuple_info, elem->reo_id);

	sw_ft_entry->is_populated = true;
	/* New flow may be steered to a different REO than the victim */
	sw_ft_entry->napi_id = elem->reo_id;
	qdf_mem_copy(&sw_ft_entry->rx_flow_tuple_info, &elem->flow_tuple_info,
		     sizeof(struct cdp_rx_flow_tuple_info));

	sw_ft_entry->is_flow_tcp = elem->is_tcp_flow;
	sw_ft_entry->is_flow_udp = elem->is_udp_flow;

	dp_rx_fisa_release_ft_lock(fisa_hdl, reo_id);

	/* One flow was evicted and one was installed in its place */
	fisa_hdl->add_flow_count++;
	fisa_hdl->del_flow_count++;
}
+
+/**
+ * dp_fisa_rx_get_hw_ft_timestamp() - Get timestamp maintained in the HW FSE
+ * @fisa_hdl: handle to FISA context
+ * @hashed_flow_idx: hashed idx of the flow
+ *
+ * Return: Timestamp
+ */
+static uint32_t
+dp_fisa_rx_get_hw_ft_timestamp(struct dp_rx_fst *fisa_hdl,
+			       uint32_t hashed_flow_idx)
+{
+	hal_soc_handle_t hal_soc_hdl = fisa_hdl->soc_hdl->hal_soc;
+	struct dp_fisa_rx_sw_ft *sw_ft_entry;
+
+	sw_ft_entry = &(((struct dp_fisa_rx_sw_ft *)
+				fisa_hdl->base)[hashed_flow_idx]);
+
+	if (fisa_hdl->soc_hdl->fst_in_cmem)
+		return hal_rx_flow_get_cmem_fse_timestamp(
+				hal_soc_hdl, sw_ft_entry->cmem_offset);
+
+	return ((struct rx_flow_search_entry *)sw_ft_entry->hw_fse)->timestamp;
+}
+
+/**
+ * dp_fisa_rx_fst_update() - Core logic which helps in Addition/Deletion
+ * of flows
+ * into/from SW & HW FST
+ * @fisa_hdl: handle to FISA context
+ * @elem: details of the flow which is being added
+ *
+ * Return: None
+ */
+static void dp_fisa_rx_fst_update(struct dp_rx_fst *fisa_hdl,
+				  struct dp_fisa_rx_fst_update_elem *elem)
+{
+	struct cdp_rx_flow_tuple_info *rx_flow_tuple_info;
+	uint32_t skid_count = 0, max_skid_length;
+	struct dp_fisa_rx_sw_ft *sw_ft_entry;
+	bool is_fst_updated = false;
+	uint32_t hashed_flow_idx;
+	uint32_t flow_hash;
+	uint32_t lru_ft_entry_time = 0xffffffff;
+	uint32_t lru_ft_entry_idx = 0;
+	uint32_t timestamp;
+
+	/* Get the hash from TLV
+	 * FSE FT Toeplitz hash is same Common parser hash available in TLV
+	 * common parser toeplitz hash is same as FSE toeplitz hash as
+	 * toeplitz key is same.
+	 */
+	flow_hash = elem->flow_idx;
+	hashed_flow_idx = flow_hash & fisa_hdl->hash_mask;
+	max_skid_length = fisa_hdl->max_skid_length;
+	rx_flow_tuple_info = &elem->flow_tuple_info;
+
+	dp_fisa_debug("flow_hash 0x%x hashed_flow_idx 0x%x", flow_hash,
+		      hashed_flow_idx);
+	dp_fisa_debug("max_skid_length 0x%x", max_skid_length);
+
+	do {
+		sw_ft_entry = &(((struct dp_fisa_rx_sw_ft *)
+					fisa_hdl->base)[hashed_flow_idx]);
+		if (!sw_ft_entry->is_populated) {
+			/* Add SW FT entry */
+			dp_rx_fisa_update_sw_ft_entry(sw_ft_entry,
+						      flow_hash, elem->vdev,
+						      fisa_hdl->soc_hdl,
+						      hashed_flow_idx);
+
+			/* Add HW FT entry */
+			sw_ft_entry->cmem_offset =
+				dp_rx_fisa_setup_cmem_fse(fisa_hdl,
+							  hashed_flow_idx,
+							  rx_flow_tuple_info,
+							  elem->reo_id);
+			sw_ft_entry->is_populated = true;
+			sw_ft_entry->napi_id = elem->reo_id;
+			qdf_mem_copy(&sw_ft_entry->rx_flow_tuple_info,
+				     rx_flow_tuple_info,
+				     sizeof(struct cdp_rx_flow_tuple_info));
+
+			sw_ft_entry->is_flow_tcp = elem->is_tcp_flow;
+			sw_ft_entry->is_flow_udp = elem->is_udp_flow;
+
+			is_fst_updated = true;
+			fisa_hdl->add_flow_count++;
+			break;
+		}
+		/* else */
+		/* hash collision move to the next FT entry */
+		dp_fisa_debug("Hash collision %d",
+			      fisa_hdl->hash_collision_cnt);
+		fisa_hdl->hash_collision_cnt++;
+
+		timestamp = dp_fisa_rx_get_hw_ft_timestamp(fisa_hdl,
+							   hashed_flow_idx);
+		if (timestamp < lru_ft_entry_time) {
+			lru_ft_entry_time = timestamp;
+			lru_ft_entry_idx = hashed_flow_idx;
+		}
+		skid_count++;
+		hashed_flow_idx++;
+		hashed_flow_idx &= fisa_hdl->hash_mask;
+	} while (skid_count <= max_skid_length);
+
+	/*
+	 * if (skid_count > max_skid_length)
+	 * Remove LRU flow from HW FT
+	 * Remove LRU flow from SW FT
+	 */
+	if (skid_count > max_skid_length) {
+		dp_fisa_debug("Max skid length reached flow cannot be added, evict exiting flow");
+		dp_fisa_rx_delete_flow(fisa_hdl, elem, lru_ft_entry_idx);
+		is_fst_updated = true;
+	}
+
+	/**
+	 * Send HTT cache invalidation command to firmware to
+	 * reflect the flow update
+	 */
+	if (is_fst_updated &&
+	    (qdf_atomic_inc_return(&fisa_hdl->fse_cache_flush_posted) == 1)) {
+		/* return 1 after increment implies FSE cache flush message
+		 * already posted. so start restart the timer
+		 */
+		qdf_timer_start(&fisa_hdl->fse_cache_flush_timer,
+				FSE_CACHE_FLUSH_TIME_OUT);
+	}
+}
+
/**
 * dp_fisa_rx_fst_update_work() - Work function for FST updates
 * @arg: argument passed to the work function (struct dp_rx_fst *)
 *
 * Drains the queued FST update elements and applies each to the SW/HW
 * flow search table. The chip is kept force-awake for the duration so
 * that CMEM writes performed by the updates reach the hardware.
 *
 * Return: None
 */
void dp_fisa_rx_fst_update_work(void *arg)
{
	struct dp_fisa_rx_fst_update_elem *elem;
	struct dp_rx_fst *fisa_hdl = arg;
	qdf_list_node_t *node;
	hal_soc_handle_t hal_soc_hdl = fisa_hdl->soc_hdl->hal_soc;

	/* Keep the target awake while CMEM is accessed; bail out (and
	 * possibly panic in debug builds) if the wake request fails.
	 */
	if (hif_force_wake_request(((struct hal_soc *)hal_soc_hdl)->hif_handle)) {
		dp_err("Wake up request failed");
		qdf_check_state_before_panic();
		return;
	}

	qdf_spin_lock_bh(&fisa_hdl->dp_rx_fst_lock);
	while (qdf_list_peek_front(&fisa_hdl->fst_update_list, &node) ==
	       QDF_STATUS_SUCCESS) {
		/* NOTE(review): cast assumes the list node is the first
		 * member of struct dp_fisa_rx_fst_update_elem — confirm
		 * against the struct definition.
		 */
		elem = (struct dp_fisa_rx_fst_update_elem *)node;
		dp_fisa_rx_fst_update(fisa_hdl, elem);
		qdf_list_remove_front(&fisa_hdl->fst_update_list, &node);
		qdf_mem_free(elem);
	}
	qdf_spin_unlock_bh(&fisa_hdl->dp_rx_fst_lock);

	if (hif_force_wake_release(((struct hal_soc *)hal_soc_hdl)->hif_handle)) {
		dp_err("Wake up release failed");
		qdf_check_state_before_panic();
		return;
	}
}
+
+/**
+ * dp_fisa_rx_is_fst_work_queued() - Check if work is already queued for
+ * the flow
+ * @fisa_hdl: handle to FISA context
+ * @flow_idx: Flow index
+ *
+ * Return: True/False
+ */
+static inline bool
+dp_fisa_rx_is_fst_work_queued(struct dp_rx_fst *fisa_hdl, uint32_t flow_idx)
+{
+	struct dp_fisa_rx_fst_update_elem *elem;
+	qdf_list_node_t *cur_node, *next_node;
+	QDF_STATUS status;
+
+	status = qdf_list_peek_front(&fisa_hdl->fst_update_list, &cur_node);
+	if (status == QDF_STATUS_E_EMPTY)
+		return false;
+
+	do {
+		elem = (struct dp_fisa_rx_fst_update_elem *)cur_node;
+		if (elem->flow_idx == flow_idx)
+			return true;
+
+		status = qdf_list_peek_next(&fisa_hdl->fst_update_list,
+					    cur_node, &next_node);
+		cur_node = next_node;
+	} while (status == QDF_STATUS_SUCCESS);
+
+	return false;
+}
+
/**
 * dp_fisa_rx_queue_fst_update_work() - Queue FST update work
 * @fisa_hdl: Handle to FISA context
 * @flow_idx: Flow index
 * @nbuf: Received RX packet
 * @vdev: DP vdev handle
 *
 * If the SW FT entry at the hashed index already matches this packet's
 * flow tuple it is returned directly; otherwise an update element is
 * allocated and queued so the work function adds the flow to the FST.
 *
 * Return: Pointer to the matching SW FT entry if the flow already
 * exists, NULL otherwise (flow not eligible, work already/newly queued,
 * or allocation failure)
 */
static void *
dp_fisa_rx_queue_fst_update_work(struct dp_rx_fst *fisa_hdl, uint32_t flow_idx,
				 qdf_nbuf_t nbuf, struct dp_vdev *vdev)
{
	hal_soc_handle_t hal_soc_hdl = fisa_hdl->soc_hdl->hal_soc;
	struct cdp_rx_flow_tuple_info flow_tuple_info;
	bool is_flow_tcp, is_flow_udp, is_flow_ipv6;
	uint8_t *rx_tlv_hdr = qdf_nbuf_data(nbuf);
	struct dp_fisa_rx_fst_update_elem *elem;
	struct dp_fisa_rx_sw_ft *sw_ft_entry;
	uint32_t hashed_flow_idx;
	bool found;

	is_flow_tcp = HAL_RX_TLV_GET_TCP_PROTO(rx_tlv_hdr);
	is_flow_udp = HAL_RX_TLV_GET_UDP_PROTO(rx_tlv_hdr);
	is_flow_ipv6 = HAL_RX_TLV_GET_IPV6(rx_tlv_hdr);

	/* Only IPv4 TCP/UDP flows are eligible for FISA aggregation */
	if (is_flow_ipv6 || !(is_flow_tcp || is_flow_udp)) {
		dp_fisa_debug("Not UDP or TCP IPV4 flow");
		return NULL;
	}

	/* Avoid queueing duplicate work for the same flow index */
	qdf_spin_lock_bh(&fisa_hdl->dp_rx_fst_lock);
	found = dp_fisa_rx_is_fst_work_queued(fisa_hdl, flow_idx);
	qdf_spin_unlock_bh(&fisa_hdl->dp_rx_fst_lock);
	if (found)
		return NULL;

	hashed_flow_idx = flow_idx & fisa_hdl->hash_mask;
	sw_ft_entry = &(((struct dp_fisa_rx_sw_ft *)
				fisa_hdl->base)[hashed_flow_idx]);

	get_flow_tuple_from_nbuf(hal_soc_hdl, &flow_tuple_info,
				 nbuf, rx_tlv_hdr);

	/* Flow already installed — no work needed, return the entry */
	if (sw_ft_entry->is_populated && is_same_flow(
			&sw_ft_entry->rx_flow_tuple_info, &flow_tuple_info))
		return sw_ft_entry;

	elem = qdf_mem_malloc(sizeof(*elem));
	if (!elem) {
		dp_fisa_debug("failed to allocate memory for FST update");
		return NULL;
	}

	/* Ownership of elem passes to the work function, which frees it */
	qdf_mem_copy(&elem->flow_tuple_info, &flow_tuple_info,
		     sizeof(struct cdp_rx_flow_tuple_info));
	elem->flow_idx = flow_idx;
	elem->is_tcp_flow = is_flow_tcp;
	elem->is_udp_flow = is_flow_udp;
	elem->reo_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
	elem->vdev = vdev;

	qdf_spin_lock_bh(&fisa_hdl->dp_rx_fst_lock);
	qdf_list_insert_back(&fisa_hdl->fst_update_list, &elem->node);
	qdf_spin_unlock_bh(&fisa_hdl->dp_rx_fst_lock);

	qdf_queue_work(fisa_hdl->soc_hdl->osdev, fisa_hdl->fst_update_wq,
		       &fisa_hdl->fst_update_work);

	return NULL;
}
+
/**
 * dp_fisa_rx_get_sw_ft_entry() - Get SW FT entry for the flow
 * @fisa_hdl: Handle to FISA context
 * @nbuf: Received RX packet
 * @flow_idx: Flow index
 * @vdev: handle to DP vdev
 *
 * Validates @flow_idx and, when flow deletion is supported, cross-checks
 * the packet's FSE metadata against the SW FT entry so stale packets of
 * a deleted flow are not aggregated into the entry's new occupant.
 *
 * Return: SW FT entry, or NULL on invalid index / metadata mismatch
 */
static inline struct dp_fisa_rx_sw_ft *
dp_fisa_rx_get_sw_ft_entry(struct dp_rx_fst *fisa_hdl, qdf_nbuf_t nbuf,
			   uint32_t flow_idx, struct dp_vdev *vdev)
{
	hal_soc_handle_t hal_soc_hdl = fisa_hdl->soc_hdl->hal_soc;
	struct dp_fisa_rx_sw_ft *sw_ft_entry = NULL;
	struct dp_fisa_rx_sw_ft *sw_ft_base;
	uint32_t fse_metadata;
	uint8_t *rx_tlv_hdr;

	sw_ft_base = (struct dp_fisa_rx_sw_ft *)fisa_hdl->base;
	rx_tlv_hdr = qdf_nbuf_data(nbuf);

	/* HW-reported flow index must fall inside the flow table */
	if (qdf_unlikely(flow_idx >= fisa_hdl->max_entries)) {
		dp_info("flow_idx is invalid 0x%x", flow_idx);
		hal_rx_dump_pkt_tlvs(hal_soc_hdl, rx_tlv_hdr,
				     QDF_TRACE_LEVEL_INFO_HIGH);
		DP_STATS_INC(fisa_hdl, invalid_flow_index, 1);
		return NULL;
	}

	sw_ft_entry = &sw_ft_base[flow_idx];

	/* Without flow deletion, entries are never recycled, so no
	 * metadata check is required.
	 */
	if (!fisa_hdl->flow_deletion_supported) {
		sw_ft_entry->vdev = vdev;
		return sw_ft_entry;
	}

	/* When a flow is deleted, there could be some packets of that flow
	 * with valid flow_idx in the REO queue and arrive at a later time,
	 * compare the metadata for such packets before returning the SW FT
	 * entry to avoid packets getting aggregated with the wrong flow.
	 */
	fse_metadata = hal_rx_msdu_fse_metadata_get(hal_soc_hdl, rx_tlv_hdr);
	if (fisa_hdl->del_flow_count && fse_metadata != sw_ft_entry->metadata)
		return NULL;

	sw_ft_entry->vdev = vdev;
	return sw_ft_entry;
}
+
 /**
 /**
  * dp_rx_get_fisa_flow() - Get FT entry corresponding to incoming nbuf
  * dp_rx_get_fisa_flow() - Get FT entry corresponding to incoming nbuf
  * @fisa_hdl: handle to FISA context
  * @fisa_hdl: handle to FISA context
@@ -459,8 +901,6 @@ dp_rx_get_fisa_flow(struct dp_rx_fst *fisa_hdl, struct dp_vdev *vdev,
 	uint32_t flow_idx;
 	uint32_t flow_idx;
 	bool flow_invalid, flow_timeout, flow_idx_valid;
 	bool flow_invalid, flow_timeout, flow_idx_valid;
 	struct dp_fisa_rx_sw_ft *sw_ft_entry = NULL;
 	struct dp_fisa_rx_sw_ft *sw_ft_entry = NULL;
-	struct dp_fisa_rx_sw_ft *sw_ft_base = (struct dp_fisa_rx_sw_ft *)
-								fisa_hdl->base;
 	hal_soc_handle_t hal_soc_hdl = fisa_hdl->soc_hdl->hal_soc;
 	hal_soc_handle_t hal_soc_hdl = fisa_hdl->soc_hdl->hal_soc;
 
 
 	if (QDF_NBUF_CB_RX_TCP_PROTO(nbuf))
 	if (QDF_NBUF_CB_RX_TCP_PROTO(nbuf))
@@ -475,21 +915,16 @@ dp_rx_get_fisa_flow(struct dp_rx_fst *fisa_hdl, struct dp_vdev *vdev,
 
 
 	flow_idx_valid = is_flow_idx_valid(flow_invalid, flow_timeout);
 	flow_idx_valid = is_flow_idx_valid(flow_invalid, flow_timeout);
 	if (flow_idx_valid) {
 	if (flow_idx_valid) {
-		/* If flow index is invalid, fail to get flow */
-		if (qdf_unlikely(flow_idx >= fisa_hdl->max_entries)) {
-			dp_info("flow_idx is invalid 0x%x", flow_idx);
-			hal_rx_dump_pkt_tlvs(hal_soc_hdl, rx_tlv_hdr,
-					     QDF_TRACE_LEVEL_INFO_HIGH);
-			DP_STATS_INC(fisa_hdl, invalid_flow_index, 1);
-			return NULL;
-		}
-		sw_ft_entry = &sw_ft_base[flow_idx];
-		sw_ft_entry->vdev = vdev;
-
+		sw_ft_entry = dp_fisa_rx_get_sw_ft_entry(fisa_hdl, nbuf,
+							 flow_idx, vdev);
 		return sw_ft_entry;
 		return sw_ft_entry;
 	}
 	}
 
 
 	/* else new flow, add entry to FT */
 	/* else new flow, add entry to FT */
+	if (fisa_hdl->soc_hdl->fst_in_cmem)
+		return dp_fisa_rx_queue_fst_update_work(fisa_hdl, flow_idx,
+							nbuf, vdev);
+
 	sw_ft_entry = dp_rx_fisa_add_ft_entry(fisa_hdl, flow_idx, nbuf, vdev,
 	sw_ft_entry = dp_rx_fisa_add_ft_entry(fisa_hdl, flow_idx, nbuf, vdev,
 					      rx_tlv_hdr);
 					      rx_tlv_hdr);
 
 
@@ -1006,6 +1441,8 @@ static int dp_add_nbuf_to_fisa_flow(struct dp_rx_fst *fisa_hdl,
 	hal_aggr_count = hal_rx_get_fisa_flow_agg_count(hal_soc_hdl,
 	hal_aggr_count = hal_rx_get_fisa_flow_agg_count(hal_soc_hdl,
 							rx_tlv_hdr);
 							rx_tlv_hdr);
 
 
+	dp_rx_fisa_acquire_ft_lock(fisa_hdl, fisa_flow->napi_id);
+
 	if (!flow_aggr_cont) {
 	if (!flow_aggr_cont) {
 		/* Start of new aggregation for the flow
 		/* Start of new aggregation for the flow
 		 * Flush previous aggregates for this flow
 		 * Flush previous aggregates for this flow
@@ -1113,10 +1550,12 @@ static int dp_add_nbuf_to_fisa_flow(struct dp_rx_fst *fisa_hdl,
 		dp_rx_fisa_aggr_tcp(fisa_hdl, fisa_flow, nbuf);
 		dp_rx_fisa_aggr_tcp(fisa_hdl, fisa_flow, nbuf);
 	}
 	}
 
 
+	dp_rx_fisa_release_ft_lock(fisa_hdl, fisa_flow->napi_id);
 	return FISA_AGGR_DONE;
 	return FISA_AGGR_DONE;
 
 
 invalid_fisa_assist:
 invalid_fisa_assist:
 	/* Not eligible aggregation deliver frame without FISA */
 	/* Not eligible aggregation deliver frame without FISA */
+	dp_rx_fisa_release_ft_lock(fisa_hdl, fisa_flow->napi_id);
 	return FISA_AGGR_NOT_ELIGIBLE;
 	return FISA_AGGR_NOT_ELIGIBLE;
 }
 }
 
 
@@ -1156,6 +1595,7 @@ QDF_STATUS dp_rx_fisa_flush_by_vdev_ctx_id(struct dp_soc *soc,
 	int ft_size = fisa_hdl->max_entries;
 	int ft_size = fisa_hdl->max_entries;
 	int i;
 	int i;
 
 
+	dp_rx_fisa_acquire_ft_lock(fisa_hdl, rx_ctx_id);
 	for (i = 0; i < ft_size; i++) {
 	for (i = 0; i < ft_size; i++) {
 		if (sw_ft_entry[i].is_populated &&
 		if (sw_ft_entry[i].is_populated &&
 		    vdev == sw_ft_entry[i].vdev &&
 		    vdev == sw_ft_entry[i].vdev &&
@@ -1165,6 +1605,7 @@ QDF_STATUS dp_rx_fisa_flush_by_vdev_ctx_id(struct dp_soc *soc,
 			dp_rx_fisa_flush_flow_wrap(&sw_ft_entry[i]);
 			dp_rx_fisa_flush_flow_wrap(&sw_ft_entry[i]);
 		}
 		}
 	}
 	}
+	dp_rx_fisa_release_ft_lock(fisa_hdl, rx_ctx_id);
 
 
 	return QDF_STATUS_SUCCESS;
 	return QDF_STATUS_SUCCESS;
 }
 }
@@ -1249,8 +1690,13 @@ QDF_STATUS dp_fisa_rx(struct dp_soc *soc, struct dp_vdev *vdev,
 		 */
 		 */
 		if (qdf_unlikely(qdf_nbuf_get_ext_list(head_nbuf))) {
 		if (qdf_unlikely(qdf_nbuf_get_ext_list(head_nbuf))) {
 			dp_fisa_debug("Fragmented skb, will not be FISAed");
 			dp_fisa_debug("Fragmented skb, will not be FISAed");
-			if (fisa_flow)
+			if (fisa_flow) {
+				dp_rx_fisa_acquire_ft_lock(dp_fisa_rx_hdl,
+							   fisa_flow->napi_id);
 				dp_rx_fisa_flush_flow(vdev, fisa_flow);
 				dp_rx_fisa_flush_flow(vdev, fisa_flow);
+				dp_rx_fisa_release_ft_lock(dp_fisa_rx_hdl,
+							   fisa_flow->napi_id);
+			}
 			goto pull_nbuf;
 			goto pull_nbuf;
 		}
 		}
 
 
@@ -1344,6 +1790,7 @@ QDF_STATUS dp_rx_fisa_flush_by_ctx_id(struct dp_soc *soc, int napi_id)
 	int ft_size = fisa_hdl->max_entries;
 	int ft_size = fisa_hdl->max_entries;
 	int i;
 	int i;
 
 
+	dp_rx_fisa_acquire_ft_lock(fisa_hdl, napi_id);
 	for (i = 0; i < ft_size; i++) {
 	for (i = 0; i < ft_size; i++) {
 		if (sw_ft_entry[i].napi_id == napi_id &&
 		if (sw_ft_entry[i].napi_id == napi_id &&
 		    sw_ft_entry[i].is_populated) {
 		    sw_ft_entry[i].is_populated) {
@@ -1352,6 +1799,7 @@ QDF_STATUS dp_rx_fisa_flush_by_ctx_id(struct dp_soc *soc, int napi_id)
 			dp_rx_fisa_flush_flow_wrap(&sw_ft_entry[i]);
 			dp_rx_fisa_flush_flow_wrap(&sw_ft_entry[i]);
 		}
 		}
 	}
 	}
+	dp_rx_fisa_release_ft_lock(fisa_hdl, napi_id);
 
 
 	return QDF_STATUS_SUCCESS;
 	return QDF_STATUS_SUCCESS;
 }
 }
@@ -1372,11 +1820,14 @@ QDF_STATUS dp_rx_fisa_flush_by_vdev_id(struct dp_soc *soc, uint8_t vdev_id)
 	}
 	}
 
 
 	for (i = 0; i < ft_size; i++) {
 	for (i = 0; i < ft_size; i++) {
+		dp_rx_fisa_acquire_ft_lock(fisa_hdl, sw_ft_entry[i].napi_id);
 		if (vdev == sw_ft_entry[i].vdev) {
 		if (vdev == sw_ft_entry[i].vdev) {
 			dp_fisa_debug("flushing %d %pk vdev %pK", i,
 			dp_fisa_debug("flushing %d %pk vdev %pK", i,
 				      &sw_ft_entry[i], vdev);
 				      &sw_ft_entry[i], vdev);
+
 			dp_rx_fisa_flush_flow_wrap(&sw_ft_entry[i]);
 			dp_rx_fisa_flush_flow_wrap(&sw_ft_entry[i]);
 		}
 		}
+		dp_rx_fisa_release_ft_lock(fisa_hdl, sw_ft_entry[i].napi_id);
 	}
 	}
 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
 	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_RX);
 
 

+ 8 - 0
core/dp/txrx3.0/dp_fisa_rx.h

@@ -113,6 +113,14 @@ void dp_rx_skip_fisa(struct cdp_soc_t *cdp_soc, uint32_t value)
  */
  */
 void dp_set_fisa_disallowed_for_vdev(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
 void dp_set_fisa_disallowed_for_vdev(struct cdp_soc_t *cdp_soc, uint8_t vdev_id,
 				     uint8_t rx_ctx_id, uint8_t val);
 				     uint8_t rx_ctx_id, uint8_t val);
+
+/**
+ * dp_fisa_rx_fst_update_work() - Work function for FST updates
+ * @arg: argument passed to the work function
+ *
+ * Return: None
+ */
+void dp_fisa_rx_fst_update_work(void *arg);
 #else
 #else
 static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
 static QDF_STATUS dp_rx_dump_fisa_stats(struct dp_soc *soc)
 {
 {

+ 26 - 1
core/dp/txrx3.0/dp_rx_fst.c

@@ -23,6 +23,7 @@
 #include "hal_rx_flow.h"
 #include "hal_rx_flow.h"
 #include "dp_htt.h"
 #include "dp_htt.h"
 #include "dp_internal.h"
 #include "dp_internal.h"
+#include "hif.h"
 
 
 /* Timeout in milliseconds to wait for CMEM FST HTT response */
 /* Timeout in milliseconds to wait for CMEM FST HTT response */
 #define DP_RX_FST_CMEM_RESP_TIMEOUT 2000
 #define DP_RX_FST_CMEM_RESP_TIMEOUT 2000
@@ -32,7 +33,31 @@ void dp_fisa_rx_fst_update_work(void *arg);
 
 
 void dp_rx_dump_fisa_table(struct dp_soc *soc)
 void dp_rx_dump_fisa_table(struct dp_soc *soc)
 {
 {
-	hal_rx_dump_fse_table(soc->rx_fst->hal_rx_fst);
+	hal_soc_handle_t hal_soc_hdl = soc->hal_soc;
+	struct dp_rx_fst *fst = soc->rx_fst;
+	struct dp_fisa_rx_sw_ft *sw_ft_entry;
+	int i;
+
+	if (!soc->fst_in_cmem)
+		return hal_rx_dump_fse_table(soc->rx_fst->hal_rx_fst);
+
+	sw_ft_entry = (struct dp_fisa_rx_sw_ft *)fst->base;
+
+	if (hif_force_wake_request(((struct hal_soc *)hal_soc_hdl)->hif_handle)) {
+		dp_err("Wake up request failed");
+		qdf_check_state_before_panic();
+		return;
+	}
+
+	for (i = 0; i < fst->max_entries; i++)
+		hal_rx_dump_cmem_fse(hal_soc_hdl,
+				     sw_ft_entry[i].cmem_offset, i);
+
+	if (hif_force_wake_release(((struct hal_soc *)hal_soc_hdl)->hif_handle)) {
+		dp_err("Wake up release failed");
+		qdf_check_state_before_panic();
+		return;
+	}
 }
 }
 
 
 /**
 /**