
qcacld-3.0: Reduce FSE cache invalidation messages

The current flow learning algorithm invalidates the FSE cache every
time a flow is learnt. This results in multiple invalidations when
several flows are learnt in quick succession within a single NAPI
soft IRQ cycle, and the FW restricts how many such messages it can
process simultaneously. Delay the cache invalidation message so that
a single message covers multiple learnt flows.

Change-Id: Iee8cb5f12546890f2da32195dc3c4878e7a39ffd
CRs-Fixed: 2718968
Manjunathappa Prakash, 4 years ago
Parent commit: d269df54b3
3 files changed, 93 insertions(+), 39 deletions(-)
  1. core/dp/txrx3.0/dp_fisa_rx.c	+8 -39
  2. core/dp/txrx3.0/dp_fisa_rx.h	+3 -0
  3. core/dp/txrx3.0/dp_rx_fst.c	+82 -0
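
Before the diff, a minimal user-space sketch of the debounce pattern this
change introduces may help: an atomic counter gates a one-shot timer so that
a burst of flow-learn events produces a single cache flush. Everything here
(on_flow_learnt, flush_timer, send_cache_flush, the C11/pthreads machinery)
is an illustrative stand-in for the driver's qdf_atomic_*/qdf_timer_*
wrappers and the HTT command, not its actual API.

/*
 * Sketch only: C11 atomics + a detached thread stand in for
 * qdf_atomic_t and qdf_timer_t. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_uint flows_posted;	/* flows learnt since last flush */

/* Stand-in for the single HTT "invalidate full cache" message. */
static void send_cache_flush(unsigned int flows)
{
	printf("one flush message covers %u flows\n", flows);
}

/* Timer body: fires once, 5 ms after the first flow of a window. */
static void *flush_timer(void *arg)
{
	(void)arg;
	usleep(5000);		/* mirrors FSE_CACHE_FLUSH_TIME_OUT */
	/* Read the count and re-open the gate in one atomic step. */
	send_cache_flush(atomic_exchange(&flows_posted, 0));
	return NULL;
}

/* Producer side: called for every flow learnt in NAPI context. */
static void on_flow_learnt(void)
{
	/* Only the 0 -> 1 transition arms the timer; later flows in
	 * the window just bump the count and share the same flush.
	 */
	if (atomic_fetch_add(&flows_posted, 1) == 0) {
		pthread_t t;

		pthread_create(&t, NULL, flush_timer, NULL);
		pthread_detach(&t);
	}
}

int main(void)
{
	/* Three flows in quick succession -> exactly one message. */
	for (int i = 0; i < 3; i++)
		on_flow_learnt();
	usleep(20000);		/* let the timer fire */
	return 0;
}

Note the sketch collapses the driver's separate qdf_atomic_read() and
qdf_atomic_set() in the handler into one atomic_exchange(); the observable
behavior is the same when producer and handler do not overlap.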

+ 8 - 39
core/dp/txrx3.0/dp_fisa_rx.c

@@ -280,34 +280,6 @@ static bool is_same_flow(struct cdp_rx_flow_tuple_info *tuple1,
 		return true;
 }
 
-/**
- * dp_rx_flow_send_htt_operation_cmd() - Invalidate FSE cache on FT change
- * @pdev: handle to DP pdev
- * @fse_op: Cache operation code
- * @rx_flow_tuple: flow tuple whose entry has to be invalidated
- *
- * Return: Success if we successfully send FW HTT command
- */
-static QDF_STATUS
-dp_rx_flow_send_htt_operation_cmd(struct dp_pdev *pdev,
-				  enum dp_htt_flow_fst_operation fse_op,
-				  struct cdp_rx_flow_tuple_info *rx_flow_tuple)
-{
-	struct dp_htt_rx_flow_fst_operation fse_op_cmd;
-	struct cdp_rx_flow_info rx_flow_info;
-
-	rx_flow_info.is_addr_ipv4 = true;
-	rx_flow_info.op_code = CDP_FLOW_FST_ENTRY_ADD;
-	qdf_mem_copy(&rx_flow_info.flow_tuple_info, rx_flow_tuple,
-		     sizeof(struct cdp_rx_flow_tuple_info));
-	rx_flow_info.fse_metadata = 0xDADA;
-	fse_op_cmd.pdev_id = pdev->pdev_id;
-	fse_op_cmd.op_code = fse_op;
-	fse_op_cmd.rx_flow = &rx_flow_info;
-
-	return dp_htt_rx_flow_fse_operation(pdev, &fse_op_cmd);
-}
-
 /**
  * dp_rx_fisa_add_ft_entry() - Add new flow to HW and SW FT if it is not added
  * @fisa_hdl: handle to FISA context
@@ -329,7 +301,6 @@ dp_rx_fisa_add_ft_entry(struct dp_rx_fst *fisa_hdl,
 	uint32_t hashed_flow_idx;
 	uint32_t skid_count = 0, max_skid_length;
 	struct cdp_rx_flow_tuple_info rx_flow_tuple_info;
-	QDF_STATUS status;
 	bool is_fst_updated = false;
 	bool is_flow_tcp, is_flow_udp, is_flow_ipv6;
 	hal_soc_handle_t hal_soc_hdl = fisa_hdl->soc_hdl->hal_soc;
@@ -442,16 +413,14 @@ dp_rx_fisa_add_ft_entry(struct dp_rx_fst *fisa_hdl,
 	 * Send HTT cache invalidation command to firmware to
 	 * reflect the flow update
 	 */
-	if (is_fst_updated) {
-		status = dp_rx_flow_send_htt_operation_cmd(vdev->pdev,
-					DP_HTT_FST_CACHE_INVALIDATE_FULL,
-					&rx_flow_tuple_info);
-		if (QDF_STATUS_SUCCESS != status) {
-			dp_err("Failed to send the cache invalidation\n");
-			/* TBD: remove flow from SW and HW flow table
-			 * Not big impact cache entry gets updated later
-			 */
-		}
+	if (is_fst_updated &&
+	    (qdf_atomic_inc_return(&fisa_hdl->fse_cache_flush_posted) == 1)) {
+		/* An increment that returns 1 means no FSE cache flush
+		 * message is pending yet, so arm the timer to post one.
+		 */
+		qdf_timer_start(&fisa_hdl->fse_cache_flush_timer,
+				FSE_CACHE_FLUSH_TIME_OUT);
+
 	}
 	dp_fisa_debug("sw_ft_entry %pK", sw_ft_entry);
 	return sw_ft_entry;
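
Design note on the hunk above: qdf_atomic_inc_return() makes arming the
timer a one-shot per flush window. Only the increment that takes
fse_cache_flush_posted from 0 to 1 starts fse_cache_flush_timer; every flow
learnt before the timer fires merely bumps the counter, and the atomicity
of the increment means two contexts racing on a fresh window cannot both
observe 1, so the timer is never double-started.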

+ 3 - 0
core/dp/txrx3.0/dp_fisa_rx.h

@@ -26,6 +26,9 @@
 #endif
 
 #if defined(WLAN_SUPPORT_RX_FISA)
+
+#define FSE_CACHE_FLUSH_TIME_OUT	5 /* milliSeconds */
+
 /**
  * dp_rx_dump_fisa_stats() - Dump fisa stats
  * @soc: core txrx main context
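
FSE_CACHE_FLUSH_TIME_OUT is the batching/latency knob: the FW sees the
invalidation at most 5 ms after the first flow of a window, and every flow
learnt inside that window rides on the same message. For a NAPI poll that
learns, say, 30 new flows, the old code issued 30 HTT commands where the
new code issues one; the cost is up to 5 ms of cache staleness, which (as
the code comments note) only delays when the entry takes effect for a new
flow.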

+ 82 - 0
core/dp/txrx3.0/dp_rx_fst.c

@@ -31,6 +31,71 @@ void dp_rx_dump_fisa_table(struct dp_soc *soc)
 	hal_rx_dump_fse_table(soc->rx_fst->hal_rx_fst);
 }
 
+/**
+ * dp_rx_flow_send_htt_operation_cmd() - Invalidate FSE cache on FT change
+ * @pdev: handle to DP pdev
+ * @fse_op: Cache operation code
+ * @rx_flow_tuple: flow tuple whose entry has to be invalidated
+ *
+ * Return: Success if we successfully send FW HTT command
+ */
+static QDF_STATUS
+dp_rx_flow_send_htt_operation_cmd(struct dp_pdev *pdev,
+				  enum dp_htt_flow_fst_operation fse_op,
+				  struct cdp_rx_flow_tuple_info *rx_flow_tuple)
+{
+	struct dp_htt_rx_flow_fst_operation fse_op_cmd;
+	struct cdp_rx_flow_info rx_flow_info;
+
+	rx_flow_info.is_addr_ipv4 = true;
+	rx_flow_info.op_code = CDP_FLOW_FST_ENTRY_ADD;
+	qdf_mem_copy(&rx_flow_info.flow_tuple_info, rx_flow_tuple,
+		     sizeof(struct cdp_rx_flow_tuple_info));
+	rx_flow_info.fse_metadata = 0xDADA;
+	fse_op_cmd.pdev_id = pdev->pdev_id;
+	fse_op_cmd.op_code = fse_op;
+	fse_op_cmd.rx_flow = &rx_flow_info;
+
+	return dp_htt_rx_flow_fse_operation(pdev, &fse_op_cmd);
+}
+
+/**
+ * dp_fisa_fse_cache_flush_timer() - FSE cache flush timeout handler
+ * @arg: SoC handle
+ *
+ * Return: None
+ */
+static void dp_fisa_fse_cache_flush_timer(void *arg)
+{
+	struct dp_soc *soc = (struct dp_soc *)arg;
+	struct dp_rx_fst *fisa_hdl = soc->rx_fst;
+	struct cdp_rx_flow_tuple_info rx_flow_tuple_info = { 0 };
+	static uint32_t fse_cache_flush_rec_idx;
+	struct fse_cache_flush_history *fse_cache_flush_rec;
+	QDF_STATUS status;
+
+	fse_cache_flush_rec = &fisa_hdl->cache_fl_rec[fse_cache_flush_rec_idx %
+							MAX_FSE_CACHE_FL_HST];
+	fse_cache_flush_rec->timestamp = qdf_get_log_timestamp();
+	fse_cache_flush_rec->flows_added =
+			qdf_atomic_read(&fisa_hdl->fse_cache_flush_posted);
+	fse_cache_flush_rec_idx++;
+	dp_info("FSE cache flush for %d flows",
+		fse_cache_flush_rec->flows_added);
+
+	qdf_atomic_set(&fisa_hdl->fse_cache_flush_posted, 0);
+	status =
+	 dp_rx_flow_send_htt_operation_cmd(soc->pdev_list[0],
+					   DP_HTT_FST_CACHE_INVALIDATE_FULL,
+					   &rx_flow_tuple_info);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		dp_err("Failed to send the cache invalidation\n");
+		/*
+		 * No big impact: the cache entry gets updated later.
+		 */
+	}
+}
+
 /**
  * dp_rx_fst_attach() - Initialize Rx FST and setup necessary parameters
  * @soc: SoC handle
@@ -43,6 +108,7 @@ QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev)
 	struct dp_rx_fst *fst;
 	uint8_t *hash_key;
 	struct wlan_cfg_dp_soc_ctxt *cfg = soc->wlan_cfg_ctx;
+	QDF_STATUS status;
 
 	/* Check if it is enabled in the INI */
 	if (!wlan_cfg_is_rx_fisa_enabled(cfg)) {
@@ -107,6 +173,17 @@ QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev)
 
 	qdf_spinlock_create(&fst->dp_rx_fst_lock);
 
+	status = qdf_timer_init(soc->osdev, &fst->fse_cache_flush_timer,
+				dp_fisa_fse_cache_flush_timer, (void *)soc,
+				QDF_TIMER_TYPE_WAKE_APPS);
+	if (QDF_IS_STATUS_ERROR(status)) {
+		QDF_TRACE(QDF_MODULE_ID_ANY, QDF_TRACE_LEVEL_ERROR,
+			  "Failed to init cache_flush_timer\n");
+		goto timer_init_fail;
+	}
+
+	qdf_atomic_init(&fst->fse_cache_flush_posted);
+
 	fst->soc_hdl = soc;
 	soc->rx_fst = fst;
 	soc->fisa_enable = true;
@@ -117,6 +194,10 @@ QDF_STATUS dp_rx_fst_attach(struct dp_soc *soc, struct dp_pdev *pdev)
 		  fst->max_entries);
 
 	return QDF_STATUS_SUCCESS;
+
+timer_init_fail:
+	qdf_spinlock_destroy(&fst->dp_rx_fst_lock);
+	hal_rx_fst_detach(fst->hal_rx_fst, soc->osdev);
 out1:
 	qdf_mem_free(fst->base);
 out2:
@@ -168,6 +249,7 @@ void dp_rx_fst_detach(struct dp_soc *soc, struct dp_pdev *pdev)
 
 	dp_fst = soc->rx_fst;
 	if (qdf_likely(dp_fst)) {
+		qdf_timer_sync_cancel(&dp_fst->fse_cache_flush_timer);
 		hal_rx_fst_detach(dp_fst->hal_rx_fst, soc->osdev);
 		qdf_mem_free(dp_fst->base);
 		qdf_spinlock_destroy(&dp_fst->dp_rx_fst_lock);
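
Teardown note: qdf_timer_sync_cancel() cancels the flush timer
synchronously, waiting out any handler already in flight, so by the time
hal_rx_fst_detach() and qdf_mem_free() run, dp_fisa_fse_cache_flush_timer()
can no longer dereference the FST. A plain asynchronous cancel would leave
a window for a use-after-free if the timer fired during detach.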