
qcacmn: Enable msi support for ext_group ring groupings

Assign MSI vectors to SRNG rings based on the ext_group in which they
will be serviced.

Provide support for ext_groups in hif_pci.

Change-Id: If313fdb43b939871c0d73dea9a05f757427b5b16
CRs-Fixed: 2051911
Houston Hoffman, 8 years ago
Parent commit 648a918927
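
A minimal standalone sketch of the ring-to-MSI mapping this change introduces; the count and start values are assumed placeholders for what pld_get_user_msi_assignment() would report for "DP", and dp_srng_msi_setup() in dp_main.c below is the actual implementation:

/* Illustrative only: ext_group index -> MSI data value, mirroring the
 * computation in dp_srng_msi_setup(). The count/start values are assumed. */
#include <stdio.h>

int main(void)
{
	int msi_data_count = 3;   /* assumed: MSI vectors granted to "DP" */
	int msi_data_start = 1;   /* assumed: first MSI data value for "DP" */
	int group;

	for (group = 0; group < 7; group++) {
		unsigned int msi_data =
			(group % msi_data_count) + msi_data_start;

		printf("ext_group %d -> msi_data %u\n", group, msi_data);
	}
	return 0;
}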

+ 263 - 44
dp/wifi3.0/dp_main.c

@@ -153,7 +153,156 @@ const int dp_stats_mapping_table[][STATS_TYPE_MAX] = {
 	{TXRX_FW_STATS_INVALID, TXRX_RX_HOST_STATS},
 };
 
-/*
+/**
+ * dp_srng_find_ring_in_mask() - find which ext_group a ring belongs to
+ * @ring_num: ring num of the ring being queried
+ * @grp_mask: the grp_mask array for the ring type in question.
+ *
+ * The grp_mask array is indexed by group number and the bit fields correspond
+ * to ring numbers.  We are finding which interrupt group a ring belongs to.
+ *
+ * Return: the index of the grp_mask array entry containing the ring number,
+ * or -QDF_STATUS_E_NOENT if no entry is found
+ */
+static int dp_srng_find_ring_in_mask(int ring_num, int *grp_mask)
+{
+	int ext_group_num;
+	int mask = 1 << ring_num;
+
+	for (ext_group_num = 0; ext_group_num < WLAN_CFG_INT_NUM_CONTEXTS;
+	     ext_group_num++) {
+		if (mask & grp_mask[ext_group_num])
+			return ext_group_num;
+	}
+
+	return -QDF_STATUS_E_NOENT;
+}
+
+static int dp_srng_calculate_msi_group(struct dp_soc *soc,
+				       enum hal_ring_type ring_type,
+				       int ring_num)
+{
+	int *grp_mask;
+
+	switch (ring_type) {
+	case WBM2SW_RELEASE:
+		/* dp_tx_comp_handler - soc->tx_comp_ring */
+		if (ring_num < 3)
+			grp_mask = &soc->wlan_cfg_ctx->int_tx_ring_mask[0];
+
+		/* dp_rx_wbm_err_process - soc->rx_rel_ring */
+		else if (ring_num == 3) {
+			/* sw treats this as a separate ring type */
+			grp_mask = &soc->wlan_cfg_ctx->
+				int_rx_wbm_rel_ring_mask[0];
+			ring_num = 0;
+		} else {
+			qdf_assert(0);
+			return -QDF_STATUS_E_NOENT;
+		}
+	break;
+
+	case REO_EXCEPTION:
+		/* dp_rx_err_process - &soc->reo_exception_ring */
+		grp_mask = &soc->wlan_cfg_ctx->int_rx_err_ring_mask[0];
+	break;
+
+	case REO_DST:
+		/* dp_rx_process - soc->reo_dest_ring */
+		grp_mask = &soc->wlan_cfg_ctx->int_rx_ring_mask[0];
+	break;
+
+	case REO_STATUS:
+		/* dp_reo_status_ring_handler - soc->reo_status_ring */
+		grp_mask = &soc->wlan_cfg_ctx->int_reo_status_ring_mask[0];
+	break;
+
+	/* dp_rx_mon_status_srng_process - pdev->rxdma_mon_status_ring */
+	case RXDMA_MONITOR_STATUS:
+	/* dp_rx_mon_dest_process - pdev->rxdma_mon_dst_ring */
+	case RXDMA_MONITOR_DST:
+		/* dp_mon_process */
+		grp_mask = &soc->wlan_cfg_ctx->int_rx_mon_ring_mask[0];
+	break;
+
+	case RXDMA_MONITOR_BUF:
+	case RXDMA_BUF:
+		/* TODO: support low_thresh interrupt */
+		return -QDF_STATUS_E_NOENT;
+	break;
+
+	case TCL_DATA:
+	case TCL_CMD:
+	case REO_CMD:
+	case SW2WBM_RELEASE:
+	case WBM_IDLE_LINK:
+		/* normally empty SW_TO_HW rings */
+		return -QDF_STATUS_E_NOENT;
+	break;
+
+	case TCL_STATUS:
+	case REO_REINJECT:
+	case RXDMA_DST:
+		/* misc unused rings */
+		return -QDF_STATUS_E_NOENT;
+	break;
+
+	case CE_SRC:
+	case CE_DST:
+	case CE_DST_STATUS:
+		/* CE_rings - currently handled by hif */
+	default:
+		return -QDF_STATUS_E_NOENT;
+	break;
+	}
+
+	return dp_srng_find_ring_in_mask(ring_num, grp_mask);
+}
+
+static void dp_srng_msi_setup(struct dp_soc *soc, struct hal_srng_params
+			      *ring_params, int ring_type, int ring_num)
+{
+	int msi_group_number;
+	int msi_data_count;
+	int ret;
+	uint32_t msi_data_start, msi_irq_start, addr_low, addr_high;
+
+	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
+					    &msi_data_count, &msi_data_start,
+					    &msi_irq_start);
+
+	if (ret)
+		return;
+
+	msi_group_number = dp_srng_calculate_msi_group(soc, ring_type,
+						       ring_num);
+	if (msi_group_number < 0) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+			FL("ring not part of an ext_group; ring_type: %d, ring_num %d"),
+			ring_type, ring_num);
+		ring_params->msi_addr = 0;
+		ring_params->msi_data = 0;
+		return;
+	}
+
+	if (msi_group_number > msi_data_count) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
+			FL("2 msi_groups will share an msi; msi_group_num %d"),
+			msi_group_number);
+
+		QDF_ASSERT(0);
+	}
+
+	pld_get_msi_address(soc->osdev->dev, &addr_low, &addr_high);
+
+	ring_params->msi_addr = addr_low;
+	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
+	ring_params->msi_data = (msi_group_number % msi_data_count)
+		+ msi_data_start;
+	ring_params->flags |= HAL_SRNG_MSI_INTR;
+}
+
+/**
  * dp_setup_srng - Internal function to setup SRNG rings used by data path
  */
 static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
@@ -166,6 +315,11 @@ static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
 	struct hal_srng_params ring_params;
 	uint32_t max_entries = hal_srng_max_entries(hal_soc, ring_type);
 
+	/* TODO: Currently hal layer takes care of endianness related settings.
+	 * See if these settings need to passed from DP layer
+	 */
+	ring_params.flags = 0;
+
 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
 	srng->hal_srng = NULL;
 	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
@@ -187,9 +341,7 @@ static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
 		(unsigned long)srng->base_vaddr_unaligned);
 	ring_params.num_entries = num_entries;
 
-	/* TODO: Check MSI support and get MSI settings from HIF layer */
-	ring_params.msi_data = 0;
-	ring_params.msi_addr = 0;
+	dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
 
 	/*
 	 * Setup interrupt timer and batch counter thresholds for
@@ -212,11 +364,6 @@ static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
 			wlan_cfg_get_int_timer_threshold_other(soc->wlan_cfg_ctx);
 	}
 
-	/* TODO: Currently hal layer takes care of endianness related settings.
-	 * See if these settings need to passed from DP layer
-	 */
-	ring_params.flags = 0;
-
 	/* Enable low threshold interrupts for rx buffer rings (regular and
 	 * monitor buffer rings.
 	 * TODO: See if this is required for any other ring
@@ -227,6 +374,7 @@ static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
 		 */
 		ring_params.low_threshold = num_entries >> 3;
 		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
+		ring_params.intr_timer_thres_us = 0x1000;
 	}
 
 	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
@@ -415,6 +563,7 @@ static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
 	int i;
 
 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
+		soc->intr_ctx[i].dp_intr_id = i;
 		soc->intr_ctx[i].tx_ring_mask = 0xF;
 		soc->intr_ctx[i].rx_ring_mask = 0xF;
 		soc->intr_ctx[i].rx_mon_ring_mask = 0x1;
@@ -459,6 +608,104 @@ static void dp_soc_interrupt_detach(void *txrx_soc)
 	}
 }
 #else
+
+static void dp_soc_interrupt_map_calculate_integrated(struct dp_soc *soc,
+		int intr_ctx_num, int *irq_id_map, int *num_irq_r)
+{
+	int j;
+	int num_irq = 0;
+
+	int tx_mask =
+		wlan_cfg_get_tx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
+	int rx_mask =
+		wlan_cfg_get_rx_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
+	int rx_mon_mask =
+		wlan_cfg_get_rx_mon_ring_mask(soc->wlan_cfg_ctx, intr_ctx_num);
+	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
+					soc->wlan_cfg_ctx, intr_ctx_num);
+	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
+					soc->wlan_cfg_ctx, intr_ctx_num);
+	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
+					soc->wlan_cfg_ctx, intr_ctx_num);
+
+	for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
+
+		if (tx_mask & (1 << j)) {
+			irq_id_map[num_irq++] =
+				(wbm2host_tx_completions_ring1 - j);
+		}
+
+		if (rx_mask & (1 << j)) {
+			irq_id_map[num_irq++] =
+				(reo2host_destination_ring1 - j);
+		}
+
+		if (rx_mon_mask & (1 << j)) {
+			irq_id_map[num_irq++] =
+				(ppdu_end_interrupts_mac1 - j);
+		}
+		if (rx_wbm_rel_ring_mask & (1 << j))
+			irq_id_map[num_irq++] = wbm2host_rx_release;
+
+		if (rx_err_ring_mask & (1 << j))
+			irq_id_map[num_irq++] = reo2host_exception;
+
+		if (reo_status_ring_mask & (1 << j))
+			irq_id_map[num_irq++] = reo2host_status;
+
+	}
+	*num_irq_r = num_irq;
+}
+
+static void dp_soc_interrupt_map_calculate_msi(struct dp_soc *soc,
+		int intr_ctx_num, int *irq_id_map, int *num_irq_r,
+		int msi_vector_count, int msi_vector_start)
+{
+	int tx_mask = wlan_cfg_get_tx_ring_mask(
+					soc->wlan_cfg_ctx, intr_ctx_num);
+	int rx_mask = wlan_cfg_get_rx_ring_mask(
+					soc->wlan_cfg_ctx, intr_ctx_num);
+	int rx_mon_mask = wlan_cfg_get_rx_mon_ring_mask(
+					soc->wlan_cfg_ctx, intr_ctx_num);
+	int rx_err_ring_mask = wlan_cfg_get_rx_err_ring_mask(
+					soc->wlan_cfg_ctx, intr_ctx_num);
+	int rx_wbm_rel_ring_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
+					soc->wlan_cfg_ctx, intr_ctx_num);
+	int reo_status_ring_mask = wlan_cfg_get_reo_status_ring_mask(
+					soc->wlan_cfg_ctx, intr_ctx_num);
+
+	unsigned int vector =
+		(intr_ctx_num % msi_vector_count) + msi_vector_start;
+	int num_irq = 0;
+
+	if (tx_mask | rx_mask | rx_mon_mask | rx_err_ring_mask |
+	    rx_wbm_rel_ring_mask | reo_status_ring_mask)
+		irq_id_map[num_irq++] =
+			pld_get_msi_irq(soc->osdev->dev, vector);
+
+	*num_irq_r = num_irq;
+}
+
+static void dp_soc_interrupt_map_calculate(struct dp_soc *soc, int intr_ctx_num,
+				    int *irq_id_map, int *num_irq)
+{
+	int msi_vector_count, ret;
+	uint32_t msi_base_data, msi_vector_start;
+
+	ret = pld_get_user_msi_assignment(soc->osdev->dev, "DP",
+					    &msi_vector_count,
+					    &msi_base_data,
+					    &msi_vector_start);
+	if (ret)
+		dp_soc_interrupt_map_calculate_integrated(soc,
+				intr_ctx_num, irq_id_map, num_irq);
+	else
+		dp_soc_interrupt_map_calculate_msi(soc,
+				intr_ctx_num, irq_id_map, num_irq,
+				msi_vector_count, msi_vector_start);
+}
+
 /*
  * dp_soc_interrupt_attach() - Register handlers for DP interrupts
  * @txrx_soc: DP SOC handle
@@ -478,7 +725,6 @@ static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
 
 
 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
-		int j = 0;
 		int ret = 0;
 
 		/* Map of IRQ ids registered with one interrupt context */
@@ -499,6 +745,7 @@ static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
 		int rxdma2host_ring_mask =
 			wlan_cfg_get_rxdma2host_ring_mask(soc->wlan_cfg_ctx, i);
 
+		soc->intr_ctx[i].dp_intr_id = i;
 		soc->intr_ctx[i].tx_ring_mask = tx_mask;
 		soc->intr_ctx[i].rx_ring_mask = rx_mask;
 		soc->intr_ctx[i].rx_mon_ring_mask = rx_mon_mask;
@@ -511,40 +758,8 @@ static QDF_STATUS dp_soc_interrupt_attach(void *txrx_soc)
 
 		num_irq = 0;
 
-		for (j = 0; j < HIF_MAX_GRP_IRQ; j++) {
-
-			if (tx_mask & (1 << j)) {
-				irq_id_map[num_irq++] =
-					(wbm2host_tx_completions_ring1 - j);
-			}
-
-			if (rx_mask & (1 << j)) {
-				irq_id_map[num_irq++] =
-					(reo2host_destination_ring1 - j);
-			}
-
-			if (rxdma2host_ring_mask & (1 << j)) {
-				irq_id_map[num_irq++] =
-					rxdma2host_destination_ring_mac1 -
-					wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
-			}
-
-			if (rx_mon_mask & (1 << j)) {
-				irq_id_map[num_irq++] =
-					ppdu_end_interrupts_mac1 -
-					wlan_cfg_get_hw_mac_idx(soc->wlan_cfg_ctx, j);
-			}
-
-			if (rx_wbm_rel_ring_mask & (1 << j))
-				irq_id_map[num_irq++] = wbm2host_rx_release;
-
-			if (rx_err_ring_mask & (1 << j))
-				irq_id_map[num_irq++] = reo2host_exception;
-
-			if (reo_status_ring_mask & (1 << j))
-				irq_id_map[num_irq++] = reo2host_status;
-
-		}
+		dp_soc_interrupt_map_calculate(soc, i, &irq_id_map[0],
+					       &num_irq);
 
 		ret = hif_register_ext_group(soc->hif_handle,
 				num_irq, irq_id_map, dp_service_srngs,
@@ -580,6 +795,10 @@ static void dp_soc_interrupt_detach(void *txrx_soc)
 		soc->intr_ctx[i].tx_ring_mask = 0;
 		soc->intr_ctx[i].rx_ring_mask = 0;
 		soc->intr_ctx[i].rx_mon_ring_mask = 0;
+		soc->intr_ctx[i].rx_err_ring_mask = 0;
+		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
+		soc->intr_ctx[i].reo_status_ring_mask = 0;
+
 		qdf_lro_deinit(soc->intr_ctx[i].lro_ctx);
 	}
 }
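
A self-contained sketch of the reverse lookup performed by dp_srng_find_ring_in_mask() above; the mask values are assumed, in the spirit of the CONFIG_MCL rx_ring_mask table added to wlan_cfg.c further down:

/* Illustrative only: each group owns a bitmask of ring numbers; finding a
 * ring's group is a search for the group whose mask has that bit set. */
#include <stdio.h>

#define NUM_GROUPS 7

static int find_ring_in_mask(int ring_num, const int *grp_mask)
{
	int mask = 1 << ring_num;
	int grp;

	for (grp = 0; grp < NUM_GROUPS; grp++)
		if (mask & grp_mask[grp])
			return grp;

	return -1; /* stands in for -QDF_STATUS_E_NOENT */
}

int main(void)
{
	/* assumed: REO destination rings 0..3 land in groups 2, 4, 5 and 6 */
	const int rx_ring_mask[NUM_GROUPS] = { 0, 0, 0x1, 0, 0x2, 0x4, 0x8 };
	int ring;

	for (ring = 0; ring < 4; ring++)
		printf("reo_dst ring %d -> ext_group %d\n",
		       ring, find_ring_in_mask(ring, rx_ring_mask));
	return 0;
}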

+ 5 - 12
dp/wifi3.0/dp_rx_mon_dest.c

@@ -730,27 +730,21 @@ mon_deliver_fail:
 void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
 {
 	struct dp_pdev *pdev = soc->pdev_list[mac_id];
-	uint8_t pdev_id = pdev->pdev_id;
+	uint8_t pdev_id;
 	void *hal_soc;
 	void *rxdma_dst_ring_desc;
-	void *mon_dst_srng = pdev->rxdma_mon_dst_ring.hal_srng;
+	void *mon_dst_srng;
 	union dp_rx_desc_list_elem_t *head = NULL;
 	union dp_rx_desc_list_elem_t *tail = NULL;
 	uint32_t ppdu_id;
 	uint32_t rx_bufs_used;
 
-#ifdef DP_INTR_POLL_BASED
-	if (!pdev)
-		return;
-#endif
-
 	pdev_id = pdev->pdev_id;
 	mon_dst_srng = pdev->rxdma_mon_dst_ring.hal_srng;
 
-	if (!mon_dst_srng) {
+	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			"%s %d : HAL Monitor Destination Ring Init \
-			Failed -- %p\n",
+			"%s %d : HAL Monitor Destination Ring Init Failed -- %p\n",
 			__func__, __LINE__, mon_dst_srng);
 		return;
 	}
@@ -761,8 +755,7 @@ void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
 
 	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dst_srng))) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			"%s %d : HAL Monitor Destination Ring Init \
-			Failed -- %p\n",
+			"%s %d : HAL Monitor Destination Ring access Failed -- %p\n",
 			__func__, __LINE__, mon_dst_srng);
 		return;
 	}

+ 7 - 10
dp/wifi3.0/dp_rx_mon_status.c

@@ -47,11 +47,6 @@ dp_rx_mon_status_process_tlv(struct dp_soc *soc, uint32_t mac_id,
 	uint8_t *rx_tlv_start;
 	uint32_t tlv_status = HAL_TLV_STATUS_DUMMY;
 
-#ifdef DP_INTR_POLL_BASED
-	if (!pdev)
-		return;
-#endif
-
 	ppdu_info = &pdev->ppdu_info;
 
 	if (pdev->mon_ppdu_status != DP_PPDU_STATUS_START)
@@ -116,14 +111,16 @@ dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
 	QDF_STATUS status;
 	uint32_t work_done = 0;
 
-#ifdef DP_INTR_POLL_BASED
-	if (!pdev)
-		return work_done;
-#endif
-
 	mon_status_srng = pdev->rxdma_mon_status_ring.hal_srng;
 
 	qdf_assert(mon_status_srng);
+	if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s %d : HAL Monitor Status Ring Init Failed -- %p\n",
+			__func__, __LINE__, mon_status_srng);
+		return work_done;
+	}
 
 	hal_soc = soc->hal_soc;
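
A minimal model of the guard added above, assuming only what the new hal_srng_initialized() helper exposes (it reports the flag set at the end of hal_srng_setup()); the toy_* names are hypothetical stand-ins:

/* Illustrative only: skip ring processing when the ring was never set up,
 * e.g. a monitor status ring on a configuration that does not create it. */
#include <stdbool.h>
#include <stdio.h>

struct toy_srng {
	bool initialized;   /* set by ring setup, false until then */
};

static bool toy_srng_ready(const struct toy_srng *srng)
{
	return srng && srng->initialized;
}

int main(void)
{
	struct toy_srng ring = { .initialized = false };

	if (!toy_srng_ready(&ring)) {
		printf("monitor ring not initialized, nothing to do\n");
		return 0;
	}
	printf("process ring entries\n");
	return 0;
}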
 

+ 1 - 0
dp/wifi3.0/dp_types.h

@@ -424,6 +424,7 @@ struct dp_intr {
 	struct dp_soc *soc;    /* Reference to SoC structure ,
 				to get DMA ring handles */
 	qdf_lro_ctx_t lro_ctx;
+	uint8_t dp_intr_id;
 };
 
 #define REO_DESC_FREELIST_SIZE 64
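
A short sketch of what the new dp_intr_id member buys, under the assumption that a service routine only receives a pointer to its interrupt context; all toy_* names are hypothetical stand-ins:

/* Illustrative only: storing the context index inside the context lets a
 * handler identify which interrupt context it is servicing. */
#include <stdint.h>
#include <stdio.h>

#define NUM_CTX 7

struct toy_intr_ctx {
	uint8_t dp_intr_id;   /* index of this context, set at attach time */
};

static void toy_service(struct toy_intr_ctx *ctx)
{
	printf("servicing interrupt context %u\n", ctx->dp_intr_id);
}

int main(void)
{
	struct toy_intr_ctx ctx[NUM_CTX];
	int i;

	for (i = 0; i < NUM_CTX; i++)
		ctx[i].dp_intr_id = i;   /* as dp_soc_interrupt_attach() does */

	toy_service(&ctx[4]);   /* what a handler would be handed */
	return 0;
}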

+ 16 - 0
hal/wifi3.0/hal_api.h

@@ -194,6 +194,15 @@ extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
  */
 uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
 
+/**
+ * hal_srng_get_dir - Returns the direction of the ring
+ * @hal_soc: Opaque HAL SOC handle
+ * @ring_type: one of the types from hal_ring_type
+ *
+ * Return: Ring direction
+ */
+enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
+
 /* HAL memory information */
 struct hal_mem_info {
 	/* dev base virutal addr */
@@ -306,6 +315,13 @@ extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
  */
 extern void hal_srng_cleanup(void *hal_soc, void *hal_srng);
 
+static inline bool hal_srng_initialized(void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+
+	return !!srng->initialized;
+}
+
 /**
  * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
  * hal_srng_access_start if locked access is required

+ 10 - 5
hal/wifi3.0/hal_srng.c

@@ -840,9 +840,6 @@ static inline void hal_srng_src_hw_init(struct hal_soc *hal,
 		SRNG_SRC_REG_WRITE(srng, MSI1_DATA, srng->msi_data);
 	}
 
-	HIF_INFO("%s: hw_init srng (msi_end) %d", __func__, srng->ring_id);
-
-
 	SRNG_SRC_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff);
 	reg_val = SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_BASE_ADDR_MSB),
 		((uint64_t)(srng->ring_base_paddr) >> 32)) |
@@ -983,8 +980,6 @@ static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
 		SRNG_DST_REG_WRITE(srng, MSI1_DATA, srng->msi_data);
 	}
 
-	HIF_INFO("%s: hw_init srng msi end %d", __func__, srng->ring_id);
-
 	SRNG_DST_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff);
 	reg_val = SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_BASE_ADDR_MSB),
 		((uint64_t)(srng->ring_base_paddr) >> 32)) |
@@ -1227,6 +1222,8 @@ void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
 
 	SRNG_LOCK_INIT(&srng->lock);
 
+	srng->initialized = true;
+
 	return (void *)srng;
 }
 
@@ -1268,6 +1265,14 @@ uint32_t hal_srng_max_entries(void *hal_soc, int ring_type)
 	return SRNG_MAX_SIZE_DWORDS / ring_config->entry_size;
 }
 
+enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type)
+{
+	struct hal_hw_srng_config *ring_config =
+		HAL_SRNG_CONFIG(hal, ring_type);
+
+	return ring_config->ring_dir;
+}
+
 /**
  * hal_get_srng_params - Retreive SRNG parameters for a given ring from HAL
  *

+ 34 - 35
hif/src/ce/ce_service_srng.c

@@ -585,9 +585,6 @@ static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
 	if (ret)
 		return;
 
-	HIF_INFO("%s: ce_id %d, msi_start: %d, msi_count %d", __func__, ce_id,
-		  msi_data_start, msi_data_count);
-
 	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);
 
 	ring_params->msi_addr = addr_low;
@@ -595,19 +592,18 @@ static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
 	ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
 	ring_params->flags |= HAL_SRNG_MSI_INTR;
 
-	HIF_INFO("%s: ce_id %d, msi_addr %p, msi_data %d", __func__, ce_id,
+	HIF_DBG("%s: ce_id %d, msi_addr %p, msi_data %d", __func__, ce_id,
 		  (void *)ring_params->msi_addr, ring_params->msi_data);
 }
 
 static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
-			struct CE_ring_state *src_ring)
+			struct CE_ring_state *src_ring,
+			struct CE_attr *attr)
 {
 	struct hal_srng_params ring_params = {0};
 
 	HIF_INFO("%s: ce_id %d", __func__, ce_id);
 
-	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
-
 	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
 	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
 	ring_params.num_entries = src_ring->nentries;
@@ -617,14 +613,13 @@ static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
 	 * A valid default value caused continuous interrupts to
 	 * fire with MSI enabled. Need to revisit usage of the timer
 	 */
-	ring_params.intr_timer_thres_us = 0;
-	ring_params.intr_batch_cntr_thres_entries = 1;
 
-	/* TODO
-	 * ring_params.msi_addr = XXX;
-	 * ring_params.msi_data = XXX;
-	 * ring_params.flags = XXX;
-	 */
+	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
+		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
+
+		ring_params.intr_timer_thres_us = 0;
+		ring_params.intr_batch_cntr_thres_entries = 1;
+	}
 
 	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
 			&ring_params);
@@ -635,25 +630,31 @@ static void ce_srng_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
 				struct CE_attr *attr)
 {
 	struct hal_srng_params ring_params = {0};
+	bool status_ring_timer_thresh_work_arround = true;
 
 	HIF_INFO("%s: ce_id %d", __func__, ce_id);
 
-	ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
-
 	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
 	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
 	ring_params.num_entries = dest_ring->nentries;
-	ring_params.low_threshold = dest_ring->nentries - 1;
-	ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
-	ring_params.intr_timer_thres_us = 1024;
-	ring_params.intr_batch_cntr_thres_entries = 0;
 	ring_params.max_buffer_length = attr->src_sz_max;
 
-	/* TODO
-	 * ring_params.msi_addr = XXX;
-	 * ring_params.msi_data = XXX;
-	 * ring_params.flags = XXX;
-	 */
+	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
+		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
+		if (status_ring_timer_thresh_work_arround) {
+			/* HW bug workaround */
+			ring_params.low_threshold = dest_ring->nentries - 1;
+			ring_params.intr_timer_thres_us = 1024;
+			ring_params.intr_batch_cntr_thres_entries = 0;
+			ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
+		} else {
+			/* normal behavior for future chips */
+			ring_params.low_threshold = dest_ring->nentries >> 3;
+			ring_params.intr_timer_thres_us = 100000;
+			ring_params.intr_batch_cntr_thres_entries = 0;
+			ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
+		}
+	}
 
 	/*Dest ring is also source ring*/
 	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
@@ -661,7 +662,8 @@ static void ce_srng_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
 }
 
 static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
-				struct CE_ring_state *status_ring)
+				struct CE_ring_state *status_ring,
+				struct CE_attr *attr)
 {
 	struct hal_srng_params ring_params = {0};
 
@@ -672,14 +674,11 @@ static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
 	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
 	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
 	ring_params.num_entries = status_ring->nentries;
-	ring_params.intr_timer_thres_us = 0;
-	ring_params.intr_batch_cntr_thres_entries = 1;
 
-	/* TODO
-	 * ring_params.msi_addr = XXX;
-	 * ring_params.msi_data = XXX;
-	 * ring_params.flags = XXX;
-	 */
+	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
+		ring_params.intr_timer_thres_us = 0x1000;
+		ring_params.intr_batch_cntr_thres_entries = 0x1;
+	}
 
 	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
 			ce_id, 0, &ring_params);
@@ -691,13 +690,13 @@ static void ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
 {
 	switch (ring_type) {
 	case CE_RING_SRC:
-		ce_srng_src_ring_setup(scn, ce_id, ring);
+		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
 		break;
 	case CE_RING_DEST:
 		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
 		break;
 	case CE_RING_STATUS:
-		ce_srng_status_ring_setup(scn, ce_id, ring);
+		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
 		break;
 	default:
 		qdf_assert(0);
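
A tiny sketch of the MSI address assembly done in both ce_srng_msi_ring_params_setup() and dp_srng_msi_setup(): pld_get_msi_address() reports the target address as two 32-bit halves that are combined into one 64-bit value (the example address below is assumed):

/* Illustrative only: combine the low/high halves of the MSI target address. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr_low = 0xfee00000u;   /* assumed example value */
	uint32_t addr_high = 0x0u;         /* assumed example value */
	uint64_t msi_addr;

	msi_addr = addr_low;
	msi_addr |= ((uint64_t)addr_high) << 32;

	printf("msi_addr = 0x%llx\n", (unsigned long long)msi_addr);
	return 0;
}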

+ 1 - 0
hif/src/ce/ce_tasklet.c

@@ -398,6 +398,7 @@ irqreturn_t ce_dispatch_interrupt(int ce_id,
 			  __func__, tasklet_entry->ce_id, CE_COUNT_MAX);
 		return IRQ_NONE;
 	}
+
 	hif_irq_disable(scn, ce_id);
 	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT, NULL, NULL, 0);
 	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

+ 1 - 0
hif/src/dispatcher/multibus_pci.c

@@ -83,6 +83,7 @@ QDF_STATUS hif_initialize_pci_ops(struct hif_softc *hif_sc)
 		&hif_pci_enable_power_management;
 	bus_ops->hif_disable_power_management =
 		&hif_pci_disable_power_management;
+	bus_ops->hif_grp_irq_configure = &hif_pci_configure_grp_irq;
 	bus_ops->hif_display_stats =
 		&hif_pci_display_stats;
 	bus_ops->hif_clear_stats =

+ 4 - 0
hif/src/dispatcher/pci_api.h

@@ -26,6 +26,8 @@
  */
 #ifndef _PCI_API_H_
 #define _PCI_API_H_
+struct hif_exec_context;
+
 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx,
 			enum qdf_bus_type bus_type);
 void hif_pci_close(struct hif_softc *hif_ctx);
@@ -52,6 +54,8 @@ int hif_pci_dump_registers(struct hif_softc *scn);
 void hif_pci_enable_power_management(struct hif_softc *hif_ctx,
 				 bool is_packet_log_enabled);
 void hif_pci_disable_power_management(struct hif_softc *hif_ctx);
+int hif_pci_configure_grp_irq(struct hif_softc *scn,
+			      struct hif_exec_context *exec);
 void hif_pci_display_stats(struct hif_softc *hif_ctx);
 void hif_pci_clear_stats(struct hif_softc *hif_ctx);
 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id);

+ 0 - 1
hif/src/hif_exec.c

@@ -299,7 +299,6 @@ irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
 	struct hif_exec_context *hif_ext_group = context;
 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
 
-
 	hif_ext_group->irq_disable(hif_ext_group);
 	qdf_atomic_inc(&scn->active_grp_tasklet_cnt);
 

+ 74 - 2
hif/src/pcie/if_pci.c

@@ -54,6 +54,7 @@
 #include "if_pci_internal.h"
 #include "ce_tasklet.h"
 #include "targaddrs.h"
+#include "hif_exec.h"
 
 #include "pci_api.h"
 #include "ahb_api.h"
@@ -2592,6 +2593,25 @@ static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
 	return ret;
 }
 
+static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
+{
+	int i, j, irq;
+	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
+	struct hif_exec_context *hif_ext_group;
+
+	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
+		hif_ext_group = hif_state->hif_ext_group[i];
+		if (hif_ext_group->irq_requested) {
+			hif_ext_group->irq_requested = false;
+			for (j = 0; j < hif_ext_group->numirq; j++) {
+				irq = hif_ext_group->os_irq[j];
+				free_irq(irq, hif_ext_group);
+			}
+			hif_ext_group->numirq = 0;
+		}
+	}
+}
+
 /**
  * hif_nointrs(): disable IRQ
  *
@@ -2612,6 +2632,8 @@ void hif_pci_nointrs(struct hif_softc *scn)
 	if (scn->request_irq_done == false)
 		return;
 
+	hif_pci_deconfigure_grp_irq(scn);
+
 	ret = hif_ce_srng_msi_free_irq(scn);
 	if (ret != 0 && sc->num_msi_intrs > 0) {
 		/* MSI interrupt(s) */
@@ -3623,11 +3645,14 @@ static int hif_ce_msi_configure_irq(struct hif_softc *scn)
 		unsigned int msi_data = (ce_id % msi_data_count) +
 			msi_irq_start;
 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
-
-		HIF_INFO("%s: (ce_id %d, msi_data %d, irq %d tasklet %p)",
+		HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %p)",
 			 __func__, ce_id, msi_data, irq,
 			 &ce_sc->tasklets[ce_id]);
 
+		/* implies the ce is also initialized */
+		if (!ce_sc->tasklets[ce_id].inited)
+			continue;
+
 		pci_sc->ce_msi_irq_num[ce_id] = irq;
 		ret = request_irq(irq, hif_ce_interrupt_handler,
 				  IRQF_SHARED,
@@ -3652,6 +3677,53 @@ free_irq:
 	return ret;
 }
 
+static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
+{
+	int i;
+
+	for (i = 0; i < hif_ext_group->numirq; i++)
+		disable_irq_nosync(hif_ext_group->os_irq[i]);
+}
+
+static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
+{
+	int i;
+
+	for (i = 0; i < hif_ext_group->numirq; i++)
+		enable_irq(hif_ext_group->os_irq[i]);
+}
+
+
+int hif_pci_configure_grp_irq(struct hif_softc *scn,
+			      struct hif_exec_context *hif_ext_group)
+{
+	int ret = 0;
+	int irq = 0;
+	int j;
+
+	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
+	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
+	hif_ext_group->work_complete = &hif_dummy_grp_done;
+
+	for (j = 0; j < hif_ext_group->numirq; j++) {
+		irq = hif_ext_group->irq[j];
+
+		HIF_DBG("%s: request_irq = %d for grp %d",
+			  __func__, irq, hif_ext_group->grp_id);
+		ret = request_irq(irq,
+				  hif_ext_group_interrupt_handler,
+				  IRQF_SHARED, "wlan_EXT_GRP",
+				  hif_ext_group);
+		if (ret) {
+			HIF_ERROR("%s: request_irq failed ret = %d",
+				  __func__, ret);
+			return -EFAULT;
+		}
+		hif_ext_group->os_irq[j] = irq;
+	}
+	hif_ext_group->irq_requested = true;
+	return 0;
+}
 
 /**
  * hif_configure_irq() - configure interrupt
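
A compact model of the group IRQ lifecycle implemented above: hif_pci_configure_grp_irq() requests one shared handler per IRQ in the group and records it in os_irq[], and hif_pci_deconfigure_grp_irq() frees exactly what was requested. The toy_* names are hypothetical stand-ins, and the request/free calls stand in for request_irq()/free_irq():

/* Illustrative only: per-group IRQ request and teardown bookkeeping. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_GRP_IRQ 16

struct toy_exec_ctx {
	int numirq;
	int irq[TOY_MAX_GRP_IRQ];
	int os_irq[TOY_MAX_GRP_IRQ];
	bool irq_requested;
};

static int toy_configure_grp_irq(struct toy_exec_ctx *grp)
{
	int j;

	for (j = 0; j < grp->numirq; j++) {
		grp->os_irq[j] = grp->irq[j];   /* stands in for request_irq() */
		printf("requested irq %d\n", grp->os_irq[j]);
	}
	grp->irq_requested = true;
	return 0;
}

static void toy_deconfigure_grp_irq(struct toy_exec_ctx *grp)
{
	int j;

	if (!grp->irq_requested)
		return;
	grp->irq_requested = false;
	for (j = 0; j < grp->numirq; j++)
		printf("freed irq %d\n", grp->os_irq[j]); /* free_irq() stand-in */
	grp->numirq = 0;
}

int main(void)
{
	struct toy_exec_ctx grp = { .numirq = 2, .irq = { 40, 41 } };

	toy_configure_grp_irq(&grp);
	toy_deconfigure_grp_irq(&grp);
	return 0;
}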

+ 30 - 69
wlan_cfg/wlan_cfg.c

@@ -174,6 +174,34 @@
 #define WLAN_RX_HASH_ENABLE 0
 #endif
 
+#ifdef CONFIG_MCL
+static const int tx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {
+						0,
+						WLAN_CFG_TX_RING_MASK_0,
+						0,
+						0,
+						WLAN_CFG_TX_RING_MASK_1,
+						WLAN_CFG_TX_RING_MASK_2,
+						WLAN_CFG_TX_RING_MASK_3};
+
+static const int rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {
+					0,
+					0,
+					WLAN_CFG_RX_RING_MASK_0,
+					0,
+					WLAN_CFG_RX_RING_MASK_1,
+					WLAN_CFG_RX_RING_MASK_2,
+					WLAN_CFG_RX_RING_MASK_3};
+
+static const int rx_mon_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {
+					0,
+					0,
+					0,
+					WLAN_CFG_RX_MON_RING_MASK_0,
+					WLAN_CFG_RX_MON_RING_MASK_1,
+					WLAN_CFG_RX_MON_RING_MASK_2,
+					WLAN_CFG_RX_MON_RING_MASK_3};
+#else
 static const int tx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {
 						WLAN_CFG_TX_RING_MASK_0,
 						WLAN_CFG_TX_RING_MASK_1,
@@ -192,6 +220,8 @@ static const int rx_mon_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {
 					WLAN_CFG_RX_MON_RING_MASK_2,
 					WLAN_CFG_RX_MON_RING_MASK_3};
 
+#endif
+
 static const int rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {
 					WLAN_CFG_RX_ERR_RING_MASK_0,
 					WLAN_CFG_RX_ERR_RING_MASK_1,
@@ -216,75 +246,6 @@ static const int rxdma2host_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {
 					WLAN_CFG_RXDMA2HOST_RING_MASK_2,
 					WLAN_CFG_RXDMA2HOST_RING_MASK_3};
 
-/**
- * struct wlan_cfg_dp_soc_ctxt - Configuration parameters for SoC (core TxRx)
- * @num_int_ctxts - Number of NAPI/Interrupt contexts to be registered for DP
- * @max_clients - Maximum number of peers/stations supported by device
- * @max_alloc_size - Maximum allocation size for any dynamic memory
- *			allocation request for this device
- * @per_pdev_tx_ring - 0 - TCL ring is not mapped per radio
- *		       1 - Each TCL ring is mapped to one radio/pdev
- * @num_tcl_data_rings - Number of TCL Data rings supported by device
- * @per_pdev_rx_ring - 0 - REO ring is not mapped per radio
- *		       1 - Each REO ring is mapped to one radio/pdev
- * @num_tx_desc_pool - Number of Tx Descriptor pools
- * @num_tx_ext_desc_pool - Number of Tx MSDU extension Descriptor pools
- * @num_tx_desc - Number of Tx Descriptors per pool
- * @num_tx_ext_desc - Number of Tx MSDU extension Descriptors per pool
- * @max_peer_id - Maximum value of peer id that FW can assign for a client
- * @htt_packet_type - Default 802.11 encapsulation type for any VAP created
- * @int_tx_ring_mask - Bitmap of Tx interrupts mapped to each NAPI/Intr context
- * @int_rx_ring_mask - Bitmap of Rx interrupts mapped to each NAPI/Intr context
- * @int_rx_mon_ring_mask - Bitmap of Rx monitor ring interrupts mapped to each
- *			  NAPI/Intr context
- * @int_rxdma2host_ring_mask - Bitmap of RXDMA2host ring interrupts mapped to
- *		each NAPI/Intr context
- * @int_ce_ring_mask - Bitmap of CE interrupts mapped to each NAPI/Intr context
- * @lro_enabled - is LRO enabled
- * @rx_hash - Enable hash based steering of rx packets
- *
- */
-struct wlan_cfg_dp_soc_ctxt {
-	int num_int_ctxts;
-	int max_clients;
-	int max_alloc_size;
-	int per_pdev_tx_ring;
-	int num_tcl_data_rings;
-	int per_pdev_rx_ring;
-	int num_reo_dest_rings;
-	int num_tx_desc_pool;
-	int num_tx_ext_desc_pool;
-	int num_tx_desc;
-	int num_tx_ext_desc;
-	int max_peer_id;
-	int htt_packet_type;
-	int int_batch_threshold_tx;
-	int int_timer_threshold_tx;
-	int int_batch_threshold_rx;
-	int int_timer_threshold_rx;
-	int int_batch_threshold_other;
-	int int_timer_threshold_other;
-	int tx_ring_size;
-	int tx_comp_ring_size;
-	int int_tx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
-	int int_rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
-	int int_rx_mon_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
-	int int_rxdma2host_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
-	int int_ce_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
-	int int_rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
-	int int_rx_wbm_rel_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
-	int int_reo_status_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
-	bool lro_enabled;
-	bool rx_hash;
-	int nss_cfg;
-	int hw_macid[MAX_PDEV_CNT];
-	int base_hw_macid;
-#ifdef QCA_LL_TX_FLOW_CONTROL_V2
-	int tx_flow_stop_queue_th;
-	int tx_flow_start_queue_offset;
-#endif
-};
-
 /**
  * struct wlan_cfg_dp_pdev_ctxt - Configuration parameters for pdev (radio)
  * @rx_dma_buf_ring_size - Size of RxDMA buffer ring

+ 107 - 8
wlan_cfg/wlan_cfg.h

@@ -25,8 +25,10 @@
  */
 #if defined(CONFIG_MCL)
 #define MAX_PDEV_CNT 1
+#define WLAN_CFG_INT_NUM_CONTEXTS 7
 #else
 #define MAX_PDEV_CNT 3
+#define WLAN_CFG_INT_NUM_CONTEXTS 4
 #endif
 
 /* Tx configuration */
@@ -52,9 +54,79 @@
 #define DP_MAX_TIDS 17
 #define DP_NON_QOS_TID 16
 
-#define WLAN_CFG_INT_NUM_CONTEXTS 4
 struct wlan_cfg_dp_pdev_ctxt;
-struct wlan_cfg_dp_soc_ctxt;
+/**
+ * struct wlan_cfg_dp_soc_ctxt - Configuration parameters for SoC (core TxRx)
+ * @num_int_ctxts - Number of NAPI/Interrupt contexts to be registered for DP
+ * @max_clients - Maximum number of peers/stations supported by device
+ * @max_alloc_size - Maximum allocation size for any dynamic memory
+ *			allocation request for this device
+ * @per_pdev_tx_ring - 0 - TCL ring is not mapped per radio
+ *		       1 - Each TCL ring is mapped to one radio/pdev
+ * @num_tcl_data_rings - Number of TCL Data rings supported by device
+ * @per_pdev_rx_ring - 0 - REO ring is not mapped per radio
+ *		       1 - Each REO ring is mapped to one radio/pdev
+ * @num_tx_desc_pool - Number of Tx Descriptor pools
+ * @num_tx_ext_desc_pool - Number of Tx MSDU extension Descriptor pools
+ * @num_tx_desc - Number of Tx Descriptors per pool
+ * @num_tx_ext_desc - Number of Tx MSDU extension Descriptors per pool
+ * @max_peer_id - Maximum value of peer id that FW can assign for a client
+ * @htt_packet_type - Default 802.11 encapsulation type for any VAP created
+ * @int_tx_ring_mask - Bitmap of Tx interrupts mapped to each NAPI/Intr context
+ * @int_rx_ring_mask - Bitmap of Rx interrupts mapped to each NAPI/Intr context
+ * @int_rx_mon_ring_mask - Bitmap of Rx monitor ring interrupts mapped to each
+ *			  NAPI/Intr context
+ * @int_rx_err_ring_mask - Bitmap of Rx err ring interrupts mapped to each
+ *			  NAPI/Intr context
+ * @int_rx_wbm_rel_ring_mask - Bitmap of WBM release ring interrupts mapped to
+ *			  each NAPI/Intr context
+ * @int_reo_status_ring_mask - Bitmap of REO status ring interrupts mapped to
+ *			  each NAPI/Intr context
+ * @int_rxdma2host_ring_mask - Bitmap of RXDMA2host ring interrupts mapped to
+ *			  each NAPI/Intr context
+ * @int_ce_ring_mask - Bitmap of CE interrupts mapped to each NAPI/Intr context
+ * @lro_enabled - is LRO enabled
+ * @rx_hash - Enable hash based steering of rx packets
+ * @nss_cfg - nss configuration
+ */
+struct wlan_cfg_dp_soc_ctxt {
+	int num_int_ctxts;
+	int max_clients;
+	int max_alloc_size;
+	int per_pdev_tx_ring;
+	int num_tcl_data_rings;
+	int per_pdev_rx_ring;
+	int num_reo_dest_rings;
+	int num_tx_desc_pool;
+	int num_tx_ext_desc_pool;
+	int num_tx_desc;
+	int num_tx_ext_desc;
+	int max_peer_id;
+	int htt_packet_type;
+	int int_batch_threshold_tx;
+	int int_timer_threshold_tx;
+	int int_batch_threshold_rx;
+	int int_timer_threshold_rx;
+	int int_batch_threshold_other;
+	int int_timer_threshold_other;
+	int tx_ring_size;
+	int tx_comp_ring_size;
+	int int_tx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
+	int int_rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
+	int int_rx_mon_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
+	int int_ce_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
+	int int_rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
+	int int_rx_wbm_rel_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
+	int int_reo_status_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
+	int int_rxdma2host_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS];
+	int hw_macid[MAX_PDEV_CNT];
+	int base_hw_macid;
+	bool lro_enabled;
+	bool rx_hash;
+	int nss_cfg;
+#ifdef QCA_LL_TX_FLOW_CONTROL_V2
+	int tx_flow_stop_queue_th;
+	int tx_flow_start_queue_offset;
+#endif
+};
 
 /**
  * wlan_cfg_soc_attach() - Attach configuration interface for SoC
@@ -115,12 +187,6 @@ int wlan_cfg_set_rx_wbm_rel_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg,
 					int context, int mask);
 int wlan_cfg_set_reo_status_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg,
 					int context, int mask);
-int wlan_cfg_get_rx_err_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg,
-					int context);
-int wlan_cfg_get_rx_wbm_rel_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg,
-					int context);
-int wlan_cfg_get_reo_status_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg,
-					int context);
 /**
  * wlan_cfg_get_num_contexts() - Number of interrupt contexts to be registered
  * @wlan_cfg_ctx - Configuration Handle
@@ -220,6 +286,39 @@ int wlan_cfg_get_hw_macid(struct wlan_cfg_dp_soc_ctxt *cfg, int pdev_idx);
  */
 int wlan_cfg_get_hw_mac_idx(struct wlan_cfg_dp_soc_ctxt *cfg, int pdev_idx);
 
+/**
+ * wlan_cfg_get_rx_err_ring_mask() - Return Rx error ring interrupt mask
+ *					   mapped to an interrupt context
+ * @wlan_cfg_ctx - Configuration Handle
+ * @context - Numerical ID identifying the Interrupt/NAPI context
+ *
+ * Return: int_rx_err_ring_mask[context]
+ */
+int wlan_cfg_get_rx_err_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int
+				  context);
+
+/**
+ * wlan_cfg_get_rx_wbm_rel_ring_mask() - Return Rx WBM release ring interrupt
+ *					   mask mapped to an interrupt context
+ * @wlan_cfg_ctx - Configuration Handle
+ * @context - Numerical ID identifying the Interrupt/NAPI context
+ *
+ * Return: int_rx_wbm_rel_ring_mask[context]
+ */
+int wlan_cfg_get_rx_wbm_rel_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int
+				      context);
+
+/**
+ * wlan_cfg_get_reo_status_ring_mask() - Return REO status ring interrupt mask
+ *					   mapped to an interrupt context
+ * @wlan_cfg_ctx - Configuration Handle
+ * @context - Numerical ID identifying the Interrupt/NAPI context
+ *
+ * Return: int_reo_status_ring_mask[context]
+ */
+int wlan_cfg_get_reo_status_ring_mask(struct wlan_cfg_dp_soc_ctxt *cfg, int
+				      context);
+
 /**
  * wlan_cfg_get_ce_ring_mask() - Return CE ring interrupt mask
  *				mapped to an interrupt context