Merge "msm: ipa: napi on tx completion changed to polling"

qctecmdr 4 years ago
parent
commit
93bd81f260

+ 78 - 25
drivers/platform/msm/gsi/gsi.c

@@ -683,8 +683,8 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
 		 * Increment RP local only in polling context to avoid
 		 * sys len mismatch.
 		 */
-		if (!(callback && ch_ctx->props.dir ==
-					GSI_CHAN_DIR_FROM_GSI))
+		if (!callback || (ch_ctx->props.dir == GSI_CHAN_DIR_TO_GSI &&
+			!ch_ctx->props.tx_poll))
 			/* the element at RP is also processed */
 			gsi_incr_ring_rp(&ch_ctx->ring);
 
@@ -706,7 +706,8 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
 	 * channel will receive the IEOB interrupt and xfer pointer will be
 	 * overwritten. To avoid this process all data in polling context.
 	 */
-	if (!(callback && ch_ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)) {
+	if (!callback || (ch_ctx->props.dir == GSI_CHAN_DIR_TO_GSI &&
+		!ch_ctx->props.tx_poll)) {
 		ch_ctx->stats.completed++;
 		ch_ctx->user_data[rp_idx].valid = false;
 	}
@@ -738,7 +739,8 @@ static void gsi_process_evt_re(struct gsi_evt_ctx *ctx,
 	 * sys len mismatch.
 	 */
 	ch_ctx = &gsi_ctx->chan[evt->chid];
-	if (callback && ch_ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
+	if (callback && (ch_ctx->props.dir == GSI_CHAN_DIR_FROM_GSI ||
+		ch_ctx->props.tx_poll))
 		return;
 	gsi_incr_ring_rp(&ctx->ring);
 	/* recycle this element */
@@ -756,6 +758,14 @@ static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx)
 		gsi_ctx->per.ee, ctx->id, val);
 }
 
+void gsi_ring_evt_doorbell_polling_mode(unsigned long chan_hdl) {
+	struct gsi_evt_ctx *ctx;
+
+	ctx = gsi_ctx->chan[chan_hdl].evtr;
+	gsi_ring_evt_doorbell(ctx);
+}
+EXPORT_SYMBOL(gsi_ring_evt_doorbell_polling_mode);
+
 static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx)
 {
 	uint32_t val;
@@ -775,6 +785,11 @@ static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx)
 		gsi_ctx->per.ee, ctx->props.ch_id, val);
 }
 
+static bool check_channel_polling(struct gsi_evt_ctx* ctx) {
+	/* For shared event rings both channels will be marked */
+	return atomic_read(&ctx->chan[0]->poll_mode);
+}
+
 static void gsi_handle_ieob(int ee)
 {
 	uint32_t ch, evt_hdl;
@@ -830,11 +845,9 @@ check_again_v3_0:
 					ctx->ring.rp = rp;
 					while (ctx->ring.rp_local != rp) {
 						++cntr;
-						if (ctx->props.exclusive &&
-						    atomic_read(
-							    &ctx->chan->poll_mode)) {
-							cntr = 0;
-							break;
+						if (check_channel_polling(ctx)) {
+								cntr = 0;
+								break;
 						}
 						gsi_process_evt_re(ctx, &notify,
 								   true);
@@ -886,9 +899,7 @@ check_again_v3_0:
 				ctx->ring.rp = rp;
 				while (ctx->ring.rp_local != rp) {
 					++cntr;
-					if (ctx->props.exclusive &&
-					    atomic_read(
-						    &ctx->chan->poll_mode)) {
+					if (check_channel_polling(ctx)) {
 						cntr = 0;
 						break;
 					}
@@ -2551,7 +2562,7 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
 		if (atomic_read(
 			&gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) &&
 			gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive &&
-			gsi_ctx->evtr[props->evt_ring_hdl].chan->props.prot !=
+			gsi_ctx->evtr[props->evt_ring_hdl].chan[0]->props.prot !=
 			GSI_CHAN_PROT_GCI) {
 			GSIERR("evt ring=%lu exclusively used by ch_hdl=%pK\n",
 				props->evt_ring_hdl, chan_hdl);
@@ -2632,12 +2643,25 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
 
 	if (erindex < GSI_EVT_RING_MAX) {
 		ctx->evtr = &gsi_ctx->evtr[erindex];
-		if (props->prot != GSI_CHAN_PROT_GCI)
+		if(ctx->evtr->num_of_chan_allocated
+		   >= MAX_CHANNELS_SHARING_EVENT_RING) {
+			GSIERR(
+				"too many channels sharing the same event ring %u\n",
+				erindex);
+			GSI_ASSERT();
+		}
+		if (props->prot != GSI_CHAN_PROT_GCI) {
 			atomic_inc(&ctx->evtr->chan_ref_cnt);
-		if (props->prot != GSI_CHAN_PROT_GCI &&
-			ctx->evtr->props.exclusive &&
-			atomic_read(&ctx->evtr->chan_ref_cnt) == 1)
-			ctx->evtr->chan = ctx;
+			if (ctx->evtr->props.exclusive) {
+				if (atomic_read(&ctx->evtr->chan_ref_cnt) == 1)
+					ctx->evtr->chan
+					[ctx->evtr->num_of_chan_allocated++] = ctx;
+			}
+			else {
+				ctx->evtr->chan[ctx->evtr->num_of_chan_allocated++]
+					= ctx;
+			}
+		}
 	}
 
 	gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex);
@@ -3695,6 +3719,30 @@ int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
 }
 EXPORT_SYMBOL(gsi_is_channel_empty);
 
+bool gsi_is_event_pending(unsigned long chan_hdl) {
+	struct gsi_chan_ctx *ctx;
+	uint64_t rp;
+	uint64_t rp_local;
+	int ee;
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return false;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+	ee = gsi_ctx->per.ee;
+
+	/* read only, updating will be handled in NAPI context if needed */
+	rp = ctx->evtr->props.gsi_read_event_ring_rp(
+		&ctx->evtr->props, ctx->evtr->id, ee);
+	rp |= ctx->evtr->ring.rp & GSI_MSB_MASK;
+	rp_local = ctx->evtr->ring.rp_local;
+
+	return rp != rp_local;
+}
+EXPORT_SYMBOL(gsi_is_event_pending);
+
 int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
 {
 	int i;
@@ -4054,6 +4102,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 	enum gsi_chan_mode curr;
 	unsigned long flags;
 	enum gsi_chan_mode chan_mode;
+	int i;
 
 	if (!gsi_ctx) {
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
@@ -4073,7 +4122,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
 
-	if (!ctx->evtr || !ctx->evtr->props.exclusive) {
+	if (!ctx->evtr) {
 		GSIERR("cannot configure mode on chan_hdl=%lu\n",
 				chan_hdl);
 		return -GSI_STATUS_UNSUPPORTED_OP;
@@ -4110,9 +4159,11 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 			gsihal_write_reg_n(GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR,
 				gsi_ctx->per.ee, 1 << ctx->evtr->id);
 		}
-		atomic_set(&ctx->poll_mode, mode);
-		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
-			atomic_set(&ctx->evtr->chan->poll_mode, mode);
+		for(i = 0; i < ctx->evtr->num_of_chan_allocated; i++) {
+			atomic_set(&ctx->evtr->chan[i]->poll_mode, mode);
+		}
+		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && *ctx->evtr->chan) {
+			atomic_set(&ctx->evtr->chan[0]->poll_mode, mode);
 		} else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
 			coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
 			if (coal_ctx != NULL)
@@ -4126,9 +4177,11 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
 
 	if (curr == GSI_CHAN_MODE_POLL &&
 			mode == GSI_CHAN_MODE_CALLBACK) {
-		atomic_set(&ctx->poll_mode, mode);
-		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan) {
-			atomic_set(&ctx->evtr->chan->poll_mode, mode);
+		for(i = 0; i < ctx->evtr->num_of_chan_allocated; i++) {
+			atomic_set(&ctx->evtr->chan[i]->poll_mode, mode);
+		}
+		if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && *ctx->evtr->chan) {
+			atomic_set(&ctx->evtr->chan[0]->poll_mode, mode);
 		} else if (gsi_ctx->coal_info.evchid == ctx->evtr->id) {
 			coal_ctx = &gsi_ctx->chan[gsi_ctx->coal_info.ch_id];
 			if (coal_ctx != NULL)
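
The gsi.c changes above replace the event ring's single `chan` back-pointer with a small array so up to MAX_CHANNELS_SHARING_EVENT_RING channels can share one ring, and `gsi_config_channel_mode()` now flips `poll_mode` on every sharer at once; that invariant is what lets `check_channel_polling()` read only `chan[0]`. Below is a minimal user-space model of that bookkeeping — the structs are trimmed to the fields the diff touches, and `set_ring_mode()` plus the `main()` harness are illustrative names, not driver code.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_CHANNELS_SHARING_EVENT_RING 2

	struct chan_ctx {
		atomic_int poll_mode;		/* 0 = callback, 1 = poll */
	};

	struct evt_ctx {
		/* was a single back-pointer; now an array of sharers */
		struct chan_ctx *chan[MAX_CHANNELS_SHARING_EVENT_RING];
		uint8_t num_of_chan_allocated;
	};

	/*
	 * Mirrors check_channel_polling(): because the mode switch marks
	 * every sharer together, reading chan[0] is representative.
	 */
	static bool check_channel_polling(struct evt_ctx *ctx)
	{
		return atomic_load(&ctx->chan[0]->poll_mode);
	}

	/* mirrors the new gsi_config_channel_mode() loop: flip ALL sharers */
	static void set_ring_mode(struct evt_ctx *ctx, int poll)
	{
		int i;

		for (i = 0; i < ctx->num_of_chan_allocated; i++)
			atomic_store(&ctx->chan[i]->poll_mode, poll);
	}

	int main(void)
	{
		struct chan_ctx a = { 0 }, b = { 0 };
		struct evt_ctx ring = {
			.chan = { &a, &b },
			.num_of_chan_allocated = 2,
		};

		set_ring_mode(&ring, 1);
		printf("polling? %d\n", check_channel_polling(&ring));	/* 1 */
		set_ring_mode(&ring, 0);
		printf("polling? %d\n", check_channel_polling(&ring));	/* 0 */
		return 0;
	}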

+ 23 - 1
drivers/platform/msm/gsi/gsi.h

@@ -29,6 +29,7 @@
 #define GSI_EVT_RING_MAX  31
 #define GSI_NO_EVT_ERINDEX 255
 #define GSI_ISR_CACHE_MAX 20
+#define MAX_CHANNELS_SHARING_EVENT_RING 2
 
 #define GSI_IPC_LOGGING(buf, fmt, args...) \
 	do { \
@@ -290,6 +291,7 @@ struct gsi_per_notify {
  * @enable_clk_bug_on: enable IPA clock for dump saving before assert
  * @skip_ieob_mask_wa: flag for skipping ieob_mask_wa
  * All the callbacks are in interrupt context
+ * @tx_poll: propagated to the relevant gsi channels to signal that tx polling is on
  *
  */
 struct gsi_per_props {
@@ -313,6 +315,7 @@ struct gsi_per_props {
 	int (*clk_status_cb)(void);
 	void (*enable_clk_bug_on)(void);
 	bool skip_ieob_mask_wa;
+	bool tx_poll;
 };
 
 enum gsi_chan_evt {
@@ -432,6 +435,7 @@ enum gsi_chan_use_db_eng {
  *                   is used, REE will fetch/send new TRE to peripheral only
  *                   if peripheral's empty_level_count is higher than
  *                   EMPTY_LVL_THRSHOLD defined for this channel
+ * @tx_poll:         channel processes completions in NAPI context
  * @xfer_cb:         transfer notification callback, this callback happens
  *                   on event boundaries
  *
@@ -485,6 +489,7 @@ struct gsi_chan_props {
 	uint8_t low_weight;
 	enum gsi_prefetch_mode prefetch_mode;
 	uint8_t empty_lvl_threshold;
+	bool tx_poll;
 	void (*xfer_cb)(struct gsi_chan_xfer_notify *notify);
 	void (*err_cb)(struct gsi_chan_err_notify *notify);
 	void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data);
@@ -1342,7 +1347,8 @@ struct gsi_evt_ctx {
 	struct gsi_ring_ctx ring;
 	struct mutex mlock;
 	struct completion compl;
-	struct gsi_chan_ctx *chan;
+	struct gsi_chan_ctx *chan[MAX_CHANNELS_SHARING_EVENT_RING];
+	uint8_t num_of_chan_allocated;
 	atomic_t chan_ref_cnt;
 	union __packed gsi_evt_scratch scratch;
 	struct gsi_evt_stats stats;
@@ -1697,6 +1703,13 @@ int gsi_dealloc_channel(unsigned long chan_hdl);
 int gsi_poll_channel(unsigned long chan_hdl,
 		struct gsi_chan_xfer_notify *notify);
 
+/**
+ * gsi_ring_evt_doorbell_polling_mode - ring the event ring doorbell from NAPI (polling) context
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ *
+ */
+void gsi_ring_evt_doorbell_polling_mode(unsigned long chan_hdl);
 
 /**
  * gsi_config_channel_mode - Peripheral should call this function
@@ -2056,6 +2069,15 @@ int gsi_query_channel_info(unsigned long chan_hdl,
  */
 int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty);
 
+/**
+ * gsi_is_event_pending - Returns true if there is at least one event in the
+ * provided event ring which wasn't processed.
+ *
+ * @chan_hdl: Client handle previously obtained from gsi_alloc_channel
+ *
+ * @Return true if an event is pending, else false
+ */
+bool gsi_is_event_pending(unsigned long chan_hdl);
 /**
  * gsi_get_channel_cfg - This function returns the current config
  * of the specified channel
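
The new `gsi_is_event_pending()` declared here works by splicing the event ring's hardware read pointer, which carries only the low bits, onto the locally cached MSBs and comparing against `rp_local` (see the gsi.c hunk above). A stand-alone sketch of that comparison follows; `event_pending()` and `struct ring_model` are hypothetical names, and the value of GSI_MSB_MASK (upper 32 bits) is an assumption made purely for illustration.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define GSI_MSB_MASK 0xFFFFFFFF00000000ULL	/* assumed for the sketch */

	struct ring_model {
		uint64_t rp;		/* last full read pointer seen */
		uint64_t rp_local;	/* next event the driver will process */
	};

	/* the hardware register exposes only the low bits of the event RP */
	static bool event_pending(const struct ring_model *r, uint32_t hw_rp_lo)
	{
		uint64_t rp = hw_rp_lo;

		rp |= r->rp & GSI_MSB_MASK;	/* splice in cached upper bits */
		return rp != r->rp_local;	/* anything left to process? */
	}

	int main(void)
	{
		struct ring_model r = {
			.rp = 0x100003000, .rp_local = 0x100003000,
		};

		printf("pending? %d\n", event_pending(&r, 0x3000)); /* 0: caught up */
		printf("pending? %d\n", event_pending(&r, 0x3010)); /* 1: new event */
		return 0;
	}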

+ 8 - 0
drivers/platform/msm/ipa/ipa_v3/ipa.c

@@ -6858,6 +6858,7 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 		gsi_props.mhi_er_id_limits[1] = resource_p->mhi_evid_limits[1];
 	}
 	gsi_props.skip_ieob_mask_wa = resource_p->skip_ieob_mask_wa;
+	gsi_props.tx_poll = resource_p->tx_poll;
 
 	result = gsi_register_device(&gsi_props,
 		&ipa3_ctx->gsi_dev_hdl);
@@ -7776,6 +7777,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 		resource_p->do_ram_collection_on_crash;
 	ipa3_ctx->lan_rx_napi_enable = resource_p->lan_rx_napi_enable;
 	ipa3_ctx->tx_napi_enable = resource_p->tx_napi_enable;
+	ipa3_ctx->tx_poll = resource_p->tx_poll;
 	ipa3_ctx->ipa_gpi_event_rp_ddr = resource_p->ipa_gpi_event_rp_ddr;
 	ipa3_ctx->rmnet_ctl_enable = resource_p->rmnet_ctl_enable;
 	ipa3_ctx->tx_wrapper_cache_max_size = get_tx_wrapper_cache_size(
@@ -8843,6 +8845,12 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 		ipa_drv_res->tx_napi_enable
 		? "True" : "False");
 
+	ipa_drv_res->tx_poll = of_property_read_bool(pdev->dev.of_node,
+		"qcom,tx-poll");
+	IPADBG(": Enable tx polling = %s\n",
+	       ipa_drv_res->tx_poll
+	       ? "True" : "False");
+
 	ipa_drv_res->rmnet_ctl_enable =
 		of_property_read_bool(pdev->dev.of_node,
 		"qcom,rmnet-ctl-enable");

+ 168 - 30
drivers/platform/msm/ipa/ipa_v3/ipa_dp.c

@@ -92,6 +92,8 @@
 
 #define IPA_QMAP_ID_BYTE 0
 
+static int ipa3_tx_switch_to_intr_mode(struct ipa3_sys_context *sys);
+static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys);
 static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
 static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
 static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
@@ -141,13 +143,13 @@ static void ipa3_tasklet_rx_notify(unsigned long data);
 static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
 
 /**
- * ipa3_wq_write_done_common() - this function is responsible on freeing
+ * ipa3_write_done_common() - this function is responsible for freeing
  * all tx_pkt_wrappers related to a skb
  * @tx_pkt: the first tx_pkt_wrapper related to a certain skb
  * @sys: points to the ipa3_sys_context the EOT was received on
  * returns the number of tx_pkt_wrappers that were freed
  */
-static int ipa3_wq_write_done_common(struct ipa3_sys_context *sys,
+static int ipa3_write_done_common(struct ipa3_sys_context *sys,
 				struct ipa3_tx_pkt_wrapper *tx_pkt)
 {
 	struct ipa3_tx_pkt_wrapper *next_pkt;
@@ -220,7 +222,7 @@ static void ipa3_wq_write_done_status(int src_pipe,
 	if (!sys)
 		return;
 
-	ipa3_wq_write_done_common(sys, tx_pkt);
+	ipa3_write_done_common(sys, tx_pkt);
 }
 
 /**
@@ -249,7 +251,7 @@ static void ipa3_tasklet_write_done(unsigned long data)
 				struct ipa3_tx_pkt_wrapper, link);
 			xmit_done = this_pkt->xmit_done;
 			spin_unlock_bh(&sys->spinlock);
-			ipa3_wq_write_done_common(sys, this_pkt);
+			ipa3_write_done_common(sys, this_pkt);
 			spin_lock_bh(&sys->spinlock);
 			if (xmit_done)
 				break;
@@ -258,7 +260,60 @@ static void ipa3_tasklet_write_done(unsigned long data)
 	spin_unlock_bh(&sys->spinlock);
 }
 
-static int ipa3_poll_tx_complete(struct ipa3_sys_context *sys, int budget)
+static int ipa3_napi_poll_tx_complete(struct ipa3_sys_context *sys, int budget)
+{
+	struct ipa3_tx_pkt_wrapper *this_pkt = NULL;
+	int entry_budget = budget;
+	int poll_status = 0;
+	int num_of_desc = 0;
+	int i = 0;
+	struct gsi_chan_xfer_notify notify[NAPI_TX_WEIGHT];
+
+	do {
+		poll_status =
+			ipa_poll_gsi_n_pkt(sys, notify, budget, &num_of_desc);
+		for(i = 0; i < num_of_desc; i++) {
+			this_pkt = notify[i].xfer_user_data;
+			/* For shared event ring sys context might change */
+			sys = this_pkt->sys;
+			ipa3_write_done_common(sys, this_pkt);
+			budget--;
+		}
+		IPADBG_LOW("Number of desc polled %d", num_of_desc);
+	} while(budget > 0 && !poll_status);
+	return entry_budget - budget;
+}
+
+static int ipa3_aux_napi_poll_tx_complete(struct napi_struct *napi_tx,
+						int budget)
+{
+	struct ipa3_sys_context *sys = container_of(napi_tx,
+		struct ipa3_sys_context, napi_tx);
+	int tx_done = 0;
+	int ret = 0;
+
+poll_tx:
+	tx_done += ipa3_napi_poll_tx_complete(sys, budget - tx_done);
+
+	/* Doorbell needed here for continuous polling */
+	gsi_ring_evt_doorbell_polling_mode(sys->ep->gsi_chan_hdl);
+
+	if (tx_done < budget) {
+		napi_complete(napi_tx);
+		ret = ipa3_tx_switch_to_intr_mode(sys);
+
+		/* if we got an EOT while we marked NAPI as complete */
+		if (ret == -GSI_STATUS_PENDING_IRQ &&
+			napi_reschedule(napi_tx)) {
+			goto poll_tx;
+		}
+		IPA_ACTIVE_CLIENTS_DEC_EP(sys->ep->client);
+	}
+	IPADBG_LOW("the number of tx completions is: %d", tx_done);
+	return min(tx_done, budget);
+}
+
+static int ipa3_napi_tx_complete(struct ipa3_sys_context *sys, int budget)
 {
 	struct ipa3_tx_pkt_wrapper *this_pkt = NULL;
 	bool xmit_done = false;
@@ -274,7 +329,7 @@ static int ipa3_poll_tx_complete(struct ipa3_sys_context *sys, int budget)
 			struct ipa3_tx_pkt_wrapper, link);
 		xmit_done = this_pkt->xmit_done;
 		spin_unlock_bh(&sys->spinlock);
-		budget -= ipa3_wq_write_done_common(sys, this_pkt);
+		budget -= ipa3_write_done_common(sys, this_pkt);
 		spin_lock_bh(&sys->spinlock);
 		if (xmit_done)
 			atomic_add_unless(&sys->xmit_eot_cnt, -1, 0);
@@ -283,14 +338,14 @@ static int ipa3_poll_tx_complete(struct ipa3_sys_context *sys, int budget)
 	return entry_budget - budget;
 }
 
-static int ipa3_aux_poll_tx_complete(struct napi_struct *napi_tx, int budget)
+static int ipa3_aux_napi_tx_complete(struct napi_struct *napi_tx, int budget)
 {
 	struct ipa3_sys_context *sys = container_of(napi_tx,
 		struct ipa3_sys_context, napi_tx);
 	int tx_done = 0;
 
 poll_tx:
-	tx_done += ipa3_poll_tx_complete(sys, budget - tx_done);
+	tx_done += ipa3_napi_tx_complete(sys, budget - tx_done);
 	if (tx_done < budget) {
 		napi_complete(napi_tx);
 		atomic_set(&sys->in_napi_context, 0);
@@ -299,7 +354,7 @@ poll_tx:
 		if (atomic_read(&sys->xmit_eot_cnt) > 0 &&
 		    !atomic_cmpxchg(&sys->in_napi_context, 0, 1)
 		    && napi_reschedule(napi_tx)) {
-		    goto poll_tx;
+			goto poll_tx;
 		}
 	}
 	IPADBG_LOW("the number of tx completions is: %d", tx_done);
@@ -533,8 +588,9 @@ int ipa3_send(struct ipa3_sys_context *sys,
 		}
 
 		if (i == (num_desc - 1)) {
-			if (!sys->use_comm_evt_ring ||
-			    (sys->pkt_sent % IPA_EOT_THRESH == 0)) {
+			if (ipa3_ctx->tx_poll ||
+				!sys->use_comm_evt_ring ||
+				(sys->pkt_sent % IPA_EOT_THRESH == 0)) {
 				gsi_xfer[i].flags |=
 					GSI_XFER_FLAG_EOT;
 				gsi_xfer[i].flags |=
@@ -878,16 +934,61 @@ void __ipa3_update_curr_poll_state(enum ipa_client_type client, int state)
 {
 	int ep_idx = IPA_EP_NOT_ALLOCATED;
 
-	if (client == IPA_CLIENT_APPS_WAN_COAL_CONS)
-		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
-	if (client == IPA_CLIENT_APPS_WAN_CONS)
-		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	switch (client) {
+		case IPA_CLIENT_APPS_WAN_COAL_CONS:
+			ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+			break;
+		case IPA_CLIENT_APPS_WAN_CONS:
+			ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+			break;
+		case IPA_CLIENT_APPS_LAN_CONS:
+			/* for error handling */
+			break;
+		case IPA_CLIENT_APPS_WAN_PROD:
+			ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_PROD);
+			break;
+		case IPA_CLIENT_APPS_LAN_PROD:
+			ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD);
+			break;
+		default:
+			IPAERR("unexpected client:%d\n", client);
+			break;
+	}
 
 	if (ep_idx != IPA_EP_NOT_ALLOCATED && ipa3_ctx->ep[ep_idx].sys)
 		atomic_set(&ipa3_ctx->ep[ep_idx].sys->curr_polling_state,
 									state);
 }
 
+static int ipa3_tx_switch_to_intr_mode(struct ipa3_sys_context *sys) {
+	int ret;
+
+	atomic_set(&sys->curr_polling_state, 0);
+	__ipa3_update_curr_poll_state(sys->ep->client, 0);
+	ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+				      GSI_CHAN_MODE_CALLBACK);
+	if ((ret != GSI_STATUS_SUCCESS) &&
+	    !atomic_read(&sys->curr_polling_state)) {
+		if (ret == -GSI_STATUS_PENDING_IRQ) {
+			atomic_set(&sys->curr_polling_state, 1);
+			__ipa3_update_curr_poll_state(sys->ep->client, 1);
+		} else {
+			IPAERR("Failed to switch to intr mode %d ch_id %d\n",
+				sys->curr_polling_state, sys->ep->gsi_chan_hdl);
+		}
+	}
+
+	/* in case we miss an interrupt after NAPI complete */
+	if(gsi_is_event_pending(sys->ep->gsi_chan_hdl)) {
+		atomic_set(&sys->curr_polling_state, 1);
+		__ipa3_update_curr_poll_state(sys->ep->client, 1);
+		gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+					GSI_CHAN_MODE_POLL);
+		ret = -GSI_STATUS_PENDING_IRQ;
+	}
+	return ret;
+}
+
 /**
  * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
  */
@@ -1062,6 +1163,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 	struct ipahal_reg_coal_evict_lru evict_lru;
 	char buff[IPA_RESOURCE_NAME_MAX];
 	struct ipa_ep_cfg ep_cfg_copy;
+	int (*tx_completion_func)(struct napi_struct *, int);
 
 	if (sys_in == NULL || clnt_hdl == NULL) {
 		IPAERR("NULL args\n");
@@ -1177,6 +1279,11 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 		memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
 	}
 
+	if(ipa3_ctx->tx_poll)
+		tx_completion_func = &ipa3_aux_napi_poll_tx_complete;
+	else
+		tx_completion_func = &ipa3_aux_napi_tx_complete;
+
 	atomic_set(&ep->sys->xmit_eot_cnt, 0);
 	if (IPA_CLIENT_IS_PROD(sys_in->client))
 		tasklet_init(&ep->sys->tasklet, ipa3_tasklet_write_done,
@@ -1187,18 +1294,28 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 
 	if (IPA_CLIENT_IS_PROD(sys_in->client) &&
 		ipa3_ctx->tx_napi_enable) {
-		if (sys_in->client != IPA_CLIENT_APPS_WAN_PROD) {
+		if (sys_in->client == IPA_CLIENT_APPS_LAN_PROD) {
 			netif_tx_napi_add(&ipa3_ctx->generic_ndev,
-			&ep->sys->napi_tx, ipa3_aux_poll_tx_complete,
+			&ep->sys->napi_tx, tx_completion_func,
 			NAPI_TX_WEIGHT);
-		} else {
+			ep->sys->napi_tx_enable = ipa3_ctx->tx_napi_enable;
+			ep->sys->tx_poll = ipa3_ctx->tx_poll;
+		} else if(sys_in->client == IPA_CLIENT_APPS_WAN_PROD) {
 			netif_tx_napi_add((struct net_device *)sys_in->priv,
-			&ep->sys->napi_tx, ipa3_aux_poll_tx_complete,
+			&ep->sys->napi_tx, tx_completion_func,
 			NAPI_TX_WEIGHT);
+			ep->sys->napi_tx_enable = ipa3_ctx->tx_napi_enable;
+			ep->sys->tx_poll = ipa3_ctx->tx_poll;
+		} else {
+			/*CMD pipe*/
+			ep->sys->tx_poll = false;
+			ep->sys->napi_tx_enable = false;
+		}
+		if(ep->sys->napi_tx_enable) {
+			napi_enable(&ep->sys->napi_tx);
+			IPADBG("napi_enable on producer client %d completed",
+				sys_in->client);
 		}
-		napi_enable(&ep->sys->napi_tx);
-		IPADBG("napi_enable on producer client %d completed",
-			sys_in->client);
 	}
 
 	ep->client = sys_in->client;
@@ -4627,6 +4744,7 @@ static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
 static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify)
 {
 	struct ipa3_tx_pkt_wrapper *tx_pkt;
+	struct ipa3_sys_context *sys;
 
 	IPADBG_LOW("event %d notified\n", notify->evt_id);
 
@@ -4635,14 +4753,29 @@ static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify)
 		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
 		tx_pkt = notify->xfer_user_data;
 		tx_pkt->xmit_done = true;
-		atomic_inc(&tx_pkt->sys->xmit_eot_cnt);
-
-		if (ipa_net_initialized && ipa3_ctx->tx_napi_enable) {
-		    if(!atomic_cmpxchg(&tx_pkt->sys->in_napi_context, 0, 1))
-			napi_schedule(&tx_pkt->sys->napi_tx);
-		}
-		else
+		sys = tx_pkt->sys;
+		if (sys->tx_poll) {
+			if (!atomic_read(&sys->curr_polling_state)) {
+				/* dummy vote to prevent NoC error */
+				if(IPA_ACTIVE_CLIENTS_INC_EP_NO_BLOCK(
+					sys->ep->client)) {
+					IPAERR("clk isn't active");
+					ipa_assert();
+				}
+				/* put the producer event ring into polling mode */
+				gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+							GSI_CHAN_MODE_POLL);
+				atomic_set(&sys->curr_polling_state, 1);
+				__ipa3_update_curr_poll_state(sys->ep->client, 1);
+				napi_schedule(&tx_pkt->sys->napi_tx);
+			}
+		} else if (ipa_net_initialized && sys->napi_tx_enable) {
+			if(!atomic_cmpxchg(&tx_pkt->sys->in_napi_context, 0, 1))
+				napi_schedule(&tx_pkt->sys->napi_tx);
+		} else {
+			atomic_inc(&tx_pkt->sys->xmit_eot_cnt);
 			tasklet_schedule(&tx_pkt->sys->tasklet);
+		}
 		break;
 	default:
 		IPAERR("received unexpected event id %d\n", notify->evt_id);
@@ -5026,6 +5159,11 @@ static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
 		gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
 	if (IPA_CLIENT_IS_PROD(ep->client)) {
 		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+		if(ep->client == IPA_CLIENT_APPS_WAN_PROD ||
+		   ep->client == IPA_CLIENT_APPS_LAN_PROD)
+			gsi_channel_props.tx_poll = ipa3_ctx->tx_poll;
+		else
+			gsi_channel_props.tx_poll = false;
 	} else {
 		gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
 		gsi_channel_props.max_re_expected = ep->sys->rx_pool_sz;
@@ -5221,7 +5359,7 @@ static int ipa_poll_gsi_n_pkt(struct ipa3_sys_context *sys,
 	int poll_num = 0;
 
 	if (!actual_num || expected_num <= 0 ||
-		expected_num > IPA_WAN_NAPI_MAX_FRAMES) {
+		expected_num > max(IPA_WAN_NAPI_MAX_FRAMES, NAPI_TX_WEIGHT)) {
 		IPAERR("bad params actual_num=%pK expected_num=%d\n",
 			actual_num, expected_num);
 		return GSI_STATUS_INVALID_PARAMS;
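
The heart of the ipa_dp.c change is the handshake between `ipa3_aux_napi_poll_tx_complete()` and `ipa3_tx_switch_to_intr_mode()`: drain up to the budget, and when completing NAPI, re-check the ring with `gsi_is_event_pending()` so an EOT that raced with the mode switch drops the channel straight back into polling instead of being lost. A user-space model of that control flow — the fake ring, `poll_ring()`, `switch_to_intr_mode()`, and `napi_tx_poll()` are hypothetical names standing in for the driver functions; only the logic mirrors the diff.

	#include <stdbool.h>
	#include <stdio.h>

	static int ring_events;	/* completions waiting in the fake ring */
	static bool irq_armed;	/* callback (interrupt) mode active? */

	/* stands in for ipa3_napi_poll_tx_complete(): one desc per unit */
	static int poll_ring(int budget)
	{
		int done = 0;

		while (done < budget && ring_events > 0) {
			ring_events--;	/* ipa3_write_done_common() per desc */
			done++;
		}
		return done;
	}

	/* mirrors ipa3_tx_switch_to_intr_mode(): re-arm, then close the race */
	static bool switch_to_intr_mode(void)
	{
		irq_armed = true;		/* GSI_CHAN_MODE_CALLBACK */
		if (ring_events > 0) {		/* gsi_is_event_pending() */
			irq_armed = false;	/* back to GSI_CHAN_MODE_POLL */
			return false;		/* i.e. -GSI_STATUS_PENDING_IRQ */
		}
		return true;
	}

	/* mirrors ipa3_aux_napi_poll_tx_complete() */
	static int napi_tx_poll(int budget)
	{
		int tx_done = 0;

		for (;;) {
			tx_done += poll_ring(budget - tx_done);
			/* gsi_ring_evt_doorbell_polling_mode() goes here */
			if (tx_done >= budget)
				return budget;	/* stay in NAPI; re-polled later */
			if (switch_to_intr_mode())
				return tx_done;	/* napi_complete() stands */
			/* napi_reschedule() path: an EOT slipped in, poll again */
		}
	}

	int main(void)
	{
		int done;

		ring_events = 40;
		done = napi_tx_poll(32);
		printf("done=%d armed=%d\n", done, irq_armed);	/* 32 0 */
		done = napi_tx_poll(32);
		printf("done=%d armed=%d\n", done, irq_armed);	/* 8 1 */
		return 0;
	}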

+ 5 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -88,7 +88,7 @@
 
 #define NAPI_WEIGHT 64
 
-#define NAPI_TX_WEIGHT 64
+#define NAPI_TX_WEIGHT 32
 
 #define IPA_WAN_AGGR_PKT_CNT 1
 
@@ -1108,6 +1108,8 @@ struct ipa3_sys_context {
 	bool skip_eot;
 	u32 eob_drop_cnt;
 	struct napi_struct napi_tx;
+	bool tx_poll;
+	bool napi_tx_enable;
 	atomic_t in_napi_context;
 	u32 int_modt;
 	u32 int_modc;
@@ -2179,6 +2181,7 @@ struct ipa3_context {
 	/* dummy netdev for lan RX NAPI */
 	bool lan_rx_napi_enable;
 	bool tx_napi_enable;
+	bool tx_poll;
 	struct net_device generic_ndev;
 	struct napi_struct napi_lan_rx;
 	u32 icc_num_cases;
@@ -2243,6 +2246,7 @@ struct ipa3_plat_drv_res {
 	bool tethered_flow_control;
 	bool lan_rx_napi_enable;
 	bool tx_napi_enable;
+	bool tx_poll;
 	u32 mhi_evid_limits[2]; /* start and end values */
 	bool ipa_mhi_dynamic_config;
 	u32 ipa_tz_unlock_reg_num;
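
Taken together, the new `tx_poll` and `napi_tx_enable` flags give `ipa_gsi_irq_tx_notify_cb()` a three-way dispatch per tx EOT; note also that NAPI_TX_WEIGHT drops to 32, which bounds the on-stack `notify[NAPI_TX_WEIGHT]` array in `ipa3_napi_poll_tx_complete()` and matches the widened `max(IPA_WAN_NAPI_MAX_FRAMES, NAPI_TX_WEIGHT)` check in `ipa_poll_gsi_n_pkt()`. A compact restatement of that dispatch — `pick_tx_path()` and the enum are illustrative names, not driver code.

	#include <stdbool.h>
	#include <stdio.h>

	enum tx_path {
		TX_POLL_NAPI,	/* GSI_CHAN_MODE_POLL + napi_schedule() */
		TX_EOT_NAPI,	/* napi_schedule() per EOT batch */
		TX_TASKLET,	/* legacy tasklet_schedule() */
	};

	struct sys_flags {
		bool tx_poll;		/* new: drained in the NAPI poll */
		bool napi_tx_enable;	/* pre-existing NAPI-on-EOT mode */
	};

	static enum tx_path pick_tx_path(const struct sys_flags *s, bool net_up)
	{
		if (s->tx_poll)
			return TX_POLL_NAPI;
		if (net_up && s->napi_tx_enable)
			return TX_EOT_NAPI;
		return TX_TASKLET;
	}

	int main(void)
	{
		struct sys_flags wan = { .tx_poll = true, .napi_tx_enable = true };
		struct sys_flags cmd = { 0 };	/* the CMD pipe keeps both off */

		printf("wan -> %d, cmd -> %d\n",
		       pick_tx_path(&wan, true), pick_tx_path(&cmd, true)); /* 0, 2 */
		return 0;
	}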