@@ -894,7 +894,7 @@ static int ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
 
 	atomic_set(&sys->curr_polling_state, 0);
 	__ipa3_update_curr_poll_state(sys->ep->client, 0);
-
+	ipa_pm_deferred_deactivate(sys->pm_hdl);
 	ipa3_dec_release_wakelock();
 	ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
 		GSI_CHAN_MODE_CALLBACK);
@@ -927,8 +927,8 @@ static void ipa3_handle_rx(struct ipa3_sys_context *sys)
 	int cnt;
 	int ret;
 
-	ipa_pm_activate_sync(sys->pm_hdl);
 start_poll:
+	ipa_pm_activate_sync(sys->pm_hdl);
 	inactive_cycles = 0;
 	do {
 		cnt = ipa3_handle_rx_core(sys, true, true);
@@ -956,7 +956,7 @@ start_poll:
 	if (ret == -GSI_STATUS_PENDING_IRQ)
 		goto start_poll;
 
-	ipa_pm_deferred_deactivate(sys->pm_hdl);
+	IPA_ACTIVE_CLIENTS_DEC_EP(sys->ep->client);
 }
 
 static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
@@ -1936,14 +1936,22 @@ fail_pipe_not_valid:
 static void ipa3_wq_handle_rx(struct work_struct *work)
 {
 	struct ipa3_sys_context *sys;
+	enum ipa_client_type client_type;
 
 	sys = container_of(work, struct ipa3_sys_context, work);
+	/*
+	 * Mark the client as WAN_COAL_CONS, since NAPI
+	 * only uses the sys context of WAN_COAL_CONS.
+	 */
+	if (IPA_CLIENT_IS_WAN_CONS(sys->ep->client))
+		client_type = IPA_CLIENT_APPS_WAN_COAL_CONS;
+	else
+		client_type = sys->ep->client;
 
+	IPA_ACTIVE_CLIENTS_INC_EP(client_type);
 	if (ipa_net_initialized && sys->napi_obj) {
-		ipa_pm_activate_sync(sys->pm_hdl);
 		napi_schedule(sys->napi_obj);
 	} else if (IPA_CLIENT_IS_LOW_LAT_CONS(sys->ep->client)) {
-		ipa_pm_activate_sync(sys->pm_hdl);
 		tasklet_schedule(&sys->tasklet);
 	} else
 		ipa3_handle_rx(sys);
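
The pairing introduced above is deliberately split across contexts: the process-context work handler takes the blocking active-clients vote before handing the endpoint to NAPI or the tasklet, and the matching decrement happens in the poll path once it has switched back to interrupt mode (see the ipa3_handle_rx and ipa3_rx_poll hunks). A minimal sketch of that shape only, with hypothetical names — rx_ctx, rx_vote_get/rx_vote_put, and rx_process stand in for the driver's sys context, IPA_ACTIVE_CLIENTS_INC_EP()/DEC_EP(), and the rx processing core:

#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct rx_ctx {
	struct work_struct work;
	struct napi_struct napi;
};

/* Hypothetical stand-ins, not driver API. */
void rx_vote_get(struct rx_ctx *ctx);
void rx_vote_put(struct rx_ctx *ctx);
int rx_process(struct rx_ctx *ctx, int budget);

/* Process context: sleeping is allowed, so a blocking vote is safe. */
static void rx_work_handler(struct work_struct *work)
{
	struct rx_ctx *ctx = container_of(work, struct rx_ctx, work);

	rx_vote_get(ctx);		/* clock reference for the whole cycle */
	napi_schedule(&ctx->napi);	/* poll runs with the vote held */
}

/* Softirq context: drop the vote only after interrupts are re-armed. */
static int rx_napi_poll(struct napi_struct *napi, int budget)
{
	struct rx_ctx *ctx = container_of(napi, struct rx_ctx, napi);
	int done = rx_process(ctx, budget);

	if (done < budget && napi_complete_done(napi, done))
		rx_vote_put(ctx);	/* pairs with rx_vote_get() above */
	return done;
}
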
@@ -4595,17 +4603,26 @@ static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify)
 void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys)
 {
 	bool clk_off;
+	enum ipa_client_type client_type;
 
 	atomic_set(&sys->curr_polling_state, 1);
 	__ipa3_update_curr_poll_state(sys->ep->client, 1);
 
 	ipa3_inc_acquire_wakelock();
-
 	/*
-	 * pm deactivate is done in wq context
-	 * or after NAPI poll
+	 * Mark the client as WAN_COAL_CONS, since NAPI
+	 * only uses the sys context of WAN_COAL_CONS.
 	 */
-	clk_off = ipa_pm_activate(sys->pm_hdl);
+	if (IPA_CLIENT_IS_WAN_CONS(sys->ep->client))
+		client_type = IPA_CLIENT_APPS_WAN_COAL_CONS;
+	else
+		client_type = sys->ep->client;
+	/*
+	 * Using PM here races with the poll-to-ISR mode
+	 * switch. Take the non-blocking active-clients
+	 * vote instead, which is reference counted.
+	 */
+	clk_off = IPA_ACTIVE_CLIENTS_INC_EP_NO_BLOCK(client_type);
 	if (!clk_off && ipa_net_initialized && sys->napi_obj) {
 		trace_ipa3_napi_schedule(sys->ep->client);
 		napi_schedule(sys->napi_obj);
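
The no-block vote is what makes this safe in IRQ context: it takes a reference only when the clock is already running and otherwise fails immediately, so the caller can defer the wakeup to the workqueue path. A minimal sketch of such a reference-counted, non-blocking vote under assumed names (clk_vote, clk_vote_inc_no_block); the driver's actual bookkeeping lives behind IPA_ACTIVE_CLIENTS_INC_EP_NO_BLOCK():

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct clk_vote {
	spinlock_t lock;	/* orders vote_cnt against clock gating */
	int vote_cnt;		/* outstanding references */
	bool clk_is_on;		/* current clock state */
};

/*
 * Take a reference without blocking: succeeds only if the clock
 * is already on. Safe from IRQ context; a nonzero return tells
 * the caller the clock is off and the wakeup must be deferred to
 * process context (mirroring the clk_off handling above).
 */
static int clk_vote_inc_no_block(struct clk_vote *v)
{
	unsigned long flags;
	int ret = -EPERM;

	spin_lock_irqsave(&v->lock, flags);
	if (v->clk_is_on) {
		v->vote_cnt++;
		ret = 0;
	}
	spin_unlock_irqrestore(&v->lock, flags);
	return ret;
}

Because every successful increment is balanced by the DEC_EP_NO_BLOCK calls added in the poll paths below, the clock cannot be gated between the ISR scheduling the poll and the poll loop dropping its reference.
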
@@ -5213,6 +5230,11 @@ int ipa3_lan_rx_poll(u32 clnt_hdl, int weight)
 	ep = &ipa3_ctx->ep[clnt_hdl];
 
 start_poll:
+	/*
+	 * The clock is guaranteed to be on here already;
+	 * this vote is mainly for clock scaling.
+	 */
+	ipa_pm_activate(ep->sys->pm_hdl);
 	while (remain_aggr_weight > 0 &&
 		atomic_read(&ep->sys->curr_polling_state)) {
 		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
@@ -5242,7 +5264,7 @@ start_poll:
 			napi_reschedule(ep->sys->napi_obj))
 			goto start_poll;
 
-		ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
+		IPA_ACTIVE_CLIENTS_DEC_EP_NO_BLOCK(ep->client);
 	}
 
 	return cnt;
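
In this hunk (and the two like it below), ipa_pm_activate() can no longer observe the clock off: the active-clients reference taken when polling was scheduled keeps it on, so the call only feeds the PM layer's activity tracking for clock scaling. A sketch of the loop shape only, with hypothetical names (pm_hint_active, rx_process, rx_switch_to_intr_mode, rx_vote_put_no_block are placeholders, not driver API):

#include <linux/errno.h>

struct rx_ctx;

/* Hypothetical stand-ins for ipa_pm_activate(), the rx core,
 * ipa3_rx_switch_to_intr_mode() and
 * IPA_ACTIVE_CLIENTS_DEC_EP_NO_BLOCK(). */
void pm_hint_active(struct rx_ctx *ctx);
int rx_process(struct rx_ctx *ctx, int budget);
int rx_switch_to_intr_mode(struct rx_ctx *ctx);
void rx_vote_put_no_block(struct rx_ctx *ctx);

static int rx_poll(struct rx_ctx *ctx, int budget)
{
	int done;

start_poll:
	/* Clock already on (reference held since scheduling);
	 * this only registers activity for clock scaling. */
	pm_hint_active(ctx);
	done = rx_process(ctx, budget);
	if (done < budget) {
		/* New traffic raced with the interrupt re-arm: poll again. */
		if (rx_switch_to_intr_mode(ctx) == -EAGAIN)
			goto start_poll;
		/* Drop the reference taken when polling was scheduled. */
		rx_vote_put_no_block(ctx);
	}
	return done;
}
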
@@ -5298,6 +5320,11 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight)
 	}
 
 start_poll:
+	/*
+	 * The clock is guaranteed to be on here already;
+	 * this vote is mainly for clock scaling.
+	 */
+	ipa_pm_activate(ep->sys->pm_hdl);
 	while (remain_aggr_weight > 0 &&
 		atomic_read(&ep->sys->curr_polling_state)) {
 		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
@@ -5335,7 +5362,7 @@ start_poll:
 		if (ret == -GSI_STATUS_PENDING_IRQ &&
 			napi_reschedule(ep->sys->napi_obj))
 			goto start_poll;
-		ipa_pm_deferred_deactivate(ep->sys->pm_hdl);
+		IPA_ACTIVE_CLIENTS_DEC_EP_NO_BLOCK(ep->client);
 	} else {
 		cnt = weight;
 		IPADBG_LOW("Client = %d not replenished free descripotrs\n",
@@ -5478,6 +5505,11 @@ static void ipa3_tasklet_rx_notify(unsigned long data)
 	sys = (struct ipa3_sys_context *)data;
 	atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
 start_poll:
+	/*
+	 * The clock is guaranteed to be on here already;
+	 * this vote is mainly for clock scaling.
+	 */
+	ipa_pm_activate(sys->pm_hdl);
 	while (1) {
 		ret = ipa_poll_gsi_pkt(sys, &notify);
 		if (ret)
@@ -5492,6 +5524,6 @@ start_poll:
 	ret = ipa3_rx_switch_to_intr_mode(sys);
 	if (ret == -GSI_STATUS_PENDING_IRQ)
 		goto start_poll;
-	ipa_pm_deferred_deactivate(sys->pm_hdl);
+	IPA_ACTIVE_CLIENTS_DEC_EP_NO_BLOCK(sys->ep->client);
 }