|
@@ -143,6 +143,7 @@ static int ipa_poll_gsi_n_pkt(struct ipa3_sys_context *sys,
|
|
|
static unsigned long tag_to_pointer_wa(uint64_t tag);
|
|
|
static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt);
|
|
|
static void ipa3_tasklet_rx_notify(unsigned long data);
|
|
|
+static void ipa3_tasklet_find_freepage(unsigned long data);
|
|
|
static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
|
|
|
static int ipa3_rmnet_ll_rx_poll(struct napi_struct *napi_rx, int budget);
|
|
|
|
|
@@ -1194,6 +1195,60 @@ fail_setup:
|
|
|
return result;
|
|
|
}
|
|
|
|
|
|
+static void ipa3_schd_freepage_work(struct work_struct *work)
|
|
|
+{
|
|
|
+ struct delayed_work *dwork;
|
|
|
+ struct ipa3_sys_context *sys;
|
|
|
+
|
|
|
+ dwork = container_of(work, struct delayed_work, work);
|
|
|
+ sys = container_of(dwork, struct ipa3_sys_context, freepage_work);
|
|
|
+
|
|
|
+ IPADBG_LOW("WQ scheduled, reschedule sort tasklet\n");
|
|
|
+
|
|
|
+ tasklet_schedule(&sys->tasklet_find_freepage);
|
|
|
+}
|
|
|
+
|
|
|
+static void ipa3_tasklet_find_freepage(unsigned long data)
|
|
|
+{
|
|
|
+ struct ipa3_sys_context *sys;
|
|
|
+ struct ipa3_rx_pkt_wrapper *rx_pkt = NULL;
|
|
|
+ struct ipa3_rx_pkt_wrapper *tmp = NULL;
|
|
|
+ struct page *cur_page;
|
|
|
+ int found_free_page = 0;
|
|
|
+ struct list_head temp_head;
|
|
|
+
|
|
|
+ sys = (struct ipa3_sys_context *)data;
|
|
|
+
|
|
|
+ INIT_LIST_HEAD(&temp_head);
|
|
|
+ spin_lock_bh(&sys->common_sys->spinlock);
|
|
|
+ list_for_each_entry_safe(rx_pkt, tmp,
|
|
|
+ &sys->page_recycle_repl->page_repl_head, link) {
|
|
|
+ cur_page = rx_pkt->page_data.page;
|
|
|
+ if (page_ref_count(cur_page) == 1) {
|
|
|
+ /* Found a free page. */
|
|
|
+ list_del_init(&rx_pkt->link);
|
|
|
+ list_add(&rx_pkt->link, &temp_head);
|
|
|
+ found_free_page++;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ if (!found_free_page) {
|
|
|
+ /* No free page found; reschedule the find-freepage work after page_wq_reschd_time msecs */
|
|
|
+ IPADBG_LOW("Scheduling WQ not found free pages\n");
|
|
|
+ ++ipa3_ctx->stats.num_of_times_wq_reschd;
|
|
|
+ queue_delayed_work(sys->freepage_wq,
|
|
|
+ &sys->freepage_work,
|
|
|
+ msecs_to_jiffies(ipa3_ctx->page_wq_reschd_time));
|
|
|
+ } else {
|
|
|
+ /* Free pages found; allow use of the pre-allocated recycle buffers */
|
|
|
+ list_splice(&temp_head, &sys->page_recycle_repl->page_repl_head);
|
|
|
+ ipa3_ctx->stats.page_recycle_cnt_in_tasklet += found_free_page;
|
|
|
+ IPADBG_LOW("found free pages count = %d\n", found_free_page);
|
|
|
+ atomic_set(&sys->common_sys->page_avilable, 1);
|
|
|
+ }
|
|
|
+ spin_unlock_bh(&sys->common_sys->spinlock);
|
|
|
+
|
|
|
+}
|
|
|
+
|
|
|
/**
|
|
|
* ipa3_setup_sys_pipe() - Setup an IPA GPI pipe and perform
|
|
|
* IPA EP configuration
|
|
@@ -1286,6 +1341,9 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
|
|
|
goto fail_wq2;
|
|
|
}
|
|
|
|
|
|
+ snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipafreepagewq%d",
|
|
|
+ sys_in->client);
|
|
|
+
|
|
|
INIT_LIST_HEAD(&ep->sys->head_desc_list);
|
|
|
INIT_LIST_HEAD(&ep->sys->rcycl_list);
|
|
|
INIT_LIST_HEAD(&ep->sys->avail_tx_wrapper_list);
|
|
@@ -1302,6 +1360,16 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
|
|
|
/* Use coalescing pipe PM handle for default pipe also*/
|
|
|
ep->sys->pm_hdl = ipa3_ctx->ep[coal_ep_id].sys->pm_hdl;
|
|
|
} else if (IPA_CLIENT_IS_CONS(sys_in->client)) {
|
|
|
+ ep->sys->freepage_wq = alloc_workqueue(buff,
|
|
|
+ WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS |
|
|
|
+ WQ_HIGHPRI, 1);
|
|
|
+ if (!ep->sys->freepage_wq) {
|
|
|
+ IPAERR("failed to create freepage wq for client %d\n",
|
|
|
+ sys_in->client);
|
|
|
+ result = -EFAULT;
|
|
|
+ goto fail_wq3;
|
|
|
+ }
|
|
|
+
|
|
|
pm_reg.name = ipa_clients_strings[sys_in->client];
|
|
|
pm_reg.callback = ipa_pm_sys_pipe_cb;
|
|
|
pm_reg.user_data = ep->sys;
|
|
@@ -1430,6 +1498,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
|
|
|
}
|
|
|
|
|
|
*clnt_hdl = ipa_ep_idx;
|
|
|
+ ep->sys->common_sys = ipa3_ctx->ep[ipa_ep_idx].sys;
|
|
|
|
|
|
if (ep->sys->repl_hdlr == ipa3_fast_replenish_rx_cache) {
|
|
|
ep->sys->repl = kzalloc(sizeof(*ep->sys->repl), GFP_KERNEL);
|
|
@@ -1505,6 +1574,10 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
|
|
|
atomic_set(&ep->sys->repl->head_idx, 0);
|
|
|
atomic_set(&ep->sys->repl->tail_idx, 0);
|
|
|
|
|
|
+ tasklet_init(&ep->sys->tasklet_find_freepage,
|
|
|
+ ipa3_tasklet_find_freepage, (unsigned long) ep->sys);
|
|
|
+ INIT_DELAYED_WORK(&ep->sys->freepage_work, ipa3_schd_freepage_work);
|
|
|
+ ep->sys->napi_sort_page_thrshld_cnt = 0;
|
|
|
ipa3_replenish_rx_page_cache(ep->sys);
|
|
|
ipa3_wq_page_repl(&ep->sys->repl_work);
|
|
|
} else {
|
|
@@ -1615,6 +1688,8 @@ fail_napi:
|
|
|
fail_gen2:
|
|
|
ipa_pm_deregister(ep->sys->pm_hdl);
|
|
|
fail_pm:
|
|
|
+ destroy_workqueue(ep->sys->freepage_wq);
|
|
|
+fail_wq3:
|
|
|
destroy_workqueue(ep->sys->repl_wq);
|
|
|
fail_wq2:
|
|
|
destroy_workqueue(ep->sys->wq);
|
|
@@ -2377,6 +2452,7 @@ static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys)
|
|
|
list_add_tail(&rx_pkt->link,
|
|
|
&sys->page_recycle_repl->page_repl_head);
|
|
|
}
|
|
|
+ atomic_set(&sys->common_sys->page_avilable, 1);
|
|
|
|
|
|
return;
|
|
|
|
|
@@ -2457,6 +2533,7 @@ static struct ipa3_rx_pkt_wrapper * ipa3_get_free_page
|
|
|
int i = 0;
|
|
|
u8 LOOP_THRESHOLD = ipa3_ctx->page_poll_threshold;
|
|
|
|
|
|
+ spin_lock_bh(&sys->common_sys->spinlock);
|
|
|
list_for_each_entry_safe(rx_pkt, tmp,
|
|
|
&sys->page_recycle_repl->page_repl_head, link) {
|
|
|
if (i == LOOP_THRESHOLD)
|
|
@@ -2467,10 +2544,23 @@ static struct ipa3_rx_pkt_wrapper * ipa3_get_free_page
|
|
|
page_ref_inc(cur_page);
|
|
|
list_del_init(&rx_pkt->link);
|
|
|
++ipa3_ctx->stats.page_recycle_cnt[stats_i][i];
|
|
|
+ sys->common_sys->napi_sort_page_thrshld_cnt = 0;
|
|
|
+ spin_unlock_bh(&sys->common_sys->spinlock);
|
|
|
return rx_pkt;
|
|
|
}
|
|
|
i++;
|
|
|
}
|
|
|
+ spin_unlock_bh(&sys->common_sys->spinlock);
|
|
|
+ IPADBG_LOW("napi_sort_page_thrshld_cnt = %d ipa_max_napi_sort_page_thrshld = %d\n",
|
|
|
+ sys->common_sys->napi_sort_page_thrshld_cnt,
|
|
|
+ ipa3_ctx->ipa_max_napi_sort_page_thrshld);
|
|
|
+ /* Poll threshold exceeded: schedule the tasklet to sort out free pages */
|
|
|
+ if (sys->common_sys->napi_sort_page_thrshld_cnt >=
|
|
|
+ ipa3_ctx->ipa_max_napi_sort_page_thrshld) {
|
|
|
+ atomic_set(&sys->common_sys->page_avilable, 0);
|
|
|
+ tasklet_schedule(&sys->common_sys->tasklet_find_freepage);
|
|
|
+ ++ipa3_ctx->stats.num_sort_tasklet_sched[stats_i];
|
|
|
+ }
|
|
|
return NULL;
|
|
|
}
|
|
|
|
|
@@ -2543,7 +2633,8 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
|
|
|
|
|
|
while (rx_len_cached < sys->rx_pool_sz) {
|
|
|
/* check for an idle page that can be used */
|
|
|
- if ((rx_pkt = ipa3_get_free_page(sys,stats_i)) != NULL) {
|
|
|
+ if (atomic_read(&sys->common_sys->page_avilable) &&
|
|
|
+ ((rx_pkt = ipa3_get_free_page(sys,stats_i)) != NULL)) {
|
|
|
ipa3_ctx->stats.page_recycle_stats[stats_i].page_recycled++;
|
|
|
|
|
|
} else {
|
|
@@ -4138,11 +4229,11 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
|
|
|
IPAERR("notify->veid > GSI_VEID_MAX\n");
|
|
|
if (!rx_page.is_tmp_alloc) {
|
|
|
init_page_count(rx_page.page);
|
|
|
- spin_lock_bh(&rx_pkt->sys->spinlock);
|
|
|
+ spin_lock_bh(&rx_pkt->sys->common_sys->spinlock);
|
|
|
/* Add the element to head. */
|
|
|
list_add(&rx_pkt->link,
|
|
|
&rx_pkt->sys->page_recycle_repl->page_repl_head);
|
|
|
- spin_unlock_bh(&rx_pkt->sys->spinlock);
|
|
|
+ spin_unlock_bh(&rx_pkt->sys->common_sys->spinlock);
|
|
|
} else {
|
|
|
dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
|
|
|
rx_pkt->len, DMA_FROM_DEVICE);
|
|
@@ -4170,11 +4261,11 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
|
|
|
list_del_init(&rx_pkt->link);
|
|
|
if (!rx_page.is_tmp_alloc) {
|
|
|
init_page_count(rx_page.page);
|
|
|
- spin_lock_bh(&rx_pkt->sys->spinlock);
|
|
|
+ spin_lock_bh(&rx_pkt->sys->common_sys->spinlock);
|
|
|
/* Add the element to head. */
|
|
|
list_add(&rx_pkt->link,
|
|
|
&rx_pkt->sys->page_recycle_repl->page_repl_head);
|
|
|
- spin_unlock_bh(&rx_pkt->sys->spinlock);
|
|
|
+ spin_unlock_bh(&rx_pkt->sys->common_sys->spinlock);
|
|
|
} else {
|
|
|
dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
|
|
|
rx_pkt->len, DMA_FROM_DEVICE);
|
|
@@ -4194,11 +4285,11 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
|
|
|
dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
|
|
|
rx_pkt->len, DMA_FROM_DEVICE);
|
|
|
} else {
|
|
|
- spin_lock_bh(&rx_pkt->sys->spinlock);
|
|
|
+ spin_lock_bh(&rx_pkt->sys->common_sys->spinlock);
|
|
|
/* Add the element back to tail. */
|
|
|
list_add_tail(&rx_pkt->link,
|
|
|
&rx_pkt->sys->page_recycle_repl->page_repl_head);
|
|
|
- spin_unlock_bh(&rx_pkt->sys->spinlock);
|
|
|
+ spin_unlock_bh(&rx_pkt->sys->common_sys->spinlock);
|
|
|
dma_sync_single_for_cpu(ipa3_ctx->pdev,
|
|
|
rx_page.dma_addr,
|
|
|
rx_pkt->len, DMA_FROM_DEVICE);
|
|
@@ -5917,6 +6008,7 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight)
|
|
|
return -EINVAL;
|
|
|
}
|
|
|
|
|
|
+ ep->sys->common_sys->napi_sort_page_thrshld_cnt++;
|
|
|
start_poll:
|
|
|
/*
|
|
|
* it is guaranteed we already have clock here.
|
|
@@ -6148,6 +6240,7 @@ static int ipa3_rmnet_ll_rx_poll(struct napi_struct *napi_rx, int budget)
|
|
|
return -EINVAL;
|
|
|
}
|
|
|
|
|
|
+ sys->napi_sort_page_thrshld_cnt++;
|
|
|
start_poll:
|
|
|
/*
|
|
|
* it is guaranteed we already have clock here.
|