msm: ipa: use lower order pages

When order 3 pages are not available, fall back to lower
order pages so that buffers can still be provided to HW.

Change-Id: I9eea764d678820b0d3f485525310c506ea29c45e
Signed-off-by: Chaitanya Pratapa <cpratapa@quicinc.com>
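
The whole change hinges on one allocation helper that retries a single page order lower when the requested order cannot be satisfied, and counts how often that fallback fires. As a minimal standalone sketch of the same pattern (userspace C, with aligned_alloc() standing in for __dev_alloc_pages(); every name below is illustrative, not part of the driver):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SIM_PAGE_SIZE 4096u

    /* Try the requested order; on failure, retry exactly one order lower. */
    static void *sim_alloc_pages(uint32_t *page_order, bool try_lower,
                                 uint64_t *lower_order_stat)
    {
        uint32_t order = *page_order;
        void *buf = aligned_alloc(SIM_PAGE_SIZE, (size_t)SIM_PAGE_SIZE << order);

        if (!buf && try_lower && order > 0) {
            order--;
            buf = aligned_alloc(SIM_PAGE_SIZE, (size_t)SIM_PAGE_SIZE << order);
            if (buf)
                (*lower_order_stat)++;  /* mirrors stats.lower_order */
        }
        /* Report the order actually used so the caller sizes the buffer right. */
        *page_order = order;
        return buf;
    }

    int main(void)
    {
        uint64_t lower_order = 0;
        uint32_t order = 3;  /* order 3 = 8 pages, as in the driver */
        void *buf = sim_alloc_pages(&order, true, &lower_order);

        if (buf)
            printf("got order %u (%zu bytes), fallbacks taken: %llu\n",
                   (unsigned)order, (size_t)SIM_PAGE_SIZE << order,
                   (unsigned long long)lower_order);
        free(buf);
        return 0;
    }

The key detail is that the achieved order is written back through the out parameter, which is why every DMA-map and free site in the diff switches from sys->page_order to the per-buffer rx_pkt->page_data.page_order.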


@@ -1553,7 +1553,8 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
 		"lan_repl_rx_empty=%u\n"
 		"flow_enable=%u\n"
 		"flow_disable=%u\n"
-		"rx_page_drop_cnt=%u\n",
+		"rx_page_drop_cnt=%u\n"
+		"lower_order=%u\n",
 		ipa3_ctx->stats.tx_sw_pkts,
 		ipa3_ctx->stats.tx_hw_pkts,
 		ipa3_ctx->stats.tx_non_linear,
@@ -1573,7 +1574,8 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
 		ipa3_ctx->stats.lan_repl_rx_empty,
 		ipa3_ctx->stats.flow_enable,
 		ipa3_ctx->stats.flow_disable,
-		ipa3_ctx->stats.rx_page_drop_cnt
+		ipa3_ctx->stats.rx_page_drop_cnt,
+		ipa3_ctx->stats.lower_order
 		);
 	cnt += nbytes;
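
With the two hunks above, the stats dump produced by ipa3_read_stats() grows one line after rx_page_drop_cnt, along the lines of (values illustrative):

    rx_page_drop_cnt=0
    lower_order=12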


@@ -2286,6 +2286,27 @@ fail_kmem_cache_alloc:
 	}
 }
 
+static struct page *ipa3_alloc_page(
+	gfp_t flag, u32 *page_order, bool try_lower)
+{
+	struct page *page = NULL;
+	u32 p_order = *page_order;
+
+	page = __dev_alloc_pages(flag, p_order);
+	/* We will only try 1 page order lower. */
+	if (unlikely(!page)) {
+		if (try_lower && p_order > 0) {
+			p_order = p_order - 1;
+			page = __dev_alloc_pages(flag, p_order);
+			if (likely(page))
+				ipa3_ctx->stats.lower_order++;
+		}
+	}
+	*page_order = p_order;
+
+	return page;
+}
+
 static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
 	gfp_t flag, bool is_tmp_alloc, struct ipa3_sys_context *sys)
 {
@@ -2296,13 +2317,18 @@ static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
 		flag);
 	if (unlikely(!rx_pkt))
 		return NULL;
 
-	rx_pkt->len = PAGE_SIZE << sys->page_order;
-	rx_pkt->page_data.page = __dev_alloc_pages(flag,
-		sys->page_order);
+	rx_pkt->page_data.page_order = sys->page_order;
+	/* Try a lower order page for order 3 pages in case allocation fails. */
+	rx_pkt->page_data.page = ipa3_alloc_page(flag,
+		&rx_pkt->page_data.page_order,
+		(is_tmp_alloc && rx_pkt->page_data.page_order == 3));
 
 	if (unlikely(!rx_pkt->page_data.page))
 		goto fail_page_alloc;
 
+	rx_pkt->len = PAGE_SIZE << rx_pkt->page_data.page_order;
 	rx_pkt->page_data.dma_addr = dma_map_page(ipa3_ctx->pdev,
 		rx_pkt->page_data.page, 0,
 		rx_pkt->len, DMA_FROM_DEVICE);
@@ -2320,7 +2346,7 @@ static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
 	return rx_pkt;
 
 fail_dma_mapping:
-	__free_pages(rx_pkt->page_data.page, sys->page_order);
+	__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
 fail_page_alloc:
 	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 	return NULL;
@@ -3052,7 +3078,7 @@ static void free_rx_page(void *chan_user_data, void *xfer_user_data)
 	}
 	dma_unmap_page(ipa3_ctx->pdev, rx_pkt->page_data.dma_addr,
 		rx_pkt->len, DMA_FROM_DEVICE);
-	__free_pages(rx_pkt->page_data.page, rx_pkt->sys->page_order);
+	__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
 	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 }
@@ -3102,7 +3128,7 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
 				rx_pkt->page_data.dma_addr,
 				rx_pkt->len,
 				DMA_FROM_DEVICE);
-			__free_pages(rx_pkt->page_data.page, sys->page_order);
+			__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
 		}
 		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
 			rx_pkt);
@@ -3122,7 +3148,7 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
 					rx_pkt->len,
 					DMA_FROM_DEVICE);
 				__free_pages(rx_pkt->page_data.page,
-					sys->page_order);
+					rx_pkt->page_data.page_order);
 				kmem_cache_free(
 					ipa3_ctx->rx_pkt_wrapper_cache,
 					rx_pkt);
@@ -3922,7 +3948,7 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
 		} else {
 			dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
 				rx_pkt->len, DMA_FROM_DEVICE);
-			__free_pages(rx_pkt->page_data.page, sys->page_order);
+			__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
 		}
 		rx_pkt->sys->free_rx_wrapper(rx_pkt);
 		IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_page_drop_cnt);
@@ -3954,7 +3980,7 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
 		} else {
 			dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
 				rx_pkt->len, DMA_FROM_DEVICE);
-			__free_pages(rx_pkt->page_data.page, sys->page_order);
+			__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
 		}
 		rx_pkt->sys->free_rx_wrapper(rx_pkt);
 	}
@@ -3985,7 +4011,7 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
 				skb_shinfo(rx_skb)->nr_frags,
 				rx_page.page, 0,
 				size,
-				PAGE_SIZE << sys->page_order);
+				PAGE_SIZE << rx_page.page_order);
 		}
 	} else {
 		return NULL;
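
All of the free and unmap sites above are the reason the order has to travel with each buffer: __free_pages() must be given the same order the allocation actually used, and once the fallback can trigger, sys->page_order no longer necessarily matches what a given rx_pkt holds. Hence the new per-page field in the header change below.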


@@ -568,11 +568,13 @@ enum ipa_icc_type {
  * @page: skb page
  * @dma_addr: DMA address of this Rx packet
  * @is_tmp_alloc: skb page from tmp_alloc or recycle_list
+ * @page_order: page order associated with the page.
  */
 struct ipa_rx_page_data {
 	struct page *page;
 	dma_addr_t dma_addr;
 	bool is_tmp_alloc;
+	u32 page_order;
 };
 
 struct ipa3_active_client_htable_entry {
@@ -1545,6 +1547,7 @@ struct ipa3_stats {
 	u32 flow_disable;
 	u32 tx_non_linear;
 	u32 rx_page_drop_cnt;
+	u64 lower_order;
 	struct ipa3_page_recycle_stats page_recycle_stats[3];
 	u64 page_recycle_cnt[3][IPA_PAGE_POLL_THRESHOLD_MAX];
 };