Merge "msm: ipa: use lower order pages"

qctecmdr
2021-10-28 15:10:36 -07:00
committed by Gerrit - the friendly Code Review server
commit ed5dc27be8
3 changed files with 43 additions and 12 deletions

View File

@@ -1586,7 +1586,8 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
"lan_repl_rx_empty=%u\n"
"flow_enable=%u\n"
"flow_disable=%u\n"
"rx_page_drop_cnt=%u\n",
"rx_page_drop_cnt=%u\n"
"lower_order=%u\n",
ipa3_ctx->stats.tx_sw_pkts,
ipa3_ctx->stats.tx_hw_pkts,
ipa3_ctx->stats.tx_non_linear,
@@ -1606,7 +1607,8 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
ipa3_ctx->stats.lan_repl_rx_empty,
ipa3_ctx->stats.flow_enable,
ipa3_ctx->stats.flow_disable,
-ipa3_ctx->stats.rx_page_drop_cnt
+ipa3_ctx->stats.rx_page_drop_cnt,
+ipa3_ctx->stats.lower_order
);
cnt += nbytes;
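
These two hunks keep the debugfs stats format string and its argument list in step: the "rx_page_drop_cnt=%u\n" literal loses its trailing comma so that the new "lower_order=%u\n" literal concatenates onto the same format string, and ipa3_ctx->stats.lower_order is appended as the matching argument. A minimal userspace sketch of the same pairing, using snprintf() and made-up counter values rather than the driver's stats structure:

#include <stdio.h>

/* Hypothetical counters standing in for the ipa3_ctx->stats fields. */
struct demo_stats {
	unsigned int rx_page_drop_cnt;
	unsigned int lower_order;
};

int main(void)
{
	struct demo_stats stats = { .rx_page_drop_cnt = 4, .lower_order = 2 };
	char buf[128];

	/* Adjacent string literals concatenate into one format string;
	 * every %u must be paired with one argument below, in order. */
	int n = snprintf(buf, sizeof(buf),
			 "rx_page_drop_cnt=%u\n"
			 "lower_order=%u\n",
			 stats.rx_page_drop_cnt,
			 stats.lower_order);

	fputs(buf, stdout);
	return n < 0 ? 1 : 0;
}

Compiling and running this prints the two counters on separate lines, which is how the new lower_order value shows up in the stats output.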

View File

@@ -2286,6 +2286,27 @@ fail_kmem_cache_alloc:
}
}
+static struct page *ipa3_alloc_page(
+	gfp_t flag, u32 *page_order, bool try_lower)
+{
+	struct page *page = NULL;
+	u32 p_order = *page_order;
+	page = __dev_alloc_pages(flag, p_order);
+	/* We will only try 1 page order lower. */
+	if (unlikely(!page)) {
+		if (try_lower && p_order > 0) {
+			p_order = p_order - 1;
+			page = __dev_alloc_pages(flag, p_order);
+			if (likely(page))
+				ipa3_ctx->stats.lower_order++;
+		}
+	}
+	*page_order = p_order;
+	return page;
+}
static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
gfp_t flag, bool is_tmp_alloc, struct ipa3_sys_context *sys)
{
@@ -2296,13 +2317,18 @@ static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
flag);
if (unlikely(!rx_pkt))
return NULL;
-rx_pkt->len = PAGE_SIZE << sys->page_order;
-rx_pkt->page_data.page = __dev_alloc_pages(flag,
-sys->page_order);
+rx_pkt->page_data.page_order = sys->page_order;
+/* Try a lower order page for order 3 pages in case allocation fails. */
+rx_pkt->page_data.page = ipa3_alloc_page(flag,
+&rx_pkt->page_data.page_order,
+(is_tmp_alloc && rx_pkt->page_data.page_order == 3));
if (unlikely(!rx_pkt->page_data.page))
goto fail_page_alloc;
+rx_pkt->len = PAGE_SIZE << rx_pkt->page_data.page_order;
rx_pkt->page_data.dma_addr = dma_map_page(ipa3_ctx->pdev,
rx_pkt->page_data.page, 0,
rx_pkt->len, DMA_FROM_DEVICE);
@@ -2320,7 +2346,7 @@ static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
return rx_pkt;
fail_dma_mapping:
-__free_pages(rx_pkt->page_data.page, sys->page_order);
+__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
fail_page_alloc:
kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
return NULL;
@@ -3052,7 +3078,7 @@ static void free_rx_page(void *chan_user_data, void *xfer_user_data)
}
dma_unmap_page(ipa3_ctx->pdev, rx_pkt->page_data.dma_addr,
rx_pkt->len, DMA_FROM_DEVICE);
-__free_pages(rx_pkt->page_data.page, rx_pkt->sys->page_order);
+__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
}
@@ -3102,7 +3128,7 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
rx_pkt->page_data.dma_addr,
rx_pkt->len,
DMA_FROM_DEVICE);
-__free_pages(rx_pkt->page_data.page, sys->page_order);
+__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
}
kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
rx_pkt);
@@ -3122,7 +3148,7 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
rx_pkt->len,
DMA_FROM_DEVICE);
__free_pages(rx_pkt->page_data.page,
-sys->page_order);
+rx_pkt->page_data.page_order);
kmem_cache_free(
ipa3_ctx->rx_pkt_wrapper_cache,
rx_pkt);
@@ -3922,7 +3948,7 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
} else {
dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
rx_pkt->len, DMA_FROM_DEVICE);
-__free_pages(rx_pkt->page_data.page, sys->page_order);
+__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
}
rx_pkt->sys->free_rx_wrapper(rx_pkt);
IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_page_drop_cnt);
@@ -3954,7 +3980,7 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
} else {
dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
rx_pkt->len, DMA_FROM_DEVICE);
-__free_pages(rx_pkt->page_data.page, sys->page_order);
+__free_pages(rx_pkt->page_data.page, rx_pkt->page_data.page_order);
}
rx_pkt->sys->free_rx_wrapper(rx_pkt);
}
@@ -3985,7 +4011,7 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
skb_shinfo(rx_skb)->nr_frags,
rx_page.page, 0,
size,
-PAGE_SIZE << sys->page_order);
+PAGE_SIZE << rx_page.page_order);
}
} else {
return NULL;
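
The core change in this file: the Rx path may now end up with a page one order lower than sys->page_order, so the order actually obtained is recorded in rx_pkt->page_data.page_order and used wherever the buffer length or the __free_pages() order is needed. A minimal userspace sketch of that retry-one-order-lower allocation and of deriving the length from the recorded order, assuming a mock allocator (the demo_* names are invented for illustration and are not the driver's API):

#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE 4096u

/* Mock page allocation: pretend order-3 (and higher) requests fail so
 * the one-order-lower fallback path is exercised. */
static void *demo_alloc_pages(unsigned int order)
{
	if (order >= 3)
		return NULL;	/* simulate high-order allocation failure */
	return malloc((size_t)DEMO_PAGE_SIZE << order);
}

/* Mirrors the shape of ipa3_alloc_page(): try the requested order,
 * fall back exactly one order lower, and report the order obtained. */
static void *demo_alloc_fallback(unsigned int *order, int try_lower)
{
	void *buf = demo_alloc_pages(*order);

	if (!buf && try_lower && *order > 0) {
		*order -= 1;
		buf = demo_alloc_pages(*order);
	}
	return buf;
}

int main(void)
{
	unsigned int order = 3;		/* requested, like sys->page_order */
	void *buf = demo_alloc_fallback(&order, 1);

	if (!buf)
		return 1;

	/* Length and free must both use the order actually obtained,
	 * not the order originally requested. */
	size_t len = (size_t)DEMO_PAGE_SIZE << order;
	printf("got order %u, len %zu\n", order, len);

	free(buf);	/* in the driver: __free_pages(page, recorded order) */
	return 0;
}

Keeping the obtained order next to the buffer is what allows every unmap/free site in the hunks above to switch from sys->page_order to rx_pkt->page_data.page_order.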

View File

@@ -568,11 +568,13 @@ enum ipa_icc_type {
* @page: skb page
* @dma_addr: DMA address of this Rx packet
* @is_tmp_alloc: skb page from tmp_alloc or recycle_list
+* @page_order: page order associated with the page.
*/
struct ipa_rx_page_data {
struct page *page;
dma_addr_t dma_addr;
bool is_tmp_alloc;
+u32 page_order;
};
struct ipa3_active_client_htable_entry {
@@ -1541,6 +1543,7 @@ struct ipa3_stats {
u32 flow_disable;
u32 tx_non_linear;
u32 rx_page_drop_cnt;
+u64 lower_order;
struct ipa3_page_recycle_stats page_recycle_stats[3];
u64 page_recycle_cnt[3][IPA_PAGE_POLL_THRESHOLD_MAX];
};
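
Since individual Rx buffers can now differ in order (a failed order-3 allocation falls back to order 2 for that buffer only), the order has to travel with each page instead of being read from the shared sys->page_order; the new page_order member of struct ipa_rx_page_data and the lower_order counter record exactly that. A short sketch of the per-buffer bookkeeping, loosely modelled on the struct above (demo_* names are illustrative only):

#include <stdio.h>
#include <stddef.h>

#define DEMO_PAGE_SIZE 4096u

/* Loosely modelled on struct ipa_rx_page_data: each Rx buffer carries
 * the page order it was actually allocated with. */
struct demo_rx_page_data {
	void *page;			/* stands in for struct page * */
	int is_tmp_alloc;
	unsigned int page_order;
};

static size_t demo_rx_len(const struct demo_rx_page_data *pd)
{
	/* Length (and later the free) derive from the stored order. */
	return (size_t)DEMO_PAGE_SIZE << pd->page_order;
}

int main(void)
{
	/* Two buffers from the same system may now differ in order:
	 * one got the requested order 3, the other fell back to 2. */
	struct demo_rx_page_data a = { .page_order = 3 };
	struct demo_rx_page_data b = { .page_order = 2 };

	printf("a: order %u len %zu\n", a.page_order, demo_rx_len(&a));
	printf("b: order %u len %zu\n", b.page_order, demo_rx_len(&b));
	return 0;
}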