diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 074bda0c4a..6dc6ae21c6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -3938,21 +3938,25 @@ static struct sk_buff *handle_page_completion(struct gsi_chan_xfer_notify
 		sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
 		rx_skb = alloc_skb(0, GFP_ATOMIC);
 		if (unlikely(!rx_skb)) {
-			IPAERR("skb alloc failure\n");
-			list_del_init(&rx_pkt->link);
-			if (!rx_page.is_tmp_alloc) {
-				init_page_count(rx_page.page);
-				spin_lock_bh(&rx_pkt->sys->spinlock);
-				/* Add the element to head. */
-				list_add(&rx_pkt->link,
-					&rx_pkt->sys->page_recycle_repl->page_repl_head);
-				spin_unlock_bh(&rx_pkt->sys->spinlock);
-			} else {
-				dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
-					rx_pkt->len, DMA_FROM_DEVICE);
-				__free_pages(rx_pkt->page_data.page, sys->page_order);
+			IPAERR("skb alloc failure, free all pending pages\n");
+			list_for_each_entry_safe(rx_pkt, tmp, head, link) {
+				rx_page = rx_pkt->page_data;
+				size = rx_pkt->data_len;
+				list_del_init(&rx_pkt->link);
+				if (!rx_page.is_tmp_alloc) {
+					init_page_count(rx_page.page);
+					spin_lock_bh(&rx_pkt->sys->spinlock);
+					/* Add the element to head. */
+					list_add(&rx_pkt->link,
+						&rx_pkt->sys->page_recycle_repl->page_repl_head);
+					spin_unlock_bh(&rx_pkt->sys->spinlock);
+				} else {
+					dma_unmap_page(ipa3_ctx->pdev, rx_page.dma_addr,
+						rx_pkt->len, DMA_FROM_DEVICE);
+					__free_pages(rx_pkt->page_data.page, sys->page_order);
+				}
+				rx_pkt->sys->free_rx_wrapper(rx_pkt);
 			}
-			rx_pkt->sys->free_rx_wrapper(rx_pkt);
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_page_drop_cnt);
 			return NULL;
 		}
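
Note on the pattern: the behavioral change above is that the skb-allocation error path now walks every pending wrapper on `head` (recycling or freeing each page) instead of cleaning up only the current `rx_pkt`, and it must use `list_for_each_entry_safe()` because each entry is unlinked, and possibly freed, mid-walk. What follows is a minimal userspace sketch of that iterate-while-deleting pattern, not driver code: the `list_head` helpers are simplified stand-ins for the kernel's <linux/list.h>, and `struct rx_pkt` here is a hypothetical stand-in for the driver's `ipa3_rx_pkt_wrapper`.

/* Minimal userspace sketch of list_for_each_entry_safe(); the list
 * helpers below mimic, but are not, the kernel's <linux/list.h>.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *e, struct list_head *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = e->prev = e;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* The _safe variant caches the next entry in 'n' before the loop body
 * runs, so the body may unlink and free 'pos' without breaking the walk.
 */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = container_of((head)->next, typeof(*pos), member),	\
	     n = container_of(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = container_of(n->member.next, typeof(*n), member))

struct rx_pkt {			/* hypothetical stand-in for ipa3_rx_pkt_wrapper */
	int id;
	struct list_head link;
};

int main(void)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct rx_pkt *pkt, *tmp;
	int i;

	for (i = 0; i < 3; i++) {
		pkt = malloc(sizeof(*pkt));
		if (!pkt)
			return 1;
		pkt->id = i;
		list_add_tail(&pkt->link, &head);
	}

	/* Mirrors the error path in the patch: unlink and release every
	 * pending entry, not just the current one.
	 */
	list_for_each_entry_safe(pkt, tmp, &head, link) {
		list_del_init(&pkt->link);
		printf("releasing pkt %d\n", pkt->id);
		free(pkt);
	}
	return 0;
}

The cached second cursor is the point of the change: with the plain list_for_each_entry() iterator, freeing the current entry would leave the loop advancing through pointers in freed memory.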