qcacmn: buffer replenish and reap without map and unmap

Use dma_inv_range calls instead of the dma map and unmap
APIs during replenish and reap. The new code is guarded by the
compile-time macro QCA_DP_RX_NBUF_NO_MAP_UNMAP.

Before change: 4160 Mbps @ 96.5% (core 3)
After change:  4160 Mbps @ 90.5% (core 3)

Change-Id: I61bca349a369ace06cd86f353880108cee013cb1
Author: Tallapragada Kalyan
Date: 2022-01-06 18:48:33 +05:30
Committed by: Madan Koyyalamudi
Parent: 03f7a9accb
Commit: 9639c91317
10 changed files with 716 additions and 35 deletions
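
For orientation before the per-file diffs, here is a rough sketch of the replenish pattern this change introduces (illustration only, not part of the commit; ring_entry and cookie are placeholders for the real HAL ring bookkeeping). Each buffer gets a barrierless cache invalidate plus a virtual-to-physical conversion instead of a per-buffer DMA map, and a single qdf_dsb() barrier is issued once per batch before the ring access is closed.

/* Illustration only (not in the commit): simplified no-map replenish loop.
 * ring_entry and cookie stand in for the real HAL ring bookkeeping.
 */
for (count = 0; count < num_req_buffers; count++) {
	/* plain skb allocation; no qdf_nbuf_map_*() call anywhere */
	nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
	if (qdf_unlikely(!nbuf))
		break;
	/* invalidate the CPU cache lines covering the buffer, no barrier yet */
	paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf, rx_desc_pool->buf_size);
	/* hand the physical address straight to the RXDMA ring entry */
	hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry, paddr,
				     cookie, rx_desc_pool->owner);
}
/* one data synchronization barrier for the whole batch */
qdf_dsb();

On the reap side, dp_rx_nbuf_unmap() drops the unmap entirely and, unless speculative reads are disabled, only invalidates the buffer with qdf_nbuf_dma_inv_range().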


@@ -2605,6 +2605,7 @@ struct cdp_pdev_stats {
/* Rx errors */
struct {
uint32_t desc_alloc_fail;
uint32_t desc_lt_alloc_fail;
uint32_t ip_csum_err;
uint32_t tcp_udp_csum_err;
uint32_t rxdma_error;


@@ -2410,7 +2410,9 @@ static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
union dp_rx_desc_list_elem_t *desc_list = NULL;
union dp_rx_desc_list_elem_t *tail = NULL;
struct dp_srng *rx_refill_buf_ring;
struct rx_desc_pool *rx_desc_pool;
rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev];
if (wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
rx_refill_buf_ring =
&soc->rx_refill_buf_ring[mac_for_pdev];
@@ -2419,11 +2421,12 @@ static int dp_process_lmac_rings(struct dp_intr *int_ctx, int total_budget)
&soc->rx_refill_buf_ring[pdev->lmac_id];
intr_stats->num_host2rxdma_ring_masks++;
DP_STATS_INC(pdev, replenish.low_thresh_intrs, 1);
dp_rx_buffers_replenish(soc, mac_for_pdev,
rx_refill_buf_ring,
&soc->rx_desc_buf[mac_for_pdev],
0, &desc_list, &tail);
dp_rx_buffers_lt_replenish_simple(soc, mac_for_pdev,
rx_refill_buf_ring,
rx_desc_pool,
0,
&desc_list,
&tail);
}
}


@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2022 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -267,6 +267,300 @@ dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
return QDF_STATUS_SUCCESS;
}
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
QDF_STATUS
__dp_rx_buffers_no_map_lt_replenish(struct dp_soc *soc, uint32_t mac_id,
struct dp_srng *dp_rxdma_srng,
struct rx_desc_pool *rx_desc_pool)
{
struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
uint32_t count;
void *rxdma_ring_entry;
union dp_rx_desc_list_elem_t *next = NULL;
void *rxdma_srng;
qdf_nbuf_t nbuf;
qdf_dma_addr_t paddr;
uint16_t num_entries_avail = 0;
uint16_t num_alloc_desc = 0;
union dp_rx_desc_list_elem_t *desc_list = NULL;
union dp_rx_desc_list_elem_t *tail = NULL;
int sync_hw_ptr = 0;
rxdma_srng = dp_rxdma_srng->hal_srng;
if (qdf_unlikely(!dp_pdev)) {
dp_rx_err("%pK: pdev is null for mac_id = %d", soc, mac_id);
return QDF_STATUS_E_FAILURE;
}
if (qdf_unlikely(!rxdma_srng)) {
dp_rx_debug("%pK: rxdma srng not initialized", soc);
return QDF_STATUS_E_FAILURE;
}
hal_srng_access_start(soc->hal_soc, rxdma_srng);
num_entries_avail = hal_srng_src_num_avail(soc->hal_soc,
rxdma_srng,
sync_hw_ptr);
dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
soc, num_entries_avail);
if (qdf_unlikely(num_entries_avail <
((dp_rxdma_srng->num_entries * 3) / 4))) {
hal_srng_access_end(soc->hal_soc, rxdma_srng);
return QDF_STATUS_E_FAILURE;
}
DP_STATS_INC(dp_pdev, replenish.low_thresh_intrs, 1);
num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
rx_desc_pool,
num_entries_avail,
&desc_list,
&tail);
if (!num_alloc_desc) {
dp_rx_err("%pK: no free rx_descs in freelist", soc);
DP_STATS_INC(dp_pdev, err.desc_lt_alloc_fail,
num_entries_avail);
hal_srng_access_end(soc->hal_soc, rxdma_srng);
return QDF_STATUS_E_NOMEM;
}
for (count = 0; count < num_alloc_desc; count++) {
next = desc_list->next;
qdf_prefetch(next);
nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
if (qdf_unlikely(!nbuf)) {
DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
break;
}
paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
rx_desc_pool->buf_size);
rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc,
rxdma_srng);
qdf_assert_always(rxdma_ring_entry);
desc_list->rx_desc.nbuf = nbuf;
desc_list->rx_desc.rx_buf_start = nbuf->data;
desc_list->rx_desc.unmapped = 0;
/* rx_desc.in_use should be zero at this time*/
qdf_assert_always(desc_list->rx_desc.in_use == 0);
desc_list->rx_desc.in_use = 1;
desc_list->rx_desc.in_err_state = 0;
hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
paddr,
desc_list->rx_desc.cookie,
rx_desc_pool->owner);
desc_list = next;
}
qdf_dsb();
hal_srng_access_end(soc->hal_soc, rxdma_srng);
/* No need to count the number of bytes received during replenish.
* Therefore set replenish.pkts.bytes as 0.
*/
DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
DP_STATS_INC(dp_pdev, buf_freelist, (num_alloc_desc - count));
/*
* add any available free desc back to the free list
*/
if (desc_list)
dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
mac_id, rx_desc_pool);
return QDF_STATUS_SUCCESS;
}
QDF_STATUS
__dp_rx_buffers_no_map_replenish(struct dp_soc *soc, uint32_t mac_id,
struct dp_srng *dp_rxdma_srng,
struct rx_desc_pool *rx_desc_pool,
uint32_t num_req_buffers,
union dp_rx_desc_list_elem_t **desc_list,
union dp_rx_desc_list_elem_t **tail)
{
struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
uint32_t count;
void *rxdma_ring_entry;
union dp_rx_desc_list_elem_t *next;
void *rxdma_srng;
qdf_nbuf_t nbuf;
qdf_dma_addr_t paddr;
rxdma_srng = dp_rxdma_srng->hal_srng;
if (qdf_unlikely(!dp_pdev)) {
dp_rx_err("%pK: pdev is null for mac_id = %d",
soc, mac_id);
return QDF_STATUS_E_FAILURE;
}
if (qdf_unlikely(!rxdma_srng)) {
dp_rx_debug("%pK: rxdma srng not initialized", soc);
DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
return QDF_STATUS_E_FAILURE;
}
dp_rx_debug("%pK: requested %d buffers for replenish",
soc, num_req_buffers);
hal_srng_access_start(soc->hal_soc, rxdma_srng);
for (count = 0; count < num_req_buffers; count++) {
next = (*desc_list)->next;
qdf_prefetch(next);
nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
if (qdf_unlikely(!nbuf)) {
DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
break;
}
paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
rx_desc_pool->buf_size);
rxdma_ring_entry = (struct dp_buffer_addr_info *)
hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
if (!rxdma_ring_entry)
break;
qdf_assert_always(rxdma_ring_entry);
(*desc_list)->rx_desc.nbuf = nbuf;
(*desc_list)->rx_desc.rx_buf_start = nbuf->data;
(*desc_list)->rx_desc.unmapped = 0;
/* rx_desc.in_use should be zero at this time*/
qdf_assert_always((*desc_list)->rx_desc.in_use == 0);
(*desc_list)->rx_desc.in_use = 1;
(*desc_list)->rx_desc.in_err_state = 0;
hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
paddr,
(*desc_list)->rx_desc.cookie,
rx_desc_pool->owner);
*desc_list = next;
}
qdf_dsb();
hal_srng_access_end(soc->hal_soc, rxdma_srng);
/* No need to count the number of bytes received during replenish.
* Therefore set replenish.pkts.bytes as 0.
*/
DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
DP_STATS_INC(dp_pdev, buf_freelist, (num_req_buffers - count));
/*
* add any available free desc back to the free list
*/
if (*desc_list)
dp_rx_add_desc_list_to_free_list(soc, desc_list, tail,
mac_id, rx_desc_pool);
return QDF_STATUS_SUCCESS;
}
QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc,
uint32_t mac_id,
struct dp_srng *dp_rxdma_srng,
struct rx_desc_pool *rx_desc_pool,
uint32_t num_req_buffers)
{
struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
uint32_t count;
uint32_t nr_descs = 0;
void *rxdma_ring_entry;
union dp_rx_desc_list_elem_t *next;
void *rxdma_srng;
qdf_nbuf_t nbuf;
qdf_dma_addr_t paddr;
union dp_rx_desc_list_elem_t *desc_list = NULL;
union dp_rx_desc_list_elem_t *tail = NULL;
rxdma_srng = dp_rxdma_srng->hal_srng;
if (qdf_unlikely(!dp_pdev)) {
dp_rx_err("%pK: pdev is null for mac_id = %d",
soc, mac_id);
return QDF_STATUS_E_FAILURE;
}
if (qdf_unlikely(!rxdma_srng)) {
dp_rx_debug("%pK: rxdma srng not initialized", soc);
DP_STATS_INC(dp_pdev, replenish.rxdma_err, num_req_buffers);
return QDF_STATUS_E_FAILURE;
}
dp_rx_debug("%pK: requested %d buffers for replenish",
soc, num_req_buffers);
nr_descs = dp_rx_get_free_desc_list(soc, mac_id, rx_desc_pool,
num_req_buffers, &desc_list, &tail);
if (!nr_descs) {
dp_err("no free rx_descs in freelist");
DP_STATS_INC(dp_pdev, err.desc_alloc_fail, num_req_buffers);
return QDF_STATUS_E_NOMEM;
}
dp_debug("got %u RX descs for driver attach", nr_descs);
hal_srng_access_start(soc->hal_soc, rxdma_srng);
for (count = 0; count < nr_descs; count++) {
next = desc_list->next;
qdf_prefetch(next);
nbuf = dp_rx_nbuf_alloc(soc, rx_desc_pool);
if (qdf_unlikely(!nbuf)) {
DP_STATS_INC(dp_pdev, replenish.nbuf_alloc_fail, 1);
break;
}
paddr = dp_rx_nbuf_sync_no_dsb(soc, nbuf,
rx_desc_pool->buf_size);
rxdma_ring_entry = (struct dp_buffer_addr_info *)
hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
if (!rxdma_ring_entry)
break;
qdf_assert_always(rxdma_ring_entry);
desc_list->rx_desc.nbuf = nbuf;
desc_list->rx_desc.rx_buf_start = nbuf->data;
desc_list->rx_desc.unmapped = 0;
/* rx_desc.in_use should be zero at this time*/
qdf_assert_always(desc_list->rx_desc.in_use == 0);
desc_list->rx_desc.in_use = 1;
desc_list->rx_desc.in_err_state = 0;
hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry,
paddr,
desc_list->rx_desc.cookie,
rx_desc_pool->owner);
desc_list = next;
}
qdf_dsb();
hal_srng_access_end(soc->hal_soc, rxdma_srng);
/* No need to count the number of bytes received during replenish.
* Therefore set replenish.pkts.bytes as 0.
*/
DP_STATS_INC_PKT(dp_pdev, replenish.pkts, count, 0);
return QDF_STATUS_SUCCESS;
}
#endif
/*
* dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
* called during dp rx initialization
@@ -2510,8 +2804,10 @@ dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev)
*/
dp_rx_buffer_pool_init(soc, mac_for_pdev);
return dp_pdev_rx_buffers_attach(soc, mac_for_pdev, dp_rxdma_srng,
rx_desc_pool, rxdma_entries - 1);
return dp_pdev_rx_buffers_attach_simple(soc, mac_for_pdev,
dp_rxdma_srng,
rx_desc_pool,
rxdma_entries - 1);
}
/*


@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2022 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -24,6 +24,7 @@
#include "dp_peer.h"
#include "dp_internal.h"
#include <qdf_tracepoint.h>
#include "dp_ipa.h"
#ifdef RXDMA_OPTIMIZATION
#ifndef RX_DATA_BUFFER_ALIGNMENT
@@ -1359,6 +1360,69 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
union dp_rx_desc_list_elem_t **desc_list,
union dp_rx_desc_list_elem_t **tail,
const char *func_name);
/*
* __dp_rx_buffers_no_map_replenish() - replenish rxdma ring with rx nbufs
* use direct APIs to invalidate the buffer
* and get the physical address of the
* nbuf instead of the map API; called during
* dp rx initialization and at the end
* of dp_rx_process.
*
* @soc: core txrx main context
* @mac_id: mac_id which is one of 3 mac_ids
* @dp_rxdma_srng: dp rxdma circular ring
* @rx_desc_pool: Pointer to free Rx descriptor pool
* @num_req_buffers: number of buffers to be replenished
* @desc_list: list of descs if called from dp_rx_process
* or NULL during dp rx initialization or out of buffer
* interrupt.
* @tail: tail of descs list
* Return: return success or failure
*/
QDF_STATUS
__dp_rx_buffers_no_map_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
struct dp_srng *dp_rxdma_srng,
struct rx_desc_pool *rx_desc_pool,
uint32_t num_req_buffers,
union dp_rx_desc_list_elem_t **desc_list,
union dp_rx_desc_list_elem_t **tail);
/*
* __dp_rx_buffers_no_map_lt_replenish() - replenish rxdma ring with rx nbufs
* use direct APIs to invalidate the buffer
* and get the physical address of the
* nbuf instead of the map API; called when
* the low threshold interrupt is triggered
*
* @soc: core txrx main context
* @mac_id: mac_id which is one of 3 mac_ids
* @dp_rxdma_srng: dp rxdma circular ring
* @rx_desc_pool: Pointer to free Rx descriptor pool
* Return: return success or failure
*/
QDF_STATUS
__dp_rx_buffers_no_map_lt_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
struct dp_srng *dp_rxdma_srng,
struct rx_desc_pool *rx_desc_pool);
/*
* __dp_pdev_rx_buffers_no_map_attach() - replenish rxdma ring with rx nbufs
* use direct APIs to invalidate the buffer
* and get the physical address of the
* nbuf instead of the map API; called during
* dp rx initialization.
*
* @soc: core txrx main context
* @mac_id: mac_id which is one of 3 mac_ids
* @dp_rxdma_srng: dp rxdma circular ring
* @rx_desc_pool: Pointer to free Rx descriptor pool
* @num_req_buffers: number of buffers to be replenished
* Return: return success or failure
*/
QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *dp_soc,
uint32_t mac_id,
struct dp_srng *dp_rxdma_srng,
struct rx_desc_pool *rx_desc_pool,
uint32_t num_req_buffers);
/*
* dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs
@@ -2091,4 +2155,191 @@ bool dp_rx_pkt_tracepoints_enabled(void)
qdf_trace_dp_rx_udp_pkt_enabled() ||
qdf_trace_dp_rx_pkt_enabled());
}
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
static inline
QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
struct dp_srng *rxdma_srng,
struct rx_desc_pool *rx_desc_pool,
uint32_t num_req_buffers)
{
return __dp_pdev_rx_buffers_no_map_attach(soc, mac_id,
rxdma_srng,
rx_desc_pool,
num_req_buffers);
}
static inline
void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
struct dp_srng *rxdma_srng,
struct rx_desc_pool *rx_desc_pool,
uint32_t num_req_buffers,
union dp_rx_desc_list_elem_t **desc_list,
union dp_rx_desc_list_elem_t **tail)
{
__dp_rx_buffers_no_map_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
num_req_buffers, desc_list, tail);
}
static inline
void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
struct dp_srng *rxdma_srng,
struct rx_desc_pool *rx_desc_pool,
uint32_t num_req_buffers,
union dp_rx_desc_list_elem_t **desc_list,
union dp_rx_desc_list_elem_t **tail)
{
__dp_rx_buffers_no_map_lt_replenish(soc, mac_id, rxdma_srng,
rx_desc_pool);
}
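/* Invalidate the nbuf's data cache lines (no barrier) and return the
 * buffer's physical address; no per-buffer DMA map is performed here.
 */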
static inline
qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
qdf_nbuf_t nbuf,
uint32_t buf_size)
{
qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
(void *)(nbuf->data + buf_size));
return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}
static inline
qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
qdf_nbuf_t nbuf,
uint32_t buf_size)
{
qdf_nbuf_dma_inv_range((void *)nbuf->data,
(void *)(nbuf->data + buf_size));
return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
}
#if !defined(SPECULATIVE_READ_DISABLED)
static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
struct dp_rx_desc *rx_desc,
uint8_t reo_ring_num)
{
struct rx_desc_pool *rx_desc_pool;
qdf_nbuf_t nbuf;
rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
nbuf = rx_desc->nbuf;
qdf_nbuf_dma_inv_range((void *)nbuf->data,
(void *)(nbuf->data + rx_desc_pool->buf_size));
}
#else
static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
struct dp_rx_desc *rx_desc,
uint8_t reo_ring_num)
{
}
#endif
static inline
void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
uint32_t bufs_reaped)
{
}
static inline
qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool)
{
return qdf_nbuf_alloc_simple(soc->osdev, rx_desc_pool->buf_size);
}
#else
static inline
QDF_STATUS dp_pdev_rx_buffers_attach_simple(struct dp_soc *soc, uint32_t mac_id,
struct dp_srng *rxdma_srng,
struct rx_desc_pool *rx_desc_pool,
uint32_t num_req_buffers)
{
return dp_pdev_rx_buffers_attach(soc, mac_id,
rxdma_srng,
rx_desc_pool,
num_req_buffers);
}
static inline
void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
struct dp_srng *rxdma_srng,
struct rx_desc_pool *rx_desc_pool,
uint32_t num_req_buffers,
union dp_rx_desc_list_elem_t **desc_list,
union dp_rx_desc_list_elem_t **tail)
{
dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
num_req_buffers, desc_list, tail);
}
static inline
void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
struct dp_srng *rxdma_srng,
struct rx_desc_pool *rx_desc_pool,
uint32_t num_req_buffers,
union dp_rx_desc_list_elem_t **desc_list,
union dp_rx_desc_list_elem_t **tail)
{
dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
num_req_buffers, desc_list, tail);
}
static inline
qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
qdf_nbuf_t nbuf,
uint32_t buf_size)
{
return (qdf_dma_addr_t)NULL;
}
static inline
qdf_dma_addr_t dp_rx_nbuf_sync(struct dp_soc *dp_soc,
qdf_nbuf_t nbuf,
uint32_t buf_size)
{
return (qdf_dma_addr_t)NULL;
}
static inline
void dp_rx_nbuf_unmap(struct dp_soc *soc,
struct dp_rx_desc *rx_desc,
uint8_t reo_ring_num)
{
struct rx_desc_pool *rx_desc_pool;
rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
rx_desc_pool->buf_size,
false);
qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
QDF_DMA_FROM_DEVICE,
rx_desc_pool->buf_size);
dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
}
static inline
void dp_rx_per_core_stats_update(struct dp_soc *soc, uint8_t ring_id,
uint32_t bufs_reaped)
{
DP_STATS_INC(soc,
rx.ring_packets[smp_processor_id()][ring_id], bufs_reaped);
}
static inline
qdf_nbuf_t dp_rx_nbuf_alloc(struct dp_soc *soc,
struct rx_desc_pool *rx_desc_pool)
{
return qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
RX_BUFFER_RESERVATION,
rx_desc_pool->buf_alignment, FALSE);
}
#endif
#endif /* _DP_RX_H */


@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -504,16 +504,8 @@ more_data:
* move unmap after scattered msdu waiting break logic
* in case double skb unmap happened.
*/
rx_desc_pool = &soc->rx_desc_buf[rx_desc->pool_id];
dp_ipa_reo_ctx_buf_mapping_lock(soc, reo_ring_num);
dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
rx_desc_pool->buf_size,
false);
qdf_nbuf_unmap_nbytes_single(soc->osdev, rx_desc->nbuf,
QDF_DMA_FROM_DEVICE,
rx_desc_pool->buf_size);
dp_rx_nbuf_unmap(soc, rx_desc, reo_ring_num);
rx_desc->unmapped = 1;
dp_ipa_reo_ctx_buf_mapping_unlock(soc, reo_ring_num);
DP_RX_PROCESS_NBUF(soc, nbuf_head, nbuf_tail, ebuf_head,
ebuf_tail, rx_desc);
/*
@@ -547,9 +539,7 @@ more_data:
done:
dp_rx_srng_access_end(int_ctx, soc, hal_ring_hdl);
DP_STATS_INCC(soc,
rx.ring_packets[qdf_get_smp_processor_id()][reo_ring_num],
num_rx_bufs_reaped, num_rx_bufs_reaped);
dp_rx_per_core_stats_update(soc, reo_ring_num, num_rx_bufs_reaped);
for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
/*
@@ -563,9 +553,10 @@ done:
rx_desc_pool = &soc->rx_desc_buf[mac_id];
dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
rx_desc_pool, rx_bufs_reaped[mac_id],
&head[mac_id], &tail[mac_id]);
dp_rx_buffers_replenish_simple(soc, mac_id, dp_rxdma_srng,
rx_desc_pool,
rx_bufs_reaped[mac_id],
&head[mac_id], &tail[mac_id]);
}
dp_verbose_debug("replenished %u\n", rx_bufs_reaped[0]);
@@ -849,9 +840,6 @@ done:
if (qdf_likely(vdev->rx_decap_type ==
htt_cmn_pkt_type_ethernet) &&
qdf_likely(!vdev->mesh_vdev)) {
/* WDS Destination Address Learning */
dp_rx_da_learn(soc, rx_tlv_hdr, peer, nbuf);
/* Due to HW issue, sometimes we see that the sa_idx
* and da_idx are invalid with sa_valid and da_valid
* bits set


@@ -1094,6 +1094,40 @@ qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end)
__qdf_nbuf_dma_inv_range(buf_start, buf_end);
}
/**
* qdf_nbuf_dma_inv_range_no_dsb() - barrierless invalidate of the specified
* virtual address range
* @buf_start: start address
* @buf_end: end address
*
* Return: none
*/
static inline void
qdf_nbuf_dma_inv_range_no_dsb(const void *buf_start, const void *buf_end)
{
__qdf_nbuf_dma_inv_range_no_dsb(buf_start, buf_end);
}
/**
* qdf_nbuf_dma_clean_range_no_dsb() - barrierless clean of the specified
* virtual address range
* @buf_start: start address
* @buf_end: end address
*
* Return: none
*/
static inline void
qdf_nbuf_dma_clean_range_no_dsb(const void *buf_start, const void *buf_end)
{
__qdf_nbuf_dma_clean_range_no_dsb(buf_start, buf_end);
}
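/**
 * qdf_dsb() - issue a data synchronization barrier
 *
 * Return: none
 */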
static inline void
qdf_dsb(void)
{
__qdf_dsb();
}
static inline int qdf_nbuf_get_num_frags(qdf_nbuf_t buf)
{
return __qdf_nbuf_get_num_frags(buf);
@@ -1663,6 +1697,9 @@ void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf);
/* nbuf allocation routines */
#define qdf_nbuf_alloc_simple(d, s) \
__qdf_nbuf_alloc_simple(d, s)
#define qdf_nbuf_alloc(d, s, r, a, p) \
qdf_nbuf_alloc_debug(d, s, r, a, p, __func__, __LINE__)


@@ -784,6 +784,8 @@ __qdf_nbuf_t
__qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve, int align,
int prio, const char *func, uint32_t line);
__qdf_nbuf_t __qdf_nbuf_alloc_simple(__qdf_device_t osdev, size_t size);
/**
* __qdf_nbuf_alloc_no_recycler() - Allocates skb
* @size: Size to be allocated for skb


@@ -1,5 +1,6 @@
/*
* Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2014-2022 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -203,4 +204,12 @@ qdf_nbuf_deinit_replenish_timer(void)
static inline void
__qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end) {}
static inline void
__qdf_nbuf_dma_inv_range_no_dsb(const void *buf_start, const void *buf_end) {}
static inline void
__qdf_nbuf_dma_clean_range_no_dsb(const void *buf_start, const void *buf_end) {}
static inline void
__qdf_dsb(void) {}
#endif /*_I_QDF_NBUF_M_H */


@@ -1,6 +1,6 @@
/*
* Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2014-2022 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -161,6 +161,35 @@ __qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end)
{
dmac_inv_range(buf_start, buf_end);
}
static inline void
__qdf_nbuf_dma_inv_range_no_dsb(const void *buf_start, const void *buf_end)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 89)
dmac_inv_range_no_dsb(buf_start, buf_end);
#else
dmac_inv_range(buf_start, buf_end);
#endif
}
static inline void
__qdf_nbuf_dma_clean_range_no_dsb(const void *buf_start, const void *buf_end)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 89)
dmac_clean_range_no_dsb(buf_start, buf_end);
#else
dmac_clean_range(buf_start, buf_end);
#endif
}
static inline void
__qdf_dsb(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 89)
dsb(st);
#endif
}
#elif defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__)
static inline void
__qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end)
@@ -168,10 +197,44 @@ __qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end)
dma_cache_inv((unsigned long)buf_start,
(unsigned long)(buf_end - buf_start));
}
static inline void
__qdf_nbuf_dma_inv_range_no_dsb(const void *buf_start, const void *buf_end)
{
dma_cache_inv((unsigned long)buf_start,
(unsigned long)(buf_end - buf_start));
}
static inline void
__qdf_nbuf_dma_clean_range_no_dsb(const void *buf_start, const void *buf_end)
{
dmac_cache_wback((unsigned long)buf_start,
(unsigned long)(buf_end - buf_start));
}
static inline void
__qdf_dsb(void)
{
}
#else
static inline void
__qdf_nbuf_dma_inv_range(const void *buf_start, const void *buf_end)
{
}
static inline void
__qdf_nbuf_dma_inv_range_no_dsb(const void *buf_start, const void *buf_end)
{
}
static inline void
__qdf_nbuf_dma_clean_range_no_dsb(const void *buf_start, const void *buf_end)
{
}
static inline void
__qdf_dsb(void)
{
}
#endif
#endif /*_I_QDF_NBUF_W_H */


@@ -1,6 +1,6 @@
/*
* Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2014-2022 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -595,6 +595,37 @@ skb_alloc:
return skb;
}
#else
struct sk_buff *__qdf_nbuf_alloc_simple(qdf_device_t osdev, size_t size)
{
struct sk_buff *skb;
int flags = GFP_KERNEL;
if (in_interrupt() || irqs_disabled() || in_atomic()) {
flags = GFP_ATOMIC;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
/*
* Observed that kcompactd burns out CPU to make order-3 page.
* __netdev_alloc_skb has 4k page fallback option just in case of
* failing high order page allocation so we don't need to be
* hard. Make kcompactd rest in piece.
*/
flags = flags & ~__GFP_KSWAPD_RECLAIM;
#endif
}
skb = __netdev_alloc_skb(NULL, size, flags);
if (skb)
qdf_nbuf_count_inc(skb);
else
return NULL;
return skb;
}
qdf_export_symbol(__qdf_nbuf_alloc_simple);
struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
int align, int prio, const char *func,
uint32_t line)