qcacmn: Monitor Status ring memory optimization

Memory optimization of the monitor status ring: allocate buffers during
replenish using alloc_skb (Linux API), which creates a buffer of the
required size rather than the 4k buffer the dev_alloc_skb path produces.

Change-Id: I3ae5e403de28c4570f8ac3b50d3ca878a9e4b2f9
CRs-Fixed: 2733931
Author: Shivani Soni
Date: 2020-07-07 09:52:55 +05:30
Committed by: snandini
Parent: bd6a237677
Commit: c4c0a179f6
6 changed files with 71 additions and 68 deletions
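For context on the saving, the kernel slab allocator rounds an skb data allocation up to the next power-of-two size. The stand-alone user-space sketch below is illustrative only (slab_size is a toy helper, and the skb_shared_info overhead of 384 bytes is an assumed value that varies by kernel and architecture); it shows why a 2048-byte data request plus shared-info overhead spills into a 4096-byte slab, while the trimmed monitor-status size keeps the total within 2048 bytes.

#include <stdio.h>

#define ASSUMED_SHINFO 384	/* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

/* Toy model: next power-of-two slab a kmalloc request of n bytes falls into */
static unsigned int slab_size(unsigned int n)
{
	unsigned int s = 32;

	while (s < n)
		s <<= 1;
	return s;
}

int main(void)
{
	/* Old path: 2048-byte data request + shared info -> 4096-byte slab */
	printf("old: %u\n", slab_size(2048 + ASSUMED_SHINFO));
	/* New path: 1536 data + 128 alignment + shared info -> 2048-byte slab */
	printf("new: %u\n", slab_size(1536 + 128 + ASSUMED_SHINFO));
	return 0;
}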


@@ -11333,7 +11333,7 @@ static void dp_cfr_filter(struct cdp_soc_t *soc_hdl,
 					soc->rxdma_mon_status_ring[mac_id]
 					.hal_srng,
 					RXDMA_MONITOR_STATUS,
-					RX_DATA_BUFFER_SIZE,
+					RX_MON_STATUS_BUF_SIZE,
 					&htt_tlv_filter);
 		}
 	}


@@ -156,7 +156,7 @@ dp_mon_ht2_rx_ring_cfg(struct dp_soc *soc,
 		hal_ring_hdl =
 			soc->rxdma_mon_status_ring[lmac_id].hal_srng;
 		hal_ring_type = RXDMA_MONITOR_STATUS;
-		ring_buf_size = RX_DATA_BUFFER_SIZE;
+		ring_buf_size = RX_MON_STATUS_BUF_SIZE;
 		break;

 	case DP_MON_FILTER_SRNG_TYPE_RXDMA_MON_BUF:


@@ -3225,66 +3225,6 @@ dp_rx_pdev_buffers_free(struct dp_pdev *pdev)
 	dp_rx_buffer_pool_deinit(soc, mac_for_pdev);
 }

-/*
- * dp_rx_nbuf_prepare() - prepare RX nbuf
- * @soc: core txrx main context
- * @pdev: core txrx pdev context
- *
- * This function alloc & map nbuf for RX dma usage, retry it if failed
- * until retry times reaches max threshold or succeeded.
- *
- * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
- */
-qdf_nbuf_t
-dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
-{
-	uint8_t *buf;
-	int32_t nbuf_retry_count;
-	QDF_STATUS ret;
-	qdf_nbuf_t nbuf = NULL;
-
-	for (nbuf_retry_count = 0; nbuf_retry_count <
-		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
-			nbuf_retry_count++) {
-		/* Allocate a new skb */
-		nbuf = qdf_nbuf_alloc(soc->osdev,
-				      RX_DATA_BUFFER_SIZE,
-				      RX_BUFFER_RESERVATION,
-				      RX_DATA_BUFFER_ALIGNMENT,
-				      FALSE);
-		if (!nbuf) {
-			DP_STATS_INC(pdev,
-				     replenish.nbuf_alloc_fail, 1);
-			continue;
-		}
-
-		buf = qdf_nbuf_data(nbuf);
-
-		memset(buf, 0, RX_DATA_BUFFER_SIZE);
-
-		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
-						 QDF_DMA_FROM_DEVICE,
-						 RX_DATA_BUFFER_SIZE);
-
-		/* nbuf map failed */
-		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
-			qdf_nbuf_free(nbuf);
-			DP_STATS_INC(pdev, replenish.map_err, 1);
-			continue;
-		}
-		/* qdf_nbuf alloc and map succeeded */
-		break;
-	}
-
-	/* qdf_nbuf still alloc or map failed */
-	if (qdf_unlikely(nbuf_retry_count >=
-			 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
-		return NULL;
-
-	return nbuf;
-}
-
 #ifdef DP_RX_SPECIAL_FRAME_NEED
 bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_peer *peer,
				 qdf_nbuf_t nbuf, uint32_t frame_mask,


@@ -1256,9 +1256,6 @@ QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
			   struct dp_peer *peer);

-qdf_nbuf_t
-dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);
-
 /*
  * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
  *


@@ -1640,7 +1640,7 @@ dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
 			rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

 			if ((rx_tlv - rx_tlv_start) >=
-					RX_DATA_BUFFER_SIZE)
+					RX_MON_STATUS_BUF_SIZE)
				break;

 	} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
@@ -1732,6 +1732,63 @@ dp_rx_mon_status_process_tlv(struct dp_soc *soc, struct dp_intr *int_ctx,
 	return;
 }

+/*
+ * dp_rx_nbuf_prepare() - prepare RX nbuf
+ * @soc: core txrx main context
+ * @pdev: core txrx pdev context
+ *
+ * This function alloc & map nbuf for RX dma usage, retry it if failed
+ * until retry times reaches max threshold or succeeded.
+ *
+ * Return: qdf_nbuf_t pointer if succeeded, NULL if failed.
+ */
+static inline qdf_nbuf_t
+dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
+{
+	uint8_t *buf;
+	int32_t nbuf_retry_count;
+	QDF_STATUS ret;
+	qdf_nbuf_t nbuf = NULL;
+
+	for (nbuf_retry_count = 0; nbuf_retry_count <
+		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
+			nbuf_retry_count++) {
+		/* Allocate a new skb using alloc_skb */
+		nbuf = qdf_nbuf_alloc_no_recycler(RX_MON_STATUS_BUF_SIZE,
+						  RX_BUFFER_RESERVATION,
+						  RX_DATA_BUFFER_ALIGNMENT);
+		if (!nbuf) {
+			DP_STATS_INC(pdev, replenish.nbuf_alloc_fail, 1);
+			continue;
+		}
+
+		buf = qdf_nbuf_data(nbuf);
+
+		memset(buf, 0, RX_MON_STATUS_BUF_SIZE);
+
+		ret = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
+						 QDF_DMA_FROM_DEVICE,
+						 RX_MON_STATUS_BUF_SIZE);
+
+		/* nbuf map failed */
+		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
+			qdf_nbuf_free(nbuf);
+			DP_STATS_INC(pdev, replenish.map_err, 1);
+			continue;
+		}
+		/* qdf_nbuf alloc and map succeeded */
+		break;
+	}
+
+	/* qdf_nbuf still alloc or map failed */
+	if (qdf_unlikely(nbuf_retry_count >=
+			 QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
+		return NULL;
+
+	return nbuf;
+}
+
 /*
  * dp_rx_mon_status_srng_process() - Process monitor status ring
  * post the status ring buffer to Rx status Queue for later
@@ -1844,7 +1901,8 @@ dp_rx_mon_status_srng_process(struct dp_soc *soc, struct dp_intr *int_ctx,
			 */
			break;
 		}
-		qdf_nbuf_set_pktlen(status_nbuf, RX_DATA_BUFFER_SIZE);
+		qdf_nbuf_set_pktlen(status_nbuf,
+				    RX_MON_STATUS_BUF_SIZE);

 		qdf_nbuf_unmap_nbytes_single(soc->osdev, status_nbuf,
					     QDF_DMA_FROM_DEVICE,
@@ -2027,7 +2085,7 @@ dp_rx_pdev_mon_status_desc_pool_init(struct dp_pdev *pdev, uint32_t mac_id)
		    pdev_id, num_entries);

 	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM;
-	rx_desc_pool->buf_size = RX_DATA_BUFFER_SIZE;
+	rx_desc_pool->buf_size = RX_MON_STATUS_BUF_SIZE;
 	rx_desc_pool->buf_alignment = RX_DATA_BUFFER_ALIGNMENT;

 	/* Disable frag processing flag */
 	dp_rx_enable_mon_dest_frag(rx_desc_pool, false);
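The new path depends on qdf_nbuf_alloc_no_recycler, which per the commit message is backed by alloc_skb rather than dev_alloc_skb. As a rough, hypothetical sketch of what such a non-recycled allocation does under the hood (mon_status_skb_alloc and its alignment fix-up are illustrative, not the qdf implementation):

/* Hypothetical sketch only, not the qdf_nbuf_alloc_no_recycler source */
#include <linux/skbuff.h>

static struct sk_buff *mon_status_skb_alloc(unsigned int size,
					    unsigned int reserve,
					    unsigned int align)
{
	/* alloc_skb() sizes the data area to the request (plus shared
	 * info), unlike the dev_alloc_skb()-based path, which for a 2k
	 * request here landed in a 4k slab.
	 */
	struct sk_buff *skb = alloc_skb(size, GFP_ATOMIC);
	unsigned long offset;

	if (!skb)
		return NULL;

	/* Headroom for the reservation, then fix up alignment (align
	 * assumed to be a power of two, e.g. 128)
	 */
	skb_reserve(skb, reserve);
	offset = (unsigned long)skb->data & (align - 1);
	if (offset)
		skb_reserve(skb, align - offset);

	return skb;
}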


@@ -40,6 +40,14 @@
 #define RX_MONITOR_BUFFER_SIZE 2048
 #endif

+/* MONITOR STATUS BUFFER SIZE = 1536 data bytes, buffer allocation of 2k bytes
+ * including skb shared info and buffer alignment.
+ */
+#define RX_MON_STATUS_BASE_BUF_SIZE 2048
+#define RX_MON_STATUS_BUF_ALIGN 128
+#define RX_MON_STATUS_BUF_SIZE (RX_MON_STATUS_BASE_BUF_SIZE - \
+				RX_MON_STATUS_BUF_ALIGN - QDF_SHINFO_SIZE)
+
 /* HAL_RX_NON_QOS_TID = NON_QOS_TID which is 16 */
 #define HAL_RX_NON_QOS_TID 16
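The define arithmetic can be sanity-checked in isolation. Assuming QDF_SHINFO_SIZE evaluates to 384 bytes (implied by the "1536 data bytes" comment above; the real value is SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) and kernel-dependent), the stand-alone check below confirms that data, alignment, and shared-info overhead exactly fill one 2k allocation:

#include <assert.h>

#define RX_MON_STATUS_BASE_BUF_SIZE 2048
#define RX_MON_STATUS_BUF_ALIGN 128
#define ASSUMED_QDF_SHINFO_SIZE 384	/* assumed value of QDF_SHINFO_SIZE */
#define RX_MON_STATUS_BUF_SIZE (RX_MON_STATUS_BASE_BUF_SIZE - \
				RX_MON_STATUS_BUF_ALIGN - \
				ASSUMED_QDF_SHINFO_SIZE)

int main(void)
{
	/* 2048 - 128 - 384 = 1536 data bytes, as the comment states */
	assert(RX_MON_STATUS_BUF_SIZE == 1536);
	/* data + alignment + shared info fill one 2k allocation exactly */
	assert(RX_MON_STATUS_BUF_SIZE + RX_MON_STATUS_BUF_ALIGN +
	       ASSUMED_QDF_SHINFO_SIZE == RX_MON_STATUS_BASE_BUF_SIZE);
	return 0;
}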