qcacmn: Refactor unused RX code for SOFTUMAC platforms

In RX and defrag path some of the code in common files will
not be used by new SOFTUMAC based platform. So placing all
the unused code under SOFTUMAC macro and for better readability
clubbing all the code at one place in source file.

Change-Id: I6ff3997a42872a25fb020898f7fb1879746fc8e6
CRs-Fixed: 3382899
This commit is contained in:
Karthik Kantamneni
2022-12-07 12:23:37 +05:30
committed by Madan Koyyalamudi
parent 39a0cd23ed
commit 82a1c01b88
4 changed files with 711 additions and 659 deletions

View File

@@ -955,38 +955,6 @@ dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl,
uint8_t reo_ring_num,
uint32_t quota);
/**
* dp_rx_err_process() - Processes error frames routed to REO error ring
* @int_ctx: pointer to DP interrupt context
* @soc: core txrx main context
* @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
* @quota: No. of units (packets) that can be serviced in one shot.
*
* This function implements error processing and top level demultiplexer
* for all the frames routed to REO error ring.
*
* Return: uint32_t: No. of elements processed
*/
uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl, uint32_t quota);
/**
* dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
* @int_ctx: pointer to DP interrupt context
* @soc: core txrx main context
* @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
* serviced
* @quota: No. of units (packets) that can be serviced in one shot.
*
* This function implements error processing and top level demultiplexer
* for all the frames routed to WBM2HOST sw release ring.
*
* Return: uint32_t: No. of elements processed
*/
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl, uint32_t quota);
/**
* dp_rx_sg_create() - create a frag_list for MSDUs which are spread across
* multiple nbufs.
@@ -1211,38 +1179,6 @@ uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf,
void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id);
/**
* dp_rx_process_mic_error(): Function to pass mic error indication to umac
* @soc: core DP main context
* @nbuf: buffer pointer
* @rx_tlv_hdr: start of rx tlv header
* @txrx_peer: txrx peer handle
*
* Return: void
*/
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr,
struct dp_txrx_peer *txrx_peer);
/**
* dp_2k_jump_handle() - Function to handle 2k jump exception
* on WBM ring
* @soc: core DP main context
* @nbuf: buffer pointer
* @rx_tlv_hdr: start of rx tlv header
* @peer_id: peer id of first msdu
* @tid: Tid for which exception occurred
*
* This function handles 2k jump violations arising out
* of receiving aggregates in non BA case. This typically
* may happen if aggregates are received on a QOS enabled TID
* while Rx window size is still initialized to value of 2. Or
* it may also happen if negotiated window size is 1 but peer
* sends aggregates.
*/
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
uint16_t peer_id, uint8_t tid);
#define DP_RX_HEAD_APPEND(head, elem) \
do { \
qdf_nbuf_set_next((elem), (head)); \
@@ -1826,48 +1762,6 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
struct rx_desc_pool *rx_desc_pool,
uint32_t num_req_buffers);
/**
* dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
* (WBM), following error handling
*
* @soc: core DP main context
* @ring_desc: opaque pointer to the REO error ring descriptor
* @bm_action: put to idle_list or release to msdu_list
*
* Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
*/
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
uint8_t bm_action);
/**
* dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to
* (WBM) by address
*
* @soc: core DP main context
* @link_desc_addr: link descriptor addr
* @bm_action: put to idle_list or release to msdu_list
*
* Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
*/
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
hal_buff_addrinfo_t link_desc_addr,
uint8_t bm_action);
/**
* dp_rxdma_err_process() - RxDMA error processing functionality
* @int_ctx: pointer to DP interrupt context
* @soc: core txrx main context
* @mac_id: mac id which is one of 3 mac_ids
* @quota: No. of units (packets) that can be serviced in one shot.
*
* Return: num of buffers processed
*/
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
uint32_t mac_id, uint32_t quota);
/**
* dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats
* @vdev: DP Virtual device handle
@@ -1902,21 +1796,6 @@ QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
struct dp_txrx_peer *peer);
/**
* dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
*
* @soc: core txrx main context
* @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
* @ring_desc: opaque pointer to the RX ring descriptor
* @rx_desc: host rx descriptor
*
* Return: void
*/
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl,
hal_ring_desc_t ring_desc,
struct dp_rx_desc *rx_desc);
/**
* dp_rx_compute_delay() - Compute and fill in all timestamps
* to pass in correct fields
@@ -2074,24 +1953,6 @@ bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc,
void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool,
bool is_mon_dest_desc);
/**
* dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
* frames to OS or wifi parse errors.
* @soc: core DP main context
* @nbuf: buffer pointer
* @rx_tlv_hdr: start of rx tlv header
* @txrx_peer: peer reference
* @err_code: rxdma err code
* @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and
* pool_id has same mapping)
*
* Return: None
*/
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
uint8_t err_code, uint8_t mac_id);
#ifndef QCA_MULTIPASS_SUPPORT
static inline
bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
@@ -2170,88 +2031,6 @@ QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc,
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef QCA_OL_RX_LOCK_LESS_ACCESS
/**
* dp_rx_srng_access_start()- Wrapper function to log access start of a hal ring
* @int_ctx: pointer to DP interrupt context
* @soc: DP soc structure pointer
* @hal_ring_hdl: HAL ring handle
*
* Return: 0 on success; error on failure
*/
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
			hal_ring_handle_t hal_ring_hdl)
{
	/* Lock-less build: start ring access without taking the SRNG lock */
	int ret;

	ret = hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);

	return ret;
}
/**
* dp_rx_srng_access_end()- Wrapper function to log access end of a hal ring
* @int_ctx: pointer to DP interrupt context
* @soc: DP soc structure pointer
* @hal_ring_hdl: HAL ring handle
*
* Return: None
*/
static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl)
{
/* lock-less counterpart of dp_rx_srng_access_start(); int_ctx unused */
hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}
#else
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
			hal_ring_handle_t hal_ring_hdl)
{
	/* Default build: serialized SRNG access via the DP wrapper */
	int ret;

	ret = dp_srng_access_start(int_ctx, soc, hal_ring_hdl);

	return ret;
}
/* End SRNG access under the default (serialized) access scheme */
static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl)
{
dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
/**
* dp_rx_wbm_sg_list_reset() - Initialize sg list
*
* This api should be called at soc init and afterevery sg processing.
*@soc: DP SOC handle
*/
static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
{
	if (!soc)
		return;

	/* Clear all WBM scatter-gather bookkeeping back to initial state */
	soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
	soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
	soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
	soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
}
/**
* dp_rx_wbm_sg_list_deinit() - De-initialize sg list
*
* This api should be called in down path, to avoid any leak.
*@soc: DP SOC handle
*/
static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
{
	if (!soc)
		return;

	/* Free any nbuf chain still queued, then reset the SG bookkeeping */
	if (soc->wbm_sg_param.wbm_sg_nbuf_head)
		qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);
	dp_rx_wbm_sg_list_reset(soc);
}
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
#define DP_RX_PROCESS_NBUF(soc, head, tail, ebuf_head, ebuf_tail, rx_desc) \
do { \
@@ -2276,20 +2055,6 @@ static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
/**
* dp_rx_link_desc_refill_duplicate_check() - check if link desc duplicate
* to refill
* @soc: DP SOC handle
* @buf_info: the last link desc buf info
* @ring_buf_info: current buf address pointor including link desc
*
* Return: none.
*/
void dp_rx_link_desc_refill_duplicate_check(
struct dp_soc *soc,
struct hal_buf_info *buf_info,
hal_buff_addrinfo_t ring_buf_info);
#ifdef WLAN_FEATURE_PKT_CAPTURE_V2
/**
* dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture
@@ -2402,64 +2167,7 @@ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf,
*/
void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf);
/**
* dp_rx_srng_get_num_pending() - get number of pending entries
* @hal_soc: hal soc opaque pointer
* @hal_ring_hdl: opaque pointer to the HAL Rx Ring
* @num_entries: number of entries in the hal_ring.
* @near_full: pointer to a boolean. This is set if ring is near full.
*
* The function returns the number of entries in a destination ring which are
* yet to be reaped. The function also checks if the ring is near full.
* If more than half of the ring needs to be reaped, the ring is considered
* approaching full.
* The function uses hal_srng_dst_num_valid_locked to get the number of valid
* entries. It should not be called within a SRNG lock. HW pointer value is
* synced into cached_hp.
*
* Return: Number of pending entries if any
*/
uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
hal_ring_handle_t hal_ring_hdl,
uint32_t num_entries,
bool *near_full);
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
* dp_rx_ring_record_entry() - Record an entry into the rx ring history.
* @soc: Datapath soc structure
* @ring_num: REO ring number
* @ring_desc: REO ring descriptor
*
* Return: None
*/
void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
hal_ring_desc_t ring_desc);
#else
/* Stub when WLAN_FEATURE_DP_RX_RING_HISTORY is disabled: no history kept */
static inline void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
hal_ring_desc_t ring_desc)
{
}
#endif
#ifndef QCA_HOST_MODE_WIFI_DISABLED
#ifdef RX_DESC_SANITY_WAR
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
hal_ring_handle_t hal_ring_hdl,
hal_ring_desc_t ring_desc,
struct dp_rx_desc *rx_desc);
#else
/* RX_DESC_SANITY_WAR disabled: workaround compiled out, always succeed */
static inline
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
hal_ring_handle_t hal_ring_hdl,
hal_ring_desc_t ring_desc,
struct dp_rx_desc *rx_desc)
{
return QDF_STATUS_SUCCESS;
}
#endif
#ifdef DP_RX_DROP_RAW_FRM
/**
* dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop
@@ -2477,29 +2185,6 @@ bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf)
}
#endif
#ifdef RX_DESC_DEBUG_CHECK
/**
* dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
* corruption
* @soc: DP SoC context
* @ring_desc: REO ring descriptor
* @rx_desc: Rx descriptor
*
* Return: NONE
*/
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
hal_ring_desc_t ring_desc,
struct dp_rx_desc *rx_desc);
#else
/* RX_DESC_DEBUG_CHECK disabled: paddr sanity check compiled out */
static inline
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
hal_ring_desc_t ring_desc,
struct dp_rx_desc *rx_desc)
{
return QDF_STATUS_SUCCESS;
}
#endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
* dp_rx_update_stats() - Update soc level rx packet count
@@ -2630,98 +2315,6 @@ static inline int dp_rx_get_loop_pkt_limit(struct dp_soc *soc)
void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf);
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
* dp_rx_is_list_ready() - Make different lists for 4-address
* and 3-address frames
* @nbuf_head: skb list head
* @vdev: vdev
* @txrx_peer : txrx_peer
* @peer_id: peer id of new received frame
* @vdev_id: vdev_id of new received frame
*
* Return: true if peer_ids are different.
*/
static inline bool
dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
		    struct dp_vdev *vdev,
		    struct dp_txrx_peer *txrx_peer,
		    uint16_t peer_id,
		    uint8_t vdev_id)
{
	/* WDS-ext: a non-empty list is ready once the peer id changes */
	return nbuf_head && txrx_peer && txrx_peer->peer_id != peer_id;
}
#else
static inline bool
dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
		    struct dp_vdev *vdev,
		    struct dp_txrx_peer *txrx_peer,
		    uint16_t peer_id,
		    uint8_t vdev_id)
{
	/* Default: a non-empty list is ready once the vdev id changes */
	return nbuf_head && vdev && vdev->vdev_id != vdev_id;
}
#endif
#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
/**
* dp_rx_mark_first_packet_after_wow_wakeup - get first packet after wow wakeup
* @pdev: pointer to dp_pdev structure
* @rx_tlv: pointer to rx_pkt_tlvs structure
* @nbuf: pointer to skb buffer
*
* Return: None
*/
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
uint8_t *rx_tlv,
qdf_nbuf_t nbuf);
#else
/* WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET disabled: nothing to mark */
static inline void
dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
uint8_t *rx_tlv,
qdf_nbuf_t nbuf)
{
}
#endif
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* Single-pdev build: defrag buffer-manager id derived from wbm_sw0 base */
static inline uint8_t
dp_rx_get_defrag_bm_id(struct dp_soc *soc)
{
return DP_DEFRAG_RBM(soc->wbm_sw0_bm_id);
}
/* Single-pdev build: RX buffer-manager id derived from wbm_sw0 base */
static inline uint8_t
dp_rx_get_rx_bm_id(struct dp_soc *soc)
{
return DP_WBM2SW_RBM(soc->wbm_sw0_bm_id);
}
#else
static inline uint8_t
dp_rx_get_rx_bm_id(struct dp_soc *soc)
{
	/* Multi-pdev: derive BM id from the configured RX release ring */
	uint8_t rel_ring_id =
		wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);

	return HAL_RX_BUF_RBM_SW_BM(soc->wbm_sw0_bm_id, rel_ring_id);
}
/* Multi-pdev build: defrag frames share the RX buffer-manager id */
static inline uint8_t
dp_rx_get_defrag_bm_id(struct dp_soc *soc)
{
return dp_rx_get_rx_bm_id(soc);
}
#endif
static inline uint16_t
dp_rx_peer_metadata_peer_id_get(struct dp_soc *soc, uint32_t peer_metadata)
{
@@ -3356,4 +2949,441 @@ dp_rx_deliver_to_osif_stack(struct dp_soc *soc,
qdf_nbuf_t nbuf,
qdf_nbuf_t tail,
bool is_eapol);
#ifndef WLAN_SOFTUMAC_SUPPORT /* WLAN_SOFTUMAC_SUPPORT */
/**
* dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info
*
* @soc: core txrx main context
* @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced
* @ring_desc: opaque pointer to the RX ring descriptor
* @rx_desc: host rx descriptor
*
* Return: void
*/
void dp_rx_dump_info_and_assert(struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl,
hal_ring_desc_t ring_desc,
struct dp_rx_desc *rx_desc);
/**
* dp_rx_link_desc_return() - Return a MPDU link descriptor to HW
* (WBM), following error handling
*
* @soc: core DP main context
* @ring_desc: opaque pointer to the REO error ring descriptor
* @bm_action: put to idle_list or release to msdu_list
*
* Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
*/
QDF_STATUS
dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc,
uint8_t bm_action);
/**
* dp_rx_link_desc_return_by_addr - Return a MPDU link descriptor to
* (WBM) by address
*
* @soc: core DP main context
* @link_desc_addr: link descriptor addr
* @bm_action: put to idle_list or release to msdu_list
*
* Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS
*/
QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
hal_buff_addrinfo_t link_desc_addr,
uint8_t bm_action);
/**
* dp_rxdma_err_process() - RxDMA error processing functionality
* @int_ctx: pointer to DP interrupt context
* @soc: core txrx main context
* @mac_id: mac id which is one of 3 mac_ids
* @quota: No. of units (packets) that can be serviced in one shot.
*
* Return: num of buffers processed
*/
uint32_t
dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
uint32_t mac_id, uint32_t quota);
/**
* dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err
* frames to OS or wifi parse errors.
* @soc: core DP main context
* @nbuf: buffer pointer
* @rx_tlv_hdr: start of rx tlv header
* @txrx_peer: peer reference
* @err_code: rxdma err code
* @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and
* pool_id has same mapping)
*
* Return: None
*/
void
dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer,
uint8_t err_code, uint8_t mac_id);
/**
* dp_rx_process_mic_error(): Function to pass mic error indication to umac
* @soc: core DP main context
* @nbuf: buffer pointer
* @rx_tlv_hdr: start of rx tlv header
* @txrx_peer: txrx peer handle
*
* Return: void
*/
void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr,
struct dp_txrx_peer *txrx_peer);
/**
* dp_2k_jump_handle() - Function to handle 2k jump exception
* on WBM ring
* @soc: core DP main context
* @nbuf: buffer pointer
* @rx_tlv_hdr: start of rx tlv header
* @peer_id: peer id of first msdu
* @tid: Tid for which exception occurred
*
* This function handles 2k jump violations arising out
* of receiving aggregates in non BA case. This typically
* may happen if aggregates are received on a QOS enabled TID
* while Rx window size is still initialized to value of 2. Or
* it may also happen if negotiated window size is 1 but peer
* sends aggregates.
*/
void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr,
uint16_t peer_id, uint8_t tid);
#ifndef QCA_HOST_MODE_WIFI_DISABLED
/**
* dp_rx_err_process() - Processes error frames routed to REO error ring
* @int_ctx: pointer to DP interrupt context
* @soc: core txrx main context
* @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced
* @quota: No. of units (packets) that can be serviced in one shot.
*
* This function implements error processing and top level demultiplexer
* for all the frames routed to REO error ring.
*
* Return: uint32_t: No. of elements processed
*/
uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl, uint32_t quota);
/**
* dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring
* @int_ctx: pointer to DP interrupt context
* @soc: core txrx main context
* @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be
* serviced
* @quota: No. of units (packets) that can be serviced in one shot.
*
* This function implements error processing and top level demultiplexer
* for all the frames routed to WBM2HOST sw release ring.
*
* Return: uint32_t: No. of elements processed
*/
uint32_t
dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl, uint32_t quota);
#ifdef QCA_OL_RX_LOCK_LESS_ACCESS
/**
* dp_rx_srng_access_start()- Wrapper function to log access start of a hal ring
* @int_ctx: pointer to DP interrupt context
* @soc: DP soc structure pointer
* @hal_ring_hdl: HAL ring handle
*
* Return: 0 on success; error on failure
*/
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
			hal_ring_handle_t hal_ring_hdl)
{
	/* Lock-less build: start ring access without taking the SRNG lock */
	int ret;

	ret = hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);

	return ret;
}
/**
* dp_rx_srng_access_end()- Wrapper function to log access end of a hal ring
* @int_ctx: pointer to DP interrupt context
* @soc: DP soc structure pointer
* @hal_ring_hdl: HAL ring handle
*
* Return: None
*/
static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl)
{
/* lock-less counterpart of dp_rx_srng_access_start(); int_ctx unused */
hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
}
#else
static inline int
dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc,
			hal_ring_handle_t hal_ring_hdl)
{
	/* Default build: serialized SRNG access via the DP wrapper */
	int ret;

	ret = dp_srng_access_start(int_ctx, soc, hal_ring_hdl);

	return ret;
}
/* End SRNG access under the default (serialized) access scheme */
static inline void
dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc,
hal_ring_handle_t hal_ring_hdl)
{
dp_srng_access_end(int_ctx, soc, hal_ring_hdl);
}
#endif
#ifdef RX_DESC_SANITY_WAR
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
hal_ring_handle_t hal_ring_hdl,
hal_ring_desc_t ring_desc,
struct dp_rx_desc *rx_desc);
#else
/* RX_DESC_SANITY_WAR disabled: workaround compiled out, always succeed */
static inline
QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc,
hal_ring_handle_t hal_ring_hdl,
hal_ring_desc_t ring_desc,
struct dp_rx_desc *rx_desc)
{
return QDF_STATUS_SUCCESS;
}
#endif
#ifdef RX_DESC_DEBUG_CHECK
/**
* dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr
* corruption
* @soc: DP SoC context
* @ring_desc: REO ring descriptor
* @rx_desc: Rx descriptor
*
* Return: NONE
*/
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
hal_ring_desc_t ring_desc,
struct dp_rx_desc *rx_desc);
#else
/* RX_DESC_DEBUG_CHECK disabled: paddr sanity check compiled out */
static inline
QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc,
hal_ring_desc_t ring_desc,
struct dp_rx_desc *rx_desc)
{
return QDF_STATUS_SUCCESS;
}
#endif
#endif /* QCA_HOST_MODE_WIFI_DISABLED */
/**
* dp_rx_wbm_sg_list_reset() - Initialize sg list
*
* This api should be called at soc init and afterevery sg processing.
*@soc: DP SOC handle
*/
static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
{
	if (!soc)
		return;

	/* Clear all WBM scatter-gather bookkeeping back to initial state */
	soc->wbm_sg_param.wbm_sg_nbuf_head = NULL;
	soc->wbm_sg_param.wbm_sg_nbuf_tail = NULL;
	soc->wbm_sg_param.wbm_sg_desc_msdu_len = 0;
	soc->wbm_sg_param.wbm_is_first_msdu_in_sg = false;
}
/**
* dp_rx_wbm_sg_list_deinit() - De-initialize sg list
*
* This api should be called in down path, to avoid any leak.
*@soc: DP SOC handle
*/
static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
{
	if (!soc)
		return;

	/* Free any nbuf chain still queued, then reset the SG bookkeeping */
	if (soc->wbm_sg_param.wbm_sg_nbuf_head)
		qdf_nbuf_list_free(soc->wbm_sg_param.wbm_sg_nbuf_head);
	dp_rx_wbm_sg_list_reset(soc);
}
/**
* dp_rx_link_desc_refill_duplicate_check() - check if link desc duplicate
* to refill
* @soc: DP SOC handle
* @buf_info: the last link desc buf info
* @ring_buf_info: current buf address pointor including link desc
*
* Return: none.
*/
void dp_rx_link_desc_refill_duplicate_check(
struct dp_soc *soc,
struct hal_buf_info *buf_info,
hal_buff_addrinfo_t ring_buf_info);
/**
* dp_rx_srng_get_num_pending() - get number of pending entries
* @hal_soc: hal soc opaque pointer
* @hal_ring_hdl: opaque pointer to the HAL Rx Ring
* @num_entries: number of entries in the hal_ring.
* @near_full: pointer to a boolean. This is set if ring is near full.
*
* The function returns the number of entries in a destination ring which are
* yet to be reaped. The function also checks if the ring is near full.
* If more than half of the ring needs to be reaped, the ring is considered
* approaching full.
* The function uses hal_srng_dst_num_valid_locked to get the number of valid
* entries. It should not be called within a SRNG lock. HW pointer value is
* synced into cached_hp.
*
* Return: Number of pending entries if any
*/
uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc,
hal_ring_handle_t hal_ring_hdl,
uint32_t num_entries,
bool *near_full);
#ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
/**
* dp_rx_ring_record_entry() - Record an entry into the rx ring history.
* @soc: Datapath soc structure
* @ring_num: REO ring number
* @ring_desc: REO ring descriptor
*
* Return: None
*/
void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
hal_ring_desc_t ring_desc);
#else
/* Stub when WLAN_FEATURE_DP_RX_RING_HISTORY is disabled: no history kept */
static inline void
dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num,
hal_ring_desc_t ring_desc)
{
}
#endif
#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
* dp_rx_is_list_ready() - Make different lists for 4-address
* and 3-address frames
* @nbuf_head: skb list head
* @vdev: vdev
* @txrx_peer : txrx_peer
* @peer_id: peer id of new received frame
* @vdev_id: vdev_id of new received frame
*
* Return: true if peer_ids are different.
*/
static inline bool
dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
		    struct dp_vdev *vdev,
		    struct dp_txrx_peer *txrx_peer,
		    uint16_t peer_id,
		    uint8_t vdev_id)
{
	/* WDS-ext: a non-empty list is ready once the peer id changes */
	return nbuf_head && txrx_peer && txrx_peer->peer_id != peer_id;
}
#else
static inline bool
dp_rx_is_list_ready(qdf_nbuf_t nbuf_head,
		    struct dp_vdev *vdev,
		    struct dp_txrx_peer *txrx_peer,
		    uint16_t peer_id,
		    uint8_t vdev_id)
{
	/* Default: a non-empty list is ready once the vdev id changes */
	return nbuf_head && vdev && vdev->vdev_id != vdev_id;
}
#endif
#ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
/**
* dp_rx_mark_first_packet_after_wow_wakeup - get first packet after wow wakeup
* @pdev: pointer to dp_pdev structure
* @rx_tlv: pointer to rx_pkt_tlvs structure
* @nbuf: pointer to skb buffer
*
* Return: None
*/
void dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
uint8_t *rx_tlv,
qdf_nbuf_t nbuf);
#else
/* WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET disabled: nothing to mark */
static inline void
dp_rx_mark_first_packet_after_wow_wakeup(struct dp_pdev *pdev,
uint8_t *rx_tlv,
qdf_nbuf_t nbuf)
{
}
#endif
#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
/* Single-pdev build: defrag buffer-manager id derived from wbm_sw0 base */
static inline uint8_t
dp_rx_get_defrag_bm_id(struct dp_soc *soc)
{
return DP_DEFRAG_RBM(soc->wbm_sw0_bm_id);
}
/* Single-pdev build: RX buffer-manager id derived from wbm_sw0 base */
static inline uint8_t
dp_rx_get_rx_bm_id(struct dp_soc *soc)
{
return DP_WBM2SW_RBM(soc->wbm_sw0_bm_id);
}
#else
static inline uint8_t
dp_rx_get_rx_bm_id(struct dp_soc *soc)
{
	/* Multi-pdev: derive BM id from the configured RX release ring */
	uint8_t rel_ring_id =
		wlan_cfg_get_rx_rel_ring_id(soc->wlan_cfg_ctx);

	return HAL_RX_BUF_RBM_SW_BM(soc->wbm_sw0_bm_id, rel_ring_id);
}
/* Multi-pdev build: defrag frames share the RX buffer-manager id */
static inline uint8_t
dp_rx_get_defrag_bm_id(struct dp_soc *soc)
{
return dp_rx_get_rx_bm_id(soc);
}
#endif
#else
/* SOFTUMAC build: this path is unused on SOFTUMAC platforms, so the
 * stub simply reports success.
 */
static inline QDF_STATUS
dp_rx_link_desc_return_by_addr(struct dp_soc *soc,
hal_buff_addrinfo_t link_desc_addr,
uint8_t bm_action)
{
return QDF_STATUS_SUCCESS;
}
/* SOFTUMAC build: no-op stub */
static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc)
{
}
/* SOFTUMAC build: no-op stub */
static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc)
{
}
/* SOFTUMAC build: stub returns BM id 0 */
static inline uint8_t
dp_rx_get_defrag_bm_id(struct dp_soc *soc)
{
return 0;
}
/* SOFTUMAC build: stub returns BM id 0 */
static inline uint8_t
dp_rx_get_rx_bm_id(struct dp_soc *soc)
{
return 0;
}
#endif /* WLAN_SOFTUMAC_SUPPORT */
#endif /* _DP_RX_H */