diff --git a/dp/wifi3.0/dp_rx.c b/dp/wifi3.0/dp_rx.c index c55516245f..1232f1451c 100644 --- a/dp/wifi3.0/dp_rx.c +++ b/dp/wifi3.0/dp_rx.c @@ -167,10 +167,11 @@ dp_pdev_frag_alloc_and_map(struct dp_soc *dp_soc, * dp_rx_refill_ring_record_entry() - Record an entry into refill_ring history * @soc: Datapath soc structure * @ring_num: Refill ring number + * @hal_ring_hdl: * @num_req: number of buffers requested for refill * @num_refill: number of buffers refilled * - * Returns: None + * Return: None */ static inline void dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, @@ -209,8 +210,8 @@ dp_rx_refill_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, #endif /** - * dp_pdev_nbuf_alloc_and_map() - Allocate nbuf for desc buffer and map - * + * dp_pdev_nbuf_alloc_and_map_replenish() - Allocate nbuf for desc buffer and + * map * @dp_soc: struct dp_soc * * @mac_id: Mac id * @num_entries_avail: num_entries_avail @@ -599,9 +600,8 @@ qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf, } #endif -/* +/** * dp_rx_desc_replenish() - Replenish the rx descriptors one at a time - * * @soc: core txrx main context * @dp_rxdma_srng: rxdma ring * @rx_desc_pool: rx descriptor pool @@ -636,14 +636,6 @@ void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng, hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng); } -/* - * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring - * - * @soc: core txrx main context - * @nbuf_list: nbuf list for delayed free - * - * Return: void - */ void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list) { int mac_id, i, j; @@ -707,24 +699,6 @@ void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list) } #endif -/* - * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs - * called during dp rx initialization - * and at the end of dp_rx_process. - * - * @soc: core txrx main context - * @mac_id: mac_id which is one of 3 mac_ids - * @dp_rxdma_srng: dp rxdma circular ring - * @rx_desc_pool: Pointer to free Rx descriptor pool - * @num_req_buffers: number of buffer to be replenished - * @desc_list: list of descs if called from dp_rx_process - * or NULL during dp rx initialization or out of buffer - * interrupt. - * @tail: tail of descs list - * @req_only: If true don't replenish more than req buffers - * @func_name: name of the caller function - * Return: return success or failure - */ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, struct dp_srng *dp_rxdma_srng, struct rx_desc_pool *rx_desc_pool, @@ -933,17 +907,6 @@ free_descs: qdf_export_symbol(__dp_rx_buffers_replenish); -/* - * dp_rx_deliver_raw() - process RAW mode pkts and hand over the - * pkts to RAW mode simulation to - * decapsulate the pkt. 
- * - * @vdev: vdev on which RAW mode is enabled - * @nbuf_list: list of RAW pkts to process - * @txrx_peer: peer object from which the pkt is rx - * - * Return: void - */ void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list, struct dp_txrx_peer *txrx_peer) @@ -990,7 +953,7 @@ void dp_rx_da_learn(struct dp_soc *soc, uint8_t *rx_tlv_hdr, #endif #ifdef QCA_SUPPORT_TX_MIN_RATES_FOR_SPECIAL_FRAMES -/* +/** * dp_classify_critical_pkts() - API for marking critical packets * @soc: dp_soc context * @vdev: vdev on which packet is to be sent @@ -1031,17 +994,6 @@ void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id) } #endif -/* - * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets - * - * @soc: core txrx main context - * @ta_peer : source peer entry - * @rx_tlv_hdr : start address of rx tlvs - * @nbuf : nbuf that has to be intrabss forwarded - * @tid_stats : tid stats pointer - * - * Return: bool: true if it is forwarded else false - */ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, struct cdp_tid_rx_stats *tid_stats) @@ -1092,18 +1044,6 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, return false; } -/* - * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets - * - * @soc: core txrx main context - * @ta_peer: source peer entry - * @tx_vdev_id: VDEV ID for Intra-BSS TX - * @rx_tlv_hdr: start address of rx tlvs - * @nbuf: nbuf that has to be intrabss forwarded - * @tid_stats: tid stats pointer - * - * Return: bool: true if it is forwarded else false - */ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, uint8_t tx_vdev_id, uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, @@ -1157,20 +1097,6 @@ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer, #ifdef MESH_MODE_SUPPORT -/** - * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats - * - * @vdev: DP Virtual device handle - * @nbuf: Buffer pointer - * @rx_tlv_hdr: start of rx tlv header - * @txrx_peer: pointer to peer - * - * This function allocated memory for mesh receive stats and fill the - * required stats. Stores the memory address in skb cb. - * - * Return: void - */ - void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer) @@ -1269,19 +1195,6 @@ void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, } -/** - * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets - * - * @vdev: DP Virtual device handle - * @nbuf: Buffer pointer - * @rx_tlv_hdr: start of rx tlv header - * - * This checks if the received packet is matching any filter out - * catogery and and drop the packet if it matches. 
- * - * Return: status(0 indicates drop, 1 indicate to no drop) - */ - QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr) { @@ -1349,15 +1262,6 @@ QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, #endif #ifdef RX_PEER_INVALID_ENH -/** - * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac - * @soc: DP SOC handle - * @mpdu: mpdu for which peer is invalid - * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and - * pool_id has same mapping) - * - * return: integer type - */ uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t mpdu, uint8_t mac_id) { @@ -1472,16 +1376,6 @@ free: return 0; } -/** - * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler - * @soc: DP SOC handle - * @mpdu: mpdu for which peer is invalid - * @mpdu_done: if an mpdu is completed - * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and - * pool_id has same mapping) - * - * return: integer type - */ void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id) @@ -1594,15 +1488,6 @@ static void dp_rx_print_offload_info(struct dp_soc *soc, dp_verbose_debug("---------------------------------------------------------"); } -/** - * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb - * @soc: DP SOC handle - * @rx_tlv: RX TLV received for the msdu - * @msdu: msdu for which GRO info needs to be filled - * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets - * - * Return: None - */ void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt) { @@ -1697,17 +1582,6 @@ static inline uint32_t dp_get_l3_hdr_pad_len(struct dp_soc *soc, return l3_hdr_pad; } -/** - * dp_rx_sg_create() - create a frag_list for MSDUs which are spread across - * multiple nbufs. - * @soc: DP SOC handle - * @nbuf: pointer to the first msdu of an amsdu. - * - * This function implements the creation of RX frag_list for cases - * where an MSDU is spread across multiple nbufs. - * - * Return: returns the head nbuf which contains complete frag_list. - */ qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf) { qdf_nbuf_t parent, frag_list, next = NULL; @@ -1807,11 +1681,6 @@ qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf) } #ifdef DP_RX_SG_FRAME_SUPPORT -/** - * dp_rx_is_sg_supported() - SG packets processing supported or not. - * - * Return: returns true when processing is supported else false. 
- */ bool dp_rx_is_sg_supported(void) { return true; @@ -1826,13 +1695,6 @@ bool dp_rx_is_sg_supported(void) #endif /* QCA_HOST_MODE_WIFI_DISABLED */ #ifdef QCA_PEER_EXT_STATS -/* - * dp_rx_compute_tid_delay - Computer per TID delay stats - * @peer: DP soc context - * @nbuf: NBuffer - * - * Return: Void - */ void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats, qdf_nbuf_t nbuf) { @@ -1843,15 +1705,6 @@ void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats, } #endif /* QCA_PEER_EXT_STATS */ -/** - * dp_rx_compute_delay() - Compute and fill in all timestamps - * to pass in correct fields - * - * @vdev: pdev handle - * @tx_desc: tx descriptor - * @tid: tid value - * Return: none - */ void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf) { uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf); @@ -1958,13 +1811,6 @@ dp_rx_deliver_to_stack_ext(struct dp_soc *soc, struct dp_vdev *vdev, #endif #ifdef PEER_CACHE_RX_PKTS -/** - * dp_rx_flush_rx_cached() - flush cached rx frames - * @peer: peer - * @drop: flag to drop frames or forward to net stack - * - * Return: None - */ void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop) { struct dp_peer_cached_bufq *bufqi; @@ -2140,9 +1986,8 @@ dp_rx_enqueue_rx(struct dp_peer *peer, * using the appropriate call back functions. * @soc: soc * @vdev: vdev - * @peer: peer + * @txrx_peer: peer * @nbuf_head: skb list head - * @nbuf_tail: skb list tail * * Return: None */ @@ -2171,7 +2016,6 @@ static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, * @vdev: vdev * @txrx_peer: txrx peer * @nbuf_head: skb list head - * @nbuf_tail: skb list tail * * Check the return status of the call back function and drop * the packets if the return status indicates a failure. @@ -2203,12 +2047,12 @@ static void dp_rx_check_delivery_to_stack(struct dp_soc *soc, } #endif /* ifdef DELIVERY_TO_STACK_STATUS_CHECK */ -/* +/** * dp_rx_validate_rx_callbacks() - validate rx callbacks - * @soc DP soc + * @soc: DP soc * @vdev: DP vdev handle * @txrx_peer: pointer to the txrx peer object - * nbuf_head: skb list head + * @nbuf_head: skb list head * * Return: QDF_STATUS - QDF_STATUS_SUCCESS * QDF_STATUS_E_FAILURE @@ -2498,18 +2342,6 @@ dp_peer_update_rx_pkt_per_lmac(struct dp_txrx_peer *txrx_peer, } #endif -/** - * dp_rx_msdu_stats_update() - update per msdu stats. - * @soc: core txrx main context - * @nbuf: pointer to the first msdu of an amsdu. - * @rx_tlv_hdr: pointer to the start of RX TLV headers. - * @txrx_peer: pointer to the txrx peer object. - * @ring_id: reo dest ring number on which pkt is reaped. - * @tid_stats: per tid rx stats. - * - * update all the per msdu stats for that nbuf. 
- * Return: void - */ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, @@ -2561,15 +2393,6 @@ int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, #endif #ifdef RX_DESC_DEBUG_CHECK -/** - * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr - * corruption - * - * @ring_desc: REO ring descriptor - * @rx_desc: Rx descriptor - * - * Return: NONE - */ QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc, hal_ring_desc_t ring_desc, struct dp_rx_desc *rx_desc) @@ -2651,18 +2474,6 @@ static bool dp_rx_is_udp_allowed_over_roam_peer(struct dp_vdev *vdev, return false; } #endif -/** - * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if - * no corresbonding peer found - * @soc: core txrx main context - * @nbuf: pkt skb pointer - * - * This function will try to deliver some RX special frames to stack - * even there is no peer matched found. for instance, LFR case, some - * eapol data will be sent to host before peer_map done. - * - * Return: None - */ void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) { uint16_t peer_id; @@ -2766,23 +2577,6 @@ void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf) } #endif -/** - * dp_rx_srng_get_num_pending() - get number of pending entries - * @hal_soc: hal soc opaque pointer - * @hal_ring: opaque pointer to the HAL Rx Ring - * @num_entries: number of entries in the hal_ring. - * @near_full: pointer to a boolean. This is set if ring is near full. - * - * The function returns the number of entries in a destination ring which are - * yet to be reaped. The function also checks if the ring is near full. - * If more than half of the ring needs to be reaped, the ring is considered - * approaching full. - * The function useses hal_srng_dst_num_valid_locked to get the number of valid - * entries. It should not be called within a SRNG lock. HW pointer value is - * synced into cached_hp. - * - * Return: Number of pending entries if any - */ uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc, hal_ring_handle_t hal_ring_hdl, uint32_t num_entries, @@ -2820,13 +2614,6 @@ void dp_rx_skip_tlvs(struct dp_soc *soc, qdf_nbuf_t nbuf, uint32_t l3_padding) #ifndef QCA_HOST_MODE_WIFI_DISABLED #ifdef DP_RX_DROP_RAW_FRM -/** - * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop - * @nbuf: pkt skb pointer - * - * Return: true - raw frame, dropped - * false - not raw frame, do nothing - */ bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) { if (qdf_nbuf_is_raw_frame(nbuf)) { @@ -2839,14 +2626,6 @@ bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) #endif #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY -/** - * dp_rx_ring_record_entry() - Record an entry into the rx ring history. 
- * @soc: Datapath soc structure - * @ring_num: REO ring number - * @ring_desc: REO ring descriptor - * - * Returns: None - */ void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, hal_ring_desc_t ring_desc) @@ -2878,13 +2657,6 @@ dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, #endif #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR -/** - * dp_rx_update_stats() - Update soc level rx packet count - * @soc: DP soc handle - * @nbuf: nbuf received - * - * Returns: none - */ void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) { DP_STATS_INC_PKT(soc, rx.ingress, 1, @@ -2893,16 +2665,6 @@ void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) #endif #ifdef WLAN_FEATURE_PKT_CAPTURE_V2 -/** - * dp_rx_deliver_to_pkt_capture() - deliver rx packet to packet capture - * @soc : dp_soc handle - * @pdev: dp_pdev handle - * @peer_id: peer_id of the peer for which completion came - * @ppdu_id: ppdu_id - * @netbuf: Buffer pointer - * - * This function is used to deliver rx packet to packet capture - */ void dp_rx_deliver_to_pkt_capture(struct dp_soc *soc, struct dp_pdev *pdev, uint16_t peer_id, uint32_t is_offload, qdf_nbuf_t netbuf) @@ -3162,21 +2924,6 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, qdf_export_symbol(dp_pdev_rx_buffers_attach); -/** - * dp_rx_enable_mon_dest_frag() - Enable frag processing for - * monitor destination ring via frag. - * - * Enable this flag only for monitor destination buffer processing - * if DP_RX_MON_MEM_FRAG feature is enabled. - * If flag is set then frag based function will be called for alloc, - * map, prep desc and free ops for desc buffer else normal nbuf based - * function will be called. - * - * @rx_desc_pool: Rx desc pool - * @is_mon_dest_desc: Is it for monitor dest buffer - * - * Return: None - */ #ifdef DP_RX_MON_MEM_FRAG void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, bool is_mon_dest_desc) @@ -3197,15 +2944,6 @@ void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, qdf_export_symbol(dp_rx_enable_mon_dest_frag); -/* - * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor - * pool - * - * @pdev: core txrx pdev context - * - * Return: QDF_STATUS - QDF_STATUS_SUCCESS - * QDF_STATUS_E_NOMEM - */ QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev) { @@ -3240,11 +2978,6 @@ dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev) return status; } -/* - * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool - * - * @pdev: core txrx pdev context - */ void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev) { int mac_for_pdev = pdev->lmac_id; @@ -3256,14 +2989,6 @@ void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev) dp_rx_desc_pool_free(soc, rx_desc_pool); } -/* - * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors - * - * @pdev: core txrx pdev context - * - * Return: QDF_STATUS - QDF_STATUS_SUCCESS - * QDF_STATUS_E_NOMEM - */ QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) { int mac_for_pdev = pdev->lmac_id; @@ -3275,7 +3000,7 @@ QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) rx_desc_pool = &soc->rx_desc_buf[mac_for_pdev]; if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) { - /** + /* * If NSS is enabled, rx_desc_pool is already filled. * Hence, just disable desc_pool frag flag. 
*/ @@ -3308,13 +3033,6 @@ QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev) return QDF_STATUS_SUCCESS; } -/* - * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools - * @pdev: core txrx pdev context - * - * This function resets the freelist of rx descriptors and destroys locks - * associated with this list of descriptors. - */ void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev) { int mac_for_pdev = pdev->lmac_id; @@ -3326,14 +3044,6 @@ void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev) dp_rx_desc_pool_deinit(soc, rx_desc_pool, mac_for_pdev); } -/* - * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring - * - * @pdev: core txrx pdev context - * - * Return: QDF_STATUS - QDF_STATUS_SUCCESS - * QDF_STATUS_E_NOMEM - */ QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev) { @@ -3359,11 +3069,6 @@ dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev) rxdma_entries - 1); } -/* - * dp_rx_pdev_buffers_free - Free nbufs (skbs) - * - * @pdev: core txrx pdev context - */ void dp_rx_pdev_buffers_free(struct dp_pdev *pdev) { diff --git a/dp/wifi3.0/dp_rx.h b/dp/wifi3.0/dp_rx.h index 036a2c3d12..47de7e580e 100644 --- a/dp/wifi3.0/dp_rx.h +++ b/dp/wifi3.0/dp_rx.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -78,8 +78,8 @@ /** * enum dp_rx_desc_state * - * @RX_DESC_REPLENISH: rx desc replenished - * @RX_DESC_FREELIST: rx desc in freelist + * @RX_DESC_REPLENISHED: rx desc replenished + * @RX_DESC_IN_FREELIST: rx desc in freelist */ enum dp_rx_desc_state { RX_DESC_REPLENISHED, @@ -114,26 +114,28 @@ struct dp_rx_desc_dbg_info { /** * struct dp_rx_desc * - * @nbuf : VA of the "skb" posted - * @rx_buf_start : VA of the original Rx buffer, before - * movement of any skb->data pointer - * @paddr_buf_start : PA of the original Rx buffer, before - * movement of any frag pointer - * @cookie : index into the sw array which holds - * the sw Rx descriptors - * Cookie space is 21 bits: - * lower 18 bits -- index - * upper 3 bits -- pool_id - * @pool_id : pool Id for which this allocated. - * Can only be used if there is no flow - * steering - * @chip_id : chip_id indicating MLO chip_id - * valid or used only in case of multi-chip MLO - * @in_use rx_desc is in use - * @unmapped used to mark rx_desc an unmapped if the corresponding - * nbuf is already unmapped - * @in_err_state : Nbuf sanity failed for this descriptor. - * @nbuf_data_addr : VA of nbuf data posted + * @nbuf: VA of the "skb" posted + * @rx_buf_start: VA of the original Rx buffer, before + * movement of any skb->data pointer + * @paddr_buf_start: PA of the original Rx buffer, before + * movement of any frag pointer + * @cookie: index into the sw array which holds + * the sw Rx descriptors + * Cookie space is 21 bits: + * lower 18 bits -- index + * upper 3 bits -- pool_id + * @pool_id: pool Id for which this allocated. 
+ * Can only be used if there is no flow + * steering + * @chip_id: chip_id indicating MLO chip_id + * valid or used only in case of multi-chip MLO + * @magic: + * @nbuf_data_addr: VA of nbuf data posted + * @dbg_info: + * @in_use: rx_desc is in use + * @unmapped: used to mark rx_desc an unmapped if the corresponding + * nbuf is already unmapped + * @in_err_state: Nbuf sanity failed for this descriptor. */ struct dp_rx_desc { qdf_nbuf_t nbuf; @@ -271,7 +273,7 @@ bool dp_rx_is_special_frame(qdf_nbuf_t nbuf, uint32_t frame_mask) * note: Msdu_len must have been stored in QDF_NBUF_CB_RX_PKT_LEN(nbuf) and * single nbuf is expected. * - * return: true - nbuf has been delivered to stack, false - not. + * Return: true - nbuf has been delivered to stack, false - not. */ bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer, qdf_nbuf_t nbuf, uint32_t frame_mask, @@ -298,7 +300,7 @@ bool dp_rx_deliver_special_frame(struct dp_soc *soc, struct dp_txrx_peer *peer, * not practical for getting rx * stats like rate, mcs, nss, etc. * - * @hal-soc_hdl: soc handler + * @hal_soc_hdl: soc handler * @rx_tlv_hdr: rx tlv header * @nbuf: RX skb pointer * @@ -388,7 +390,7 @@ bool dp_rx_check_ndi_mdns_fwding(struct dp_txrx_peer *ta_txrx_peer, /** * struct dp_rx_cached_buf - rx cached buffer - * @list: linked list node + * @node: linked list node * @buf: skb buffer */ struct dp_rx_cached_buf { @@ -398,13 +400,13 @@ struct dp_rx_cached_buf { #endif /* QCA_HOST_MODE_WIFI_DISABLED */ -/* - *dp_rx_xor_block() - xor block of data - *@b: destination data block - *@a: source data block - *@len: length of the data to process +/** + * dp_rx_xor_block() - xor block of data + * @b: destination data block + * @a: source data block + * @len: length of the data to process * - *Returns: None + * Return: None */ static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len) { @@ -414,31 +416,31 @@ static inline void dp_rx_xor_block(uint8_t *b, const uint8_t *a, qdf_size_t len) b[i] ^= a[i]; } -/* - *dp_rx_rotl() - rotate the bits left - *@val: unsigned integer input value - *@bits: number of bits +/** + * dp_rx_rotl() - rotate the bits left + * @val: unsigned integer input value + * @bits: number of bits * - *Returns: Integer with left rotated by number of 'bits' + * Return: Integer with left rotated by number of 'bits' */ static inline uint32_t dp_rx_rotl(uint32_t val, int bits) { return (val << bits) | (val >> (32 - bits)); } -/* - *dp_rx_rotr() - rotate the bits right - *@val: unsigned integer input value - *@bits: number of bits +/** + * dp_rx_rotr() - rotate the bits right + * @val: unsigned integer input value + * @bits: number of bits * - *Returns: Integer with right rotated by number of 'bits' + * Return: Integer with right rotated by number of 'bits' */ static inline uint32_t dp_rx_rotr(uint32_t val, int bits) { return (val >> bits) | (val << (32 - bits)); } -/* +/** * dp_set_rx_queue() - set queue_mapping in skb * @nbuf: skb * @queue_id: rx queue_id @@ -457,25 +459,25 @@ static inline void dp_set_rx_queue(qdf_nbuf_t nbuf, uint8_t queue_id) } #endif -/* - *dp_rx_xswap() - swap the bits left - *@val: unsigned integer input value +/** + * dp_rx_xswap() - swap the bits left + * @val: unsigned integer input value * - *Returns: Integer with bits swapped + * Return: Integer with bits swapped */ static inline uint32_t dp_rx_xswap(uint32_t val) { return ((val & 0x00ff00ff) << 8) | ((val & 0xff00ff00) >> 8); } -/* - *dp_rx_get_le32_split() - get little endian 32 bits split - *@b0: byte 0 - 
*@b1: byte 1 - *@b2: byte 2 - *@b3: byte 3 +/** + * dp_rx_get_le32_split() - get little endian 32 bits split + * @b0: byte 0 + * @b1: byte 1 + * @b2: byte 2 + * @b3: byte 3 * - *Returns: Integer with split little endian 32 bits + * Return: Integer with split little endian 32 bits */ static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2, uint8_t b3) @@ -483,26 +485,23 @@ static inline uint32_t dp_rx_get_le32_split(uint8_t b0, uint8_t b1, uint8_t b2, return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24); } -/* - *dp_rx_get_le32() - get little endian 32 bits - *@b0: byte 0 - *@b1: byte 1 - *@b2: byte 2 - *@b3: byte 3 +/** + * dp_rx_get_le32() - get little endian 32 bits + * @p: source 32-bit value * - *Returns: Integer with little endian 32 bits + * Return: Integer with little endian 32 bits */ static inline uint32_t dp_rx_get_le32(const uint8_t *p) { return dp_rx_get_le32_split(p[0], p[1], p[2], p[3]); } -/* +/** * dp_rx_put_le32() - put little endian 32 bits * @p: destination char array * @v: source 32-bit integer * - * Returns: None + * Return: None */ static inline void dp_rx_put_le32(uint8_t *p, uint32_t v) { @@ -528,8 +527,8 @@ static inline void dp_rx_put_le32(uint8_t *p, uint32_t v) /** * struct dp_rx_desc_list_elem_t * - * @next : Next pointer to form free list - * @rx_desc : DP Rx descriptor + * @next: Next pointer to form free list + * @rx_desc: DP Rx descriptor */ union dp_rx_desc_list_elem_t { union dp_rx_desc_list_elem_t *next; @@ -541,6 +540,7 @@ union dp_rx_desc_list_elem_t { * dp_rx_desc_find() - find dp rx descriptor from page ID and offset * @page_id: Page ID * @offset: Offset of the descriptor element + * @rx_pool: RX pool * * Return: RX descriptor element */ @@ -791,6 +791,7 @@ fail: * * When multi page alloc is disabled SW cookie validness is * checked while fetching Rx descriptor, so no need to check here + * * Return: true if cookie is valid else false */ static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc, @@ -800,42 +801,140 @@ static inline bool dp_rx_is_sw_cookie_valid(struct dp_soc *soc, } #endif +/** + * dp_rx_desc_pool_is_allocated() - check if memory is allocated for the + * rx descriptor pool + * @rx_desc_pool: rx descriptor pool pointer + * + * Return: QDF_STATUS QDF_STATUS_SUCCESS + * QDF_STATUS_E_NOMEM + */ QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool); + +/** + * dp_rx_desc_pool_alloc() - Allocate a memory pool for software rx + * descriptors + * @soc: core txrx main context + * @pool_size: number of rx descriptors (size of the pool) + * @rx_desc_pool: rx descriptor pool pointer + * + * Return: QDF_STATUS QDF_STATUS_SUCCESS + * QDF_STATUS_E_NOMEM + * QDF_STATUS_E_FAULT + */ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_size, struct rx_desc_pool *rx_desc_pool); +/** + * dp_rx_desc_pool_init() - Initialize the software RX descriptor pool + * @soc: core txrx main context + * @pool_id: pool_id which is one of 3 mac_ids + * @pool_size: size of the rx descriptor pool + * @rx_desc_pool: rx descriptor pool pointer + * + * Convert the pool of memory into a list of rx descriptors and create + * locks to access this list of rx descriptors. + * + */ void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id, uint32_t pool_size, struct rx_desc_pool *rx_desc_pool); +/** + * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to + * freelist. 
+ * @soc: core txrx main context + * @local_desc_list: local desc list provided by the caller + * @tail: attach the point to last desc of local desc list + * @pool_id: pool_id which is one of 3 mac_ids + * @rx_desc_pool: rx descriptor pool pointer + */ void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc, union dp_rx_desc_list_elem_t **local_desc_list, union dp_rx_desc_list_elem_t **tail, uint16_t pool_id, struct rx_desc_pool *rx_desc_pool); +/** + * dp_rx_get_free_desc_list() - provide a list of descriptors from + * the free rx desc pool. + * @soc: core txrx main context + * @pool_id: pool_id which is one of 3 mac_ids + * @rx_desc_pool: rx descriptor pool pointer + * @num_descs: number of descs requested from freelist + * @desc_list: attach the descs to this list (output parameter) + * @tail: attach the point to last desc of free list (output parameter) + * + * Return: number of descs allocated from free list. + */ uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id, struct rx_desc_pool *rx_desc_pool, uint16_t num_descs, union dp_rx_desc_list_elem_t **desc_list, union dp_rx_desc_list_elem_t **tail); +/** + * dp_rx_pdev_desc_pool_alloc() - allocate memory for software rx descriptor + * pool + * @pdev: core txrx pdev context + * + * Return: QDF_STATUS - QDF_STATUS_SUCCESS + * QDF_STATUS_E_NOMEM + */ QDF_STATUS dp_rx_pdev_desc_pool_alloc(struct dp_pdev *pdev); + +/** + * dp_rx_pdev_desc_pool_free() - free software rx descriptor pool + * @pdev: core txrx pdev context + */ void dp_rx_pdev_desc_pool_free(struct dp_pdev *pdev); +/** + * dp_rx_pdev_desc_pool_init() - initialize software rx descriptors + * @pdev: core txrx pdev context + * + * Return: QDF_STATUS - QDF_STATUS_SUCCESS + * QDF_STATUS_E_NOMEM + */ QDF_STATUS dp_rx_pdev_desc_pool_init(struct dp_pdev *pdev); + +/** + * dp_rx_pdev_desc_pool_deinit() - de-initialize software rx descriptor pools + * @pdev: core txrx pdev context + * + * This function resets the freelist of rx descriptors and destroys locks + * associated with this list of descriptors. + */ void dp_rx_pdev_desc_pool_deinit(struct dp_pdev *pdev); + void dp_rx_desc_pool_deinit(struct dp_soc *soc, struct rx_desc_pool *rx_desc_pool, uint32_t pool_id); QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev); + +/** + * dp_rx_pdev_buffers_alloc() - Allocate nbufs (skbs) and replenish RxDMA ring + * @pdev: core txrx pdev context + * + * Return: QDF_STATUS - QDF_STATUS_SUCCESS + * QDF_STATUS_E_NOMEM + */ QDF_STATUS dp_rx_pdev_buffers_alloc(struct dp_pdev *pdev); + +/** + * dp_rx_pdev_buffers_free() - Free nbufs (skbs) + * @pdev: core txrx pdev context + */ void dp_rx_pdev_buffers_free(struct dp_pdev *pdev); void dp_rx_pdev_detach(struct dp_pdev *pdev); +/** + * dp_print_napi_stats() - NAPI stats + * @soc: soc handle + */ void dp_print_napi_stats(struct dp_soc *soc); /** @@ -858,7 +957,7 @@ dp_rx_process(struct dp_intr *int_ctx, hal_ring_handle_t hal_ring_hdl, * dp_rx_err_process() - Processes error frames routed to REO error ring * @int_ctx: pointer to DP interrupt context * @soc: core txrx main context - * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced + * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be serviced * @quota: No. of units (packets) that can be serviced in one shot. 
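/*
 * A minimal sketch of how the pdev-level pool APIs documented in this
 * header compose: alloc/init/replenish on the way up, mirrored by
 * buffers_free/deinit/free on the way down. The *_sketch wrapper names
 * are hypothetical and error handling is reduced to a simple unwind.
 */
static QDF_STATUS dp_rx_pdev_pool_attach_sketch(struct dp_pdev *pdev)
{
	QDF_STATUS status;

	status = dp_rx_pdev_desc_pool_alloc(pdev);	/* sw desc memory */
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	status = dp_rx_pdev_desc_pool_init(pdev);	/* freelist + locks */
	if (QDF_IS_STATUS_ERROR(status))
		goto free_pool;

	status = dp_rx_pdev_buffers_alloc(pdev);	/* nbufs + RxDMA refill */
	if (QDF_IS_STATUS_ERROR(status))
		goto deinit_pool;

	return QDF_STATUS_SUCCESS;

deinit_pool:
	dp_rx_pdev_desc_pool_deinit(pdev);
free_pool:
	dp_rx_pdev_desc_pool_free(pdev);
	return status;
}

static void dp_rx_pdev_pool_detach_sketch(struct dp_pdev *pdev)
{
	dp_rx_pdev_buffers_free(pdev);
	dp_rx_pdev_desc_pool_deinit(pdev);
	dp_rx_pdev_desc_pool_free(pdev);
}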
* * This function implements error processing and top level demultiplexer @@ -873,7 +972,8 @@ uint32_t dp_rx_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, * dp_rx_wbm_err_process() - Processes error frames routed to WBM release ring * @int_ctx: pointer to DP interrupt context * @soc: core txrx main context - * @hal_ring: opaque pointer to the HAL Rx Error Ring, which will be serviced + * @hal_ring_hdl: opaque pointer to the HAL Rx Error Ring, which will be + * serviced * @quota: No. of units (packets) that can be serviced in one shot. * * This function implements error processing and top level demultiplexer @@ -905,7 +1005,7 @@ qdf_nbuf_t dp_rx_sg_create(struct dp_soc *soc, qdf_nbuf_t nbuf); */ bool dp_rx_is_sg_supported(void); -/* +/** * dp_rx_desc_nbuf_and_pool_free() - free the sw rx desc pool called during * de-initialization of wifi module. * @@ -920,13 +1020,13 @@ void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id, #endif /* QCA_HOST_MODE_WIFI_DISABLED */ -/* +/** * dp_rx_desc_nbuf_free() - free the sw rx desc nbufs called during * de-initialization of wifi module. * * @soc: core txrx main context - * @pool_id: pool_id which is one of 3 mac_ids * @rx_desc_pool: rx descriptor pool pointer + * @is_mon_pool: true if this is a monitor pool * * Return: None */ @@ -935,7 +1035,7 @@ void dp_rx_desc_nbuf_free(struct dp_soc *soc, bool is_mon_pool); #ifdef DP_RX_MON_MEM_FRAG -/* +/** * dp_rx_desc_frag_free() - free the sw rx desc frag called during * de-initialization of wifi module. * @@ -953,7 +1053,7 @@ void dp_rx_desc_frag_free(struct dp_soc *soc, { } #endif -/* +/** * dp_rx_desc_pool_free() - free the sw rx desc array called during * de-initialization of wifi module. * @@ -965,11 +1065,21 @@ void dp_rx_desc_frag_free(struct dp_soc *soc, void dp_rx_desc_pool_free(struct dp_soc *soc, struct rx_desc_pool *rx_desc_pool); +/** + * dp_rx_deliver_raw() - process RAW mode pkts and hand over the + * pkts to RAW mode simulation to + * decapsulate the pkt. 
+ * @vdev: vdev on which RAW mode is enabled + * @nbuf_list: list of RAW pkts to process + * @txrx_peer: peer object from which the pkt is rx + * + * Return: void + */ void dp_rx_deliver_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf_list, - struct dp_txrx_peer *peer); + struct dp_txrx_peer *txrx_peer); #ifdef RX_DESC_LOGGING -/* +/** * dp_rx_desc_alloc_dbg_info() - Alloc memory for rx descriptor debug * structure * @rx_desc: rx descriptor pointer @@ -982,7 +1092,7 @@ void dp_rx_desc_alloc_dbg_info(struct dp_rx_desc *rx_desc) rx_desc->dbg_info = qdf_mem_malloc(sizeof(struct dp_rx_desc_dbg_info)); } -/* +/** * dp_rx_desc_free_dbg_info() - Free rx descriptor debug * structure memory * @rx_desc: rx descriptor pointer @@ -995,10 +1105,12 @@ void dp_rx_desc_free_dbg_info(struct dp_rx_desc *rx_desc) qdf_mem_free(rx_desc->dbg_info); } -/* +/** * dp_rx_desc_update_dbg_info() - Update rx descriptor debug info * structure memory * @rx_desc: rx descriptor pointer + * @func_name: name of calling function + * @flag: * * Return: None */ @@ -1044,7 +1156,7 @@ void dp_rx_desc_update_dbg_info(struct dp_rx_desc *rx_desc, #endif /* RX_DESC_LOGGING */ /** - * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list + * __dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list * * @head: pointer to the head of local free list * @tail: pointer to the tail of local free list @@ -1072,12 +1184,60 @@ void __dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head, *tail = *head; } +/** + * dp_rx_process_invalid_peer(): Function to pass invalid peer list to umac + * @soc: DP SOC handle + * @nbuf: network buffer + * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and + * pool_id has same mapping) + * + * Return: integer type + */ uint8_t dp_rx_process_invalid_peer(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t mac_id); + +/** + * dp_rx_process_invalid_peer_wrapper(): Function to wrap invalid peer handler + * @soc: DP SOC handle + * @mpdu: mpdu for which peer is invalid + * @mpdu_done: if an mpdu is completed + * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and + * pool_id has same mapping) + * + * Return: integer type + */ void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc, qdf_nbuf_t mpdu, bool mpdu_done, uint8_t mac_id); + +/** + * dp_rx_process_mic_error(): Function to pass mic error indication to umac + * @soc: core DP main context + * @nbuf: buffer pointer + * @rx_tlv_hdr: start of rx tlv header + * @txrx_peer: txrx peer handle + * + * Return: void + */ void dp_rx_process_mic_error(struct dp_soc *soc, qdf_nbuf_t nbuf, - uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer); + uint8_t *rx_tlv_hdr, + struct dp_txrx_peer *txrx_peer); + +/** + * dp_2k_jump_handle() - Function to handle 2k jump exception + * on WBM ring + * @soc: core DP main context + * @nbuf: buffer pointer + * @rx_tlv_hdr: start of rx tlv header + * @peer_id: peer id of first msdu + * @tid: Tid for which exception occurred + * + * This function handles 2k jump violations arising out + * of receiving aggregates in non BA case. This typically + * may happen if aggregates are received on a QOS enabled TID + * while Rx window size is still initialized to value of 2. Or + * it may also happen if negotiated window size is 1 but peer + * sends aggregates. 
+ */ void dp_2k_jump_handle(struct dp_soc *soc, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr, uint16_t peer_id, uint8_t tid); @@ -1290,7 +1450,7 @@ static inline bool dp_nbuf_dst_addr_is_self_addr(struct dp_vdev *vdev, QDF_NBUF_DEST_MAC_OFFSET); } -/* +/** * dp_rx_intrabss_eapol_drop_check() - API For EAPOL * pkt with DA not equal to vdev mac addr, fwd is not allowed. * @soc: core txrx main context @@ -1328,13 +1488,34 @@ bool dp_rx_intrabss_eapol_drop_check(struct dp_soc *soc, } #endif /* DISABLE_EAPOL_INTRABSS_FWD */ +/** + * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets + * @soc: core txrx main context + * @ta_peer: source peer entry + * @rx_tlv_hdr: start address of rx tlvs + * @nbuf: nbuf that has to be intrabss forwarded + * @tid_stats: tid stats pointer + * + * Return: bool: true if it is forwarded else false + */ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, - struct dp_txrx_peer *ta_txrx_peer, + struct dp_txrx_peer *ta_peer, uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, struct cdp_tid_rx_stats *tid_stats); +/** + * dp_rx_intrabss_ucast_fwd() - Does intrabss forward for unicast packets + * @soc: core txrx main context + * @ta_peer: source peer entry + * @tx_vdev_id: VDEV ID for Intra-BSS TX + * @rx_tlv_hdr: start address of rx tlvs + * @nbuf: nbuf that has to be intrabss forwarded + * @tid_stats: tid stats pointer + * + * Return: bool: true if it is forwarded else false + */ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, - struct dp_txrx_peer *ta_txrx_peer, + struct dp_txrx_peer *ta_peer, uint8_t tx_vdev_id, uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf, struct cdp_tid_rx_stats *tid_stats); @@ -1398,7 +1579,7 @@ dp_rx_ipa_wds_srcport_learn(struct dp_soc *soc, } #endif -/* +/** * dp_rx_desc_dump() - dump the sw rx descriptor * * @rx_desc: sw rx descriptor @@ -1412,7 +1593,7 @@ static inline void dp_rx_desc_dump(struct dp_rx_desc *rx_desc) #ifndef QCA_HOST_MODE_WIFI_DISABLED -/* +/** * check_qwrap_multicast_loopback() - Check if rx packet is a loopback packet. * In qwrap mode, packets originated from * any vdev should not loopback and @@ -1477,6 +1658,7 @@ static inline bool check_qwrap_multicast_loopback(struct dp_vdev *vdev, * @ring_index: REO ring number, not used for error & monitor ring * @is_reo_exception: flag to indicate if rx from REO ring or exception ring * @is_update_stats: flag to indicate whether to update stats or not + * * Return: void */ static inline void @@ -1496,6 +1678,7 @@ dp_rx_update_protocol_tag(struct dp_soc *soc, struct dp_vdev *vdev, * @vdev: vdev on which the packet is received * @nbuf: QDF pkt buffer on which the protocol tag should be set * @rx_tlv_hdr: rBbase address where the RX TLVs starts + * * Return: bool */ static inline bool @@ -1515,7 +1698,7 @@ dp_rx_err_cce_drop(struct dp_soc *soc, struct dp_vdev *vdev, * @vdev: vdev on which the packet is received * @nbuf: QDF pkt buffer on which the protocol tag should be set * @rx_tlv_hdr: base address where the RX TLVs starts - * @is_update_stats: flag to indicate whether to update stats or not + * @update_stats: flag to indicate whether to update stats or not * * Return: void */ @@ -1527,12 +1710,12 @@ dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev, #endif /* WLAN_SUPPORT_RX_FLOW_TAG */ #define CRITICAL_BUFFER_THRESHOLD 64 -/* - * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs +/** + * __dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs * called during dp rx initialization * and at the end of dp_rx_process. 
* - * @soc: core txrx main context + * @dp_soc: core txrx main context * @mac_id: mac_id which is one of 3 mac_ids * @dp_rxdma_srng: dp rxdma circular ring * @rx_desc_pool: Pointer to free Rx descriptor pool @@ -1543,6 +1726,7 @@ dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev, * @tail: tail of descs list * @req_only: If true don't replenish more than req buffers * @func_name: name of the caller function + * * Return: return success or failure */ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, @@ -1553,7 +1737,8 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, union dp_rx_desc_list_elem_t **tail, bool req_only, const char *func_name); -/* + +/** * __dp_rx_buffers_no_map_replenish() - replenish rxdma ring with rx nbufs * use direct APIs to get invalidate * and get the physical address of the @@ -1561,7 +1746,7 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, * dp rx initialization and at the end * of dp_rx_process. * - * @soc: core txrx main context + * @dp_soc: core txrx main context * @mac_id: mac_id which is one of 3 mac_ids * @dp_rxdma_srng: dp rxdma circular ring * @rx_desc_pool: Pointer to free Rx descriptor pool @@ -1570,6 +1755,7 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id, * or NULL during dp rx initialization or out of buffer * interrupt. * @tail: tail of descs list + * * Return: return success or failure */ QDF_STATUS @@ -1580,35 +1766,38 @@ __dp_rx_buffers_no_map_replenish(struct dp_soc *dp_soc, uint32_t mac_id, union dp_rx_desc_list_elem_t **desc_list, union dp_rx_desc_list_elem_t **tail); -/* - * __dp_rx_buffers_no_map__lt_replenish() - replenish rxdma ring with rx nbufs +/** + * __dp_rx_buffers_no_map_lt_replenish() - replenish rxdma ring with rx nbufs * use direct APIs to get invalidate * and get the physical address of the * nbuf instead of map api,called when * low threshold interrupt is triggered * - * @soc: core txrx main context + * @dp_soc: core txrx main context * @mac_id: mac_id which is one of 3 mac_ids * @dp_rxdma_srng: dp rxdma circular ring * @rx_desc_pool: Pointer to free Rx descriptor pool + * * Return: return success or failure */ QDF_STATUS __dp_rx_buffers_no_map_lt_replenish(struct dp_soc *dp_soc, uint32_t mac_id, struct dp_srng *dp_rxdma_srng, struct rx_desc_pool *rx_desc_pool); -/* + +/** * __dp_pdev_rx_buffers_no_map_attach() - replenish rxdma ring with rx nbufs * use direct APIs to get invalidate * and get the physical address of the * nbuf instead of map api,called during * dp rx initialization. 
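/*
 * A minimal sketch of the documented replenish pattern at the end of an
 * rx-processing pass: descriptors reaped from the ring are handed back
 * through desc_list/tail together with the number of ring entries to
 * refill. The *_sketch helper and its rx_bufs_reaped argument are
 * hypothetical.
 */
static void dp_rx_replenish_sketch(struct dp_soc *soc, uint32_t mac_id,
				   struct dp_srng *rxdma_srng,
				   struct rx_desc_pool *rx_desc_pool,
				   uint32_t rx_bufs_reaped,
				   union dp_rx_desc_list_elem_t *desc_list,
				   union dp_rx_desc_list_elem_t *tail)
{
	if (!rx_bufs_reaped)
		return;

	/* Refill what was consumed; per the kernel-doc, req_only = false
	 * allows topping up beyond the requested count when buffers permit.
	 */
	__dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
				  rx_bufs_reaped, &desc_list, &tail,
				  false, __func__);
}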
* - * @soc: core txrx main context + * @dp_soc: core txrx main context * @mac_id: mac_id which is one of 3 mac_ids * @dp_rxdma_srng: dp rxdma circular ring * @rx_desc_pool: Pointer to free Rx descriptor pool * @num_req_buffers: number of buffer to be replenished + * * Return: return success or failure */ QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *dp_soc, @@ -1617,11 +1806,11 @@ QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *dp_soc, struct rx_desc_pool *rx_desc_pool, uint32_t num_req_buffers); -/* +/** * dp_pdev_rx_buffers_attach() - replenish rxdma ring with rx nbufs * called during dp rx initialization * - * @soc: core txrx main context + * @dp_soc: core txrx main context * @mac_id: mac_id which is one of 3 mac_ids * @dp_rxdma_srng: dp rxdma circular ring * @rx_desc_pool: Pointer to free Rx descriptor pool @@ -1640,8 +1829,7 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id, * (WBM), following error handling * * @soc: core DP main context - * @buf_addr_info: opaque pointer to the REO error ring descriptor - * @buf_addr_info: void pointer to the buffer_addr_info + * @ring_desc: opaque pointer to the REO error ring descriptor * @bm_action: put to idle_list or release to msdu_list * * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS @@ -1656,6 +1844,7 @@ dp_rx_link_desc_return(struct dp_soc *soc, hal_ring_desc_t ring_desc, * * @soc: core DP main context * @link_desc_addr: link descriptor addr + * @bm_action: put to idle_list or release to msdu_list * * Return: QDF_STATUS_E_FAILURE for failure else QDF_STATUS_SUCCESS */ @@ -1666,9 +1855,9 @@ dp_rx_link_desc_return_by_addr(struct dp_soc *soc, /** * dp_rxdma_err_process() - RxDMA error processing functionality + * @int_ctx: pointer to DP interrupt context * @soc: core txrx main context * @mac_id: mac id which is one of 3 mac_ids - * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced * @quota: No. of units (packets) that can be serviced in one shot. * * Return: num of buffers processed @@ -1677,19 +1866,45 @@ uint32_t dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc, uint32_t mac_id, uint32_t quota); +/** + * dp_rx_fill_mesh_stats() - Fills the mesh per packet receive stats + * @vdev: DP Virtual device handle + * @nbuf: Buffer pointer + * @rx_tlv_hdr: start of rx tlv header + * @txrx_peer: pointer to peer + * + * This function allocated memory for mesh receive stats and fill the + * required stats. Stores the memory address in skb cb. + * + * Return: void + */ void dp_rx_fill_mesh_stats(struct dp_vdev *vdev, qdf_nbuf_t nbuf, - uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer); + uint8_t *rx_tlv_hdr, + struct dp_txrx_peer *txrx_peer); + +/** + * dp_rx_filter_mesh_packets() - Filters mesh unwanted packets + * @vdev: DP Virtual device handle + * @nbuf: Buffer pointer + * @rx_tlv_hdr: start of rx tlv header + * + * This checks if the received packet is matching any filter out + * catogery and and drop the packet if it matches. 
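/*
 * A minimal caller sketch for the return convention documented for
 * dp_rx_filter_mesh_packets(): QDF_STATUS_SUCCESS means the frame
 * matched a filter-out category and must be dropped. The *_sketch
 * helper is hypothetical.
 */
static bool dp_rx_mesh_filter_sketch(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
				     uint8_t *rx_tlv_hdr)
{
	if (dp_rx_filter_mesh_packets(vdev, nbuf, rx_tlv_hdr) ==
	    QDF_STATUS_SUCCESS) {
		qdf_nbuf_free(nbuf);
		return true;	/* filtered out and dropped */
	}

	return false;		/* continue with mesh stats and delivery */
}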
+ * + * Return: QDF_STATUS_SUCCESS indicates drop, + * QDF_STATUS_E_FAILURE indicate to not drop + */ QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf, uint8_t *rx_tlv_hdr); int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev, struct dp_txrx_peer *peer); -/* +/** * dp_rx_dump_info_and_assert() - dump RX Ring info and Rx Desc info * * @soc: core txrx main context - * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced + * @hal_ring_hdl: opaque pointer to the HAL Rx Ring, which will be serviced * @ring_desc: opaque pointer to the RX ring descriptor * @rx_desc: host rx descriptor * @@ -1700,9 +1915,25 @@ void dp_rx_dump_info_and_assert(struct dp_soc *soc, hal_ring_desc_t ring_desc, struct dp_rx_desc *rx_desc); +/** + * dp_rx_compute_delay() - Compute and fill in all timestamps + * to pass in correct fields + * @vdev: pdev handle + * @nbuf: network buffer + * + * Return: none + */ void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf); #ifdef QCA_PEER_EXT_STATS + +/** + * dp_rx_compute_tid_delay - Compute per TID delay stats + * @stats: TID delay stats to update + * @nbuf: NBuffer + * + * Return: Void + */ void dp_rx_compute_tid_delay(struct cdp_delay_tid_stats *stats, qdf_nbuf_t nbuf); #endif /* QCA_PEER_EXT_STATS */ @@ -1775,7 +2006,7 @@ void dp_rx_desc_frag_prep(struct dp_rx_desc *rx_desc, * @rx_desc: rx descriptor * @ring_paddr: paddr obatined from the ring * - * Returns: QDF_STATUS + * Return: QDF_STATUS */ static inline bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc, @@ -1824,12 +2055,40 @@ bool dp_rx_desc_paddr_sanity_check(struct dp_rx_desc *rx_desc, } #endif /* RX_DESC_DEBUG_CHECK */ +/** + * dp_rx_enable_mon_dest_frag() - Enable frag processing for + * monitor destination ring via frag. + * @rx_desc_pool: Rx desc pool + * @is_mon_dest_desc: Is it for monitor dest buffer + * + * Enable this flag only for monitor destination buffer processing + * if DP_RX_MON_MEM_FRAG feature is enabled. + * If flag is set then frag based function will be called for alloc, + * map, prep desc and free ops for desc buffer else normal nbuf based + * function will be called. + * + * Return: None + */ void dp_rx_enable_mon_dest_frag(struct rx_desc_pool *rx_desc_pool, bool is_mon_dest_desc); -void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf, - uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer, - uint8_t err_code, uint8_t mac_id); +/** + * dp_rx_process_rxdma_err() - Function to deliver rxdma unencrypted_err + * frames to OS or wifi parse errors. + * @soc: core DP main context + * @nbuf: buffer pointer + * @rx_tlv_hdr: start of rx tlv header + * @txrx_peer: peer reference + * @err_code: rxdma err code + * @mac_id: mac_id which is one of 3 mac_ids(Assuming mac_id and + * pool_id has same mapping) + * + * Return: None + */ +void +dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf, + uint8_t *rx_tlv_hdr, struct dp_txrx_peer *txrx_peer, + uint8_t err_code, uint8_t mac_id); #ifndef QCA_MULTIPASS_SUPPORT static inline @@ -1839,7 +2098,21 @@ bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf, return false; } #else -bool dp_rx_multipass_process(struct dp_txrx_peer *peer, qdf_nbuf_t nbuf, +/** + * dp_rx_multipass_process - insert vlan tag on frames for traffic separation + * @txrx_peer: DP txrx peer handle + * @nbuf: skb + * @tid: traffic priority + * + * Return: bool: true in case of success else false + * Success is considered if: + * i. If frame has vlan header + * ii. 
If the frame comes from different peer and dont need multipass processing + * Failure is considered if: + * i. Frame comes from multipass peer but doesn't contain vlan header. + * In failure case, drop such frames. + */ +bool dp_rx_multipass_process(struct dp_txrx_peer *txrx_peer, qdf_nbuf_t nbuf, uint8_t tid); #endif @@ -1862,7 +2135,7 @@ QDF_STATUS dp_peer_set_rx_capture_enabled(struct dp_pdev *pdev, * Caller to hold peer refcount and check for valid peer * @soc: soc * @vdev: vdev - * @txrx_peer: txrx peer + * @peer: txrx peer * @nbuf_head: skb list head * @nbuf_tail: skb list tail * @@ -1884,7 +2157,7 @@ QDF_STATUS dp_rx_deliver_to_stack(struct dp_soc *soc, * @nbuf_head: skb list head * @nbuf_tail: skb list tail * - * return: QDF_STATUS + * Return: QDF_STATUS */ QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc, struct dp_vdev *vdev, @@ -1896,11 +2169,11 @@ QDF_STATUS dp_rx_eapol_deliver_to_stack(struct dp_soc *soc, #ifndef QCA_HOST_MODE_WIFI_DISABLED #ifdef QCA_OL_RX_LOCK_LESS_ACCESS -/* - * dp_rx_ring_access_start()- Wrapper function to log access start of a hal ring +/** + * dp_rx_srng_access_start()- Wrapper function to log access start of a hal ring * @int_ctx: pointer to DP interrupt context - * @dp_soc - DP soc structure pointer - * @hal_ring_hdl - HAL ring handle + * @soc: DP soc structure pointer + * @hal_ring_hdl: HAL ring handle * * Return: 0 on success; error on failure */ @@ -1911,13 +2184,13 @@ dp_rx_srng_access_start(struct dp_intr *int_ctx, struct dp_soc *soc, return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl); } -/* - * dp_rx_ring_access_end()- Wrapper function to log access end of a hal ring +/** + * dp_rx_srng_access_end()- Wrapper function to log access end of a hal ring * @int_ctx: pointer to DP interrupt context - * @dp_soc - DP soc structure pointer - * @hal_ring_hdl - HAL ring handle + * @soc: DP soc structure pointer + * @hal_ring_hdl: HAL ring handle * - * Return - None + * Return: None */ static inline void dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc, @@ -1943,7 +2216,7 @@ dp_rx_srng_access_end(struct dp_intr *int_ctx, struct dp_soc *soc, #endif /* QCA_HOST_MODE_WIFI_DISABLED */ -/* +/** * dp_rx_wbm_sg_list_reset() - Initialize sg list * * This api should be called at soc init and afterevery sg processing. @@ -1959,7 +2232,7 @@ static inline void dp_rx_wbm_sg_list_reset(struct dp_soc *soc) } } -/* +/** * dp_rx_wbm_sg_list_deinit() - De-initialize sg list * * This api should be called in down path, to avoid any leak. @@ -2001,14 +2274,14 @@ static inline void dp_rx_wbm_sg_list_deinit(struct dp_soc *soc) #endif /* QCA_HOST_MODE_WIFI_DISABLED */ -/* +/** * dp_rx_link_desc_refill_duplicate_check() - check if link desc duplicate - to refill + * to refill * @soc: DP SOC handle * @buf_info: the last link desc buf info * @ring_buf_info: current buf address pointor including link desc * - * return: none. + * Return: none. 
*/ void dp_rx_link_desc_refill_duplicate_check( struct dp_soc *soc, @@ -2021,7 +2294,7 @@ void dp_rx_link_desc_refill_duplicate_check( * @soc : dp_soc handle * @pdev: dp_pdev handle * @peer_id: peer_id of the peer for which completion came - * @ppdu_id: ppdu_id + * @is_offload: * @netbuf: Buffer pointer * * This function is used to deliver rx packet to packet capture @@ -2075,6 +2348,15 @@ static inline bool dp_rx_mcast_echo_check(struct dp_soc *soc, #endif /* QCA_HOST_MODE_WIFI_DISABLED */ #ifdef RECEIVE_OFFLOAD +/** + * dp_rx_fill_gro_info() - Fill GRO info from RX TLV into skb->cb + * @soc: DP SOC handle + * @rx_tlv: RX TLV received for the msdu + * @msdu: msdu for which GRO info needs to be filled + * @rx_ol_pkt_cnt: counter to be incremented for GRO eligible packets + * + * Return: None + */ void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, qdf_nbuf_t msdu, uint32_t *rx_ol_pkt_cnt); #else @@ -2085,19 +2367,70 @@ void dp_rx_fill_gro_info(struct dp_soc *soc, uint8_t *rx_tlv, } #endif +/** + * dp_rx_msdu_stats_update() - update per msdu stats. + * @soc: core txrx main context + * @nbuf: pointer to the first msdu of an amsdu. + * @rx_tlv_hdr: pointer to the start of RX TLV headers. + * @txrx_peer: pointer to the txrx peer object. + * @ring_id: reo dest ring number on which pkt is reaped. + * @tid_stats: per tid rx stats. + * + * update all the per msdu stats for that nbuf. + * + * Return: void + */ void dp_rx_msdu_stats_update(struct dp_soc *soc, qdf_nbuf_t nbuf, - uint8_t *rx_tlv_hdr, struct dp_txrx_peer *peer, + uint8_t *rx_tlv_hdr, + struct dp_txrx_peer *txrx_peer, uint8_t ring_id, struct cdp_tid_rx_stats *tid_stats); +/** + * dp_rx_deliver_to_stack_no_peer() - try deliver rx data even if + * no corresbonding peer found + * @soc: core txrx main context + * @nbuf: pkt skb pointer + * + * This function will try to deliver some RX special frames to stack + * even there is no peer matched found. for instance, LFR case, some + * eapol data will be sent to host before peer_map done. + * + * Return: None + */ void dp_rx_deliver_to_stack_no_peer(struct dp_soc *soc, qdf_nbuf_t nbuf); +/** + * dp_rx_srng_get_num_pending() - get number of pending entries + * @hal_soc: hal soc opaque pointer + * @hal_ring_hdl: opaque pointer to the HAL Rx Ring + * @num_entries: number of entries in the hal_ring. + * @near_full: pointer to a boolean. This is set if ring is near full. + * + * The function returns the number of entries in a destination ring which are + * yet to be reaped. The function also checks if the ring is near full. + * If more than half of the ring needs to be reaped, the ring is considered + * approaching full. + * The function uses hal_srng_dst_num_valid_locked to get the number of valid + * entries. It should not be called within a SRNG lock. HW pointer value is + * synced into cached_hp. + * + * Return: Number of pending entries if any + */ uint32_t dp_rx_srng_get_num_pending(hal_soc_handle_t hal_soc, hal_ring_handle_t hal_ring_hdl, uint32_t num_entries, bool *near_full); #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY +/** + * dp_rx_ring_record_entry() - Record an entry into the rx ring history. 
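/*
 * A minimal sketch of how the pending/near-full query documented for
 * dp_rx_srng_get_num_pending() can gate another pass over a destination
 * ring; the *_sketch helper is hypothetical. As noted in that kernel-doc,
 * the call must be made outside the SRNG lock.
 */
static bool dp_rx_ring_near_full_sketch(hal_soc_handle_t hal_soc,
					hal_ring_handle_t hal_ring_hdl,
					uint32_t num_entries)
{
	bool near_full = false;

	/* Returns the entries still to be reaped and sets near_full when
	 * more than half of the ring is pending. */
	(void)dp_rx_srng_get_num_pending(hal_soc, hal_ring_hdl, num_entries,
					 &near_full);

	return near_full;
}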
+ * @soc: Datapath soc structure + * @ring_num: REO ring number + * @ring_desc: REO ring descriptor + * + * Return: None + */ void dp_rx_ring_record_entry(struct dp_soc *soc, uint8_t ring_num, hal_ring_desc_t ring_desc); #else @@ -2126,6 +2459,13 @@ QDF_STATUS dp_rx_desc_sanity(struct dp_soc *soc, hal_soc_handle_t hal_soc, #endif #ifdef DP_RX_DROP_RAW_FRM +/** + * dp_rx_is_raw_frame_dropped() - if raw frame nbuf, free and drop + * @nbuf: pkt skb pointer + * + * Return: true - raw frame, dropped + * false - not raw frame, do nothing + */ bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf); #else static inline @@ -2136,6 +2476,15 @@ bool dp_rx_is_raw_frame_dropped(qdf_nbuf_t nbuf) #endif #ifdef RX_DESC_DEBUG_CHECK +/** + * dp_rx_desc_nbuf_sanity_check - Add sanity check to catch REO rx_desc paddr + * corruption + * @soc: DP SoC context + * @ring_desc: REO ring descriptor + * @rx_desc: Rx descriptor + * + * Return: NONE + */ QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc, hal_ring_desc_t ring_desc, struct dp_rx_desc *rx_desc); @@ -2150,6 +2499,13 @@ QDF_STATUS dp_rx_desc_nbuf_sanity_check(struct dp_soc *soc, #endif #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR +/** + * dp_rx_update_stats() - Update soc level rx packet count + * @soc: DP soc handle + * @nbuf: nbuf received + * + * Return: none + */ void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf); #else static inline @@ -2160,6 +2516,7 @@ void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf) /** * dp_rx_cksum_offload() - set the nbuf checksum as defined by hardware. + * @pdev: dp_pdev handle * @nbuf: pointer to the first msdu of an amsdu. * @rx_tlv_hdr: pointer to the start of RX TLV headers. * @@ -2254,10 +2611,10 @@ void dp_rx_update_stats(struct dp_soc *soc, qdf_nbuf_t nbuf); #ifdef QCA_SUPPORT_WDS_EXTENDED /** * dp_rx_is_list_ready() - Make different lists for 4-address - and 3-address frames + * and 3-address frames * @nbuf_head: skb list head * @vdev: vdev -* @txrx_peer : txrx_peer + * @txrx_peer : txrx_peer * @peer_id: peer id of new received frame * @vdev_id: vdev_id of new received frame * @@ -2698,9 +3055,8 @@ void dp_rx_nbuf_free(qdf_nbuf_t nbuf) #endif #ifdef DP_UMAC_HW_RESET_SUPPORT -/* +/** * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring - * * @soc: core txrx main context * @nbuf_list: nbuf list for delayed free * @@ -2708,7 +3064,7 @@ void dp_rx_nbuf_free(qdf_nbuf_t nbuf) */ void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list); -/* +/** * dp_rx_desc_delayed_free() - Delayed free of the rx descs * * @soc: core txrx main context @@ -2720,6 +3076,7 @@ void dp_rx_desc_delayed_free(struct dp_soc *soc); /** * dp_rx_get_txrx_peer_and_vdev() - Get txrx peer and vdev from peer id + * @soc: core txrx main context * @nbuf : pointer to the first msdu of an amsdu. * @peer_id : Peer id of the peer * @txrx_ref_handle : Buffer to save the handle for txrx peer's reference