diff --git a/dp/wifi3.0/dp_reo.c b/dp/wifi3.0/dp_reo.c index 7aa353f153..a0a6f0ca8f 100644 --- a/dp/wifi3.0/dp_reo.c +++ b/dp/wifi3.0/dp_reo.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -71,35 +71,10 @@ QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type, struct dp_reo_cmd_info *reo_cmd; int num; - switch (type) { - case CMD_GET_QUEUE_STATS: - num = hal_reo_cmd_queue_stats(soc->reo_cmd_ring.hal_srng, - soc->hal_soc, params); - break; - case CMD_FLUSH_QUEUE: - num = hal_reo_cmd_flush_queue(soc->reo_cmd_ring.hal_srng, - soc->hal_soc, params); - break; - case CMD_FLUSH_CACHE: - num = hal_reo_cmd_flush_cache(soc->reo_cmd_ring.hal_srng, - soc->hal_soc, params); - break; - case CMD_UNBLOCK_CACHE: - num = hal_reo_cmd_unblock_cache(soc->reo_cmd_ring.hal_srng, - soc->hal_soc, params); - break; - case CMD_FLUSH_TIMEOUT_LIST: - num = hal_reo_cmd_flush_timeout_list(soc->reo_cmd_ring.hal_srng, - soc->hal_soc, params); - break; - case CMD_UPDATE_RX_REO_QUEUE: - num = hal_reo_cmd_update_rx_queue(soc->reo_cmd_ring.hal_srng, - soc->hal_soc, params); - break; - default: - dp_err_log("Invalid REO command type: %d", type); + num = hal_reo_send_cmd(soc->hal_soc, soc->reo_cmd_ring.hal_srng, type, + params); + if (num < 0) return QDF_STATUS_E_INVAL; - }; dp_reo_cmd_srng_event_record(soc, type, num); @@ -130,7 +105,7 @@ QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type, uint32_t dp_reo_status_ring_handler(struct dp_intr *int_ctx, struct dp_soc *soc) { - uint32_t *reo_desc; + hal_ring_desc_t reo_desc; struct dp_reo_cmd_info *reo_cmd = NULL; union hal_reo_status reo_status; int num; diff --git a/hal/wifi3.0/be/hal_be_reo.c b/hal/wifi3.0/be/hal_be_reo.c new file mode 100644 index 
0000000000..8a269de667 --- /dev/null +++ b/hal/wifi3.0/be/hal_be_reo.c @@ -0,0 +1,1358 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ + +#include "qdf_module.h" +#include "hal_hw_headers.h" +#include "hal_be_hw_headers.h" +#include "hal_reo.h" +#include "hal_be_reo.h" +#include "hal_be_api.h" + +uint32_t hal_get_reo_reg_base_offset_be(void) +{ + return REO_REG_REG_BASE; +} + +/** + * hal_reo_qdesc_setup - Setup HW REO queue descriptor + * + * @hal_soc: Opaque HAL SOC handle + * @ba_window_size: BlockAck window size + * @start_seq: Starting sequence number + * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory + * @hw_qdesc_paddr: Physical address of REO queue descriptor memory + * @tid: TID + * + */ +void hal_reo_qdesc_setup_be(hal_soc_handle_t hal_soc_hdl, int tid, + uint32_t ba_window_size, + uint32_t start_seq, void *hw_qdesc_vaddr, + qdf_dma_addr_t hw_qdesc_paddr, + int pn_type) +{ + uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr; + uint32_t *reo_queue_ext_desc; + uint32_t reg_val; + uint32_t pn_enable; + uint32_t pn_size = 0; + + qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue)); + + hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED, + 
HAL_REO_QUEUE_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER, + RESERVED_0A, 0xDDBEEF); + + /* This a just a SW meta data and will be copied to REO destination + * descriptors indicated by hardware. + * TODO: Setting TID in this field. See if we should set something else. + */ + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, + RECEIVE_QUEUE_NUMBER, tid); + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, + VLD, 1); + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, + ASSOCIATED_LINK_DESCRIPTOR_COUNTER, + HAL_RX_LINK_DESC_CNTR); + + /* + * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0 + */ + + reg_val = TID_TO_WME_AC(tid); + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, AC, reg_val); + + if (ba_window_size < 1) + ba_window_size = 1; + + /* WAR to get 2k exception in Non BA case. + * Setting window size to 2 to get 2k jump exception + * when we receive aggregates in Non BA case + */ + ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size); + + /* Set RTY bit for non-BA case. Duplicate detection is currently not + * done by HW in non-BA case if RTY bit is not set. + * TODO: This is a temporary War and should be removed once HW fix is + * made to check and discard duplicates even if RTY bit is not set. 
+ */ + if (ba_window_size == 1) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, RTY, 1); + + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, BA_WINDOW_SIZE, + ba_window_size - 1); + + switch (pn_type) { + case HAL_PN_WPA: + pn_enable = 1; + pn_size = PN_SIZE_48; + break; + case HAL_PN_WAPI_EVEN: + case HAL_PN_WAPI_UNEVEN: + pn_enable = 1; + pn_size = PN_SIZE_128; + break; + default: + pn_enable = 0; + break; + } + + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, PN_CHECK_NEEDED, + pn_enable); + + if (pn_type == HAL_PN_WAPI_EVEN) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, + PN_SHALL_BE_EVEN, 1); + else if (pn_type == HAL_PN_WAPI_UNEVEN) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, + PN_SHALL_BE_UNEVEN, 1); + + /* + * TODO: Need to check if PN handling in SW needs to be enabled + * So far this is not a requirement + */ + + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, PN_SIZE, + pn_size); + + /* TODO: Check if RX_REO_QUEUE_IGNORE_AMPDU_FLAG need to be set + * based on BA window size and/or AMPDU capabilities + */ + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, + IGNORE_AMPDU_FLAG, 1); + + if (start_seq <= 0xfff) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, SSN, + start_seq); + + /* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA, + * but REO is not delivering packets if we set it to 1. Need to enable + * this once the issue is resolved + */ + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, SVLD, 0); + + /* TODO: Check if we should set start PN for WAPI */ + + /* TODO: HW queue descriptors are currently allocated for max BA + * window size for all QOS TIDs so that same descriptor can be used + * later when ADDBA request is recevied. This should be changed to + * allocate HW queue descriptors based on BA window size being + * negotiated (0 for non BA cases), and reallocate when BA window + * size changes and also send WMI message to FW to change the REO + * queue descriptor in Rx peer entry as part of dp_rx_tid_update. 
+ */ + if (tid == HAL_NON_QOS_TID) + return; + + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue *)reo_queue_desc) + 1); + qdf_mem_zero(reo_queue_ext_desc, 3 * + sizeof(struct rx_reo_queue_ext)); + /* Initialize first reo queue extension descriptor */ + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, + HAL_REO_QUEUE_EXT_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_ext_desc, + UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A, + 0xADBEEF); + /* Initialize second reo queue extension descriptor */ + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, + HAL_REO_QUEUE_EXT_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_ext_desc, + UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A, + 0xBDBEEF); + /* Initialize third reo queue extension descriptor */ + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, + HAL_REO_QUEUE_EXT_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_ext_desc, + UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A, + 0xCDBEEF); +} +qdf_export_symbol(hal_reo_qdesc_setup); + +/** + * hal_get_ba_aging_timeout_be - Get BA Aging timeout + * + * @hal_soc: Opaque HAL SOC handle + * @ac: Access category + * @value: window size to get + */ +void hal_get_ba_aging_timeout_be(hal_soc_handle_t hal_soc_hdl, uint8_t ac, + uint32_t *value) +{ + struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl; + + switch (ac) { + case WME_AC_BE: + *value = HAL_REG_READ(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR( + REO_REG_REG_BASE)) / 1000; + break; + case WME_AC_BK: + *value = HAL_REG_READ(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR( + REO_REG_REG_BASE)) / 1000; + break; + case WME_AC_VI: + *value = HAL_REG_READ(soc, + 
HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR( + REO_REG_REG_BASE)) / 1000; + break; + case WME_AC_VO: + *value = HAL_REG_READ(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR( + REO_REG_REG_BASE)) / 1000; + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid AC: %d\n", ac); + } +} +qdf_export_symbol(hal_get_ba_aging_timeout_be); + +/** + * hal_set_ba_aging_timeout_be - Set BA Aging timeout + * + * @hal_soc: Opaque HAL SOC handle + * @ac: Access category + * ac: 0 - Background, 1 - Best Effort, 2 - Video, 3 - Voice + * @value: Input value to set + */ +void hal_set_ba_aging_timeout_be(hal_soc_handle_t hal_soc_hdl, uint8_t ac, + uint32_t value) +{ + struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl; + + switch (ac) { + case WME_AC_BE: + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR( + REO_REG_REG_BASE), + value * 1000); + break; + case WME_AC_BK: + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR( + REO_REG_REG_BASE), + value * 1000); + break; + case WME_AC_VI: + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR( + REO_REG_REG_BASE), + value * 1000); + break; + case WME_AC_VO: + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR( + REO_REG_REG_BASE), + value * 1000); + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid AC: %d\n", ac); + } +} +qdf_export_symbol(hal_set_ba_aging_timeout_be); + +static inline void +hal_reo_cmd_set_descr_addr_be(uint32_t *reo_desc, + enum hal_reo_cmd_type type, + uint32_t paddr_lo, + uint8_t paddr_hi) +{ + switch (type) { + case CMD_GET_QUEUE_STATS: + HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS, + RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo); + HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS, + RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi); + break; + case CMD_FLUSH_QUEUE: + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE, + FLUSH_DESC_ADDR_31_0, paddr_lo); + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE, + FLUSH_DESC_ADDR_39_32, paddr_hi); + 
break; + case CMD_FLUSH_CACHE: + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE, + FLUSH_ADDR_31_0, paddr_lo); + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE, + FLUSH_ADDR_39_32, paddr_hi); + break; + case CMD_UPDATE_RX_REO_QUEUE: + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo); + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi); + break; + default: + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Invalid REO command type", __func__); + break; + } +} + +static inline int +hal_reo_cmd_queue_stats_be(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) +{ + uint32_t *reo_desc, val; + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E, + sizeof(struct reo_get_queue_stats)); + + /* + * Offsets of descriptor fields defined in HW headers start from + * the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_get_queue_stats) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER, + REO_STATUS_REQUIRED, cmd->std.need_status); + + hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_GET_QUEUE_STATS, + cmd->std.addr_lo, + cmd->std.addr_hi); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS, CLEAR_STATS, + cmd->u.stats_params.clear); + + if (hif_pm_runtime_get(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD, true) == 0) { + hal_srng_access_end(hal_soc_hdl, hal_ring_hdl); + 
hif_pm_runtime_put(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD); + } else { + hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl); + hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); + hal_srng_inc_flush_cnt(hal_ring_hdl); + } + + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER, + val); +} + +static inline int +hal_reo_cmd_flush_queue_be(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) +{ + uint32_t *reo_desc, val; + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E, + sizeof(struct reo_flush_queue)); + + /* + * Offsets of descriptor fields defined in HW headers start from + * the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_flush_queue) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER, + REO_STATUS_REQUIRED, cmd->std.need_status); + + hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_FLUSH_QUEUE, + cmd->std.addr_lo, cmd->std.addr_hi); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE, + BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH, + cmd->u.fl_queue_params.block_use_after_flush); + + if (cmd->u.fl_queue_params.block_use_after_flush) { + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE, + BLOCK_RESOURCE_INDEX, + cmd->u.fl_queue_params.index); + } + + hal_srng_access_end(hal_soc, hal_ring_hdl); + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER, + val); +} + +static inline int 
+hal_reo_cmd_flush_cache_be(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) +{ + uint32_t *reo_desc, val; + struct hal_reo_cmd_flush_cache_params *cp; + uint8_t index = 0; + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + cp = &cmd->u.fl_cache_params; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + + /* We need a cache block resource for this operation, and REO HW has + * only 4 such blocking resources. These resources are managed using + * reo_res_bitmap, and we return failure if none is available. + */ + if (cp->block_use_after_flush) { + index = hal_find_zero_bit(hal_soc->reo_res_bitmap); + if (index > 3) { + qdf_print("No blocking resource available!"); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + hal_soc->index = index; + } + + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + hal_srng_access_end(hal_soc, hal_ring_hdl); + hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl)); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E, + sizeof(struct reo_flush_cache)); + + /* + * Offsets of descriptor fields defined in HW headers start from + * the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_flush_cache) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER, + REO_STATUS_REQUIRED, cmd->std.need_status); + + hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_FLUSH_CACHE, + cmd->std.addr_lo, cmd->std.addr_hi); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE, + FORWARD_ALL_MPDUS_IN_QUEUE, + cp->fwd_mpdus_in_queue); + + /* set it to 0 for now */ + cp->rel_block_index = 0; + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE, + RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index); + + if (cp->block_use_after_flush) { + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE, + 
CACHE_BLOCK_RESOURCE_INDEX, index); + } + + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE, + FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE, + BLOCK_CACHE_USAGE_AFTER_FLUSH, + cp->block_use_after_flush); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE, FLUSH_ENTIRE_CACHE, + cp->flush_entire_cache); + + if (hif_pm_runtime_get(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD, true) == 0) { + hal_srng_access_end(hal_soc_hdl, hal_ring_hdl); + hif_pm_runtime_put(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD); + } else { + hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl); + hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); + hal_srng_inc_flush_cnt(hal_ring_hdl); + } + + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER, + val); +} + +static inline int +hal_reo_cmd_unblock_cache_be(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) + +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t *reo_desc, val; + uint8_t index = 0; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + + if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) { + index = hal_find_one_bit(hal_soc->reo_res_bitmap); + if (index > 3) { + hal_srng_access_end(hal_soc, hal_ring_hdl); + qdf_print("No blocking resource to unblock!"); + return -EBUSY; + } + } + + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E, + sizeof(struct reo_unblock_cache)); + + /* + * Offsets of descriptor fields defined in HW headers start from + * the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct 
reo_unblock_cache) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER, + REO_STATUS_REQUIRED, cmd->std.need_status); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE, + UNBLOCK_TYPE, cmd->u.unblk_cache_params.type); + + if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) { + HAL_DESC_64_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE, + CACHE_BLOCK_RESOURCE_INDEX, + cmd->u.unblk_cache_params.index); + } + + hal_srng_access_end(hal_soc, hal_ring_hdl); + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER, + val); +} + +static inline int +hal_reo_cmd_flush_timeout_list_be(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t *reo_desc, val; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E, + sizeof(struct reo_flush_timeout_list)); + + /* + * Offsets of descriptor fields defined in HW headers start from + * the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_flush_timeout_list) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER, + REO_STATUS_REQUIRED, cmd->std.need_status); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST, AC_TIMOUT_LIST, + cmd->u.fl_tim_list_params.ac_list); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST, + MINIMUM_RELEASE_DESC_COUNT, + cmd->u.fl_tim_list_params.min_rel_desc); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST, + 
MINIMUM_FORWARD_BUF_COUNT, + cmd->u.fl_tim_list_params.min_fwd_buf); + + hal_srng_access_end(hal_soc, hal_ring_hdl); + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER, + val); +} + +static inline int +hal_reo_cmd_update_rx_queue_be(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t *reo_desc, val; + struct hal_reo_cmd_update_queue_params *p; + + p = &cmd->u.upd_queue_params; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E, + sizeof(struct reo_update_rx_reo_queue)); + + /* + * Offsets of descriptor fields defined in HW headers start from + * the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_update_rx_reo_queue) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER, + REO_STATUS_REQUIRED, cmd->std.need_status); + + hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_UPDATE_RX_REO_QUEUE, + cmd->std.addr_lo, cmd->std.addr_hi); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_RECEIVE_QUEUE_NUMBER, + p->update_rx_queue_num); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, UPDATE_VLD, + p->update_vld); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER, + p->update_assoc_link_desc); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_DISABLE_DUPLICATE_DETECTION, + p->update_disable_dup_detect); + + HAL_DESC_64_SET_FIELD(reo_desc, 
REO_UPDATE_RX_REO_QUEUE, + UPDATE_DISABLE_DUPLICATE_DETECTION, + p->update_disable_dup_detect); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_SOFT_REORDER_ENABLE, + p->update_soft_reorder_enab); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_AC, p->update_ac); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_BAR, p->update_bar); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_BAR, p->update_bar); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_RTY, p->update_rty); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_CHK_2K_MODE, p->update_chk_2k_mode); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_OOR_MODE, p->update_oor_mode); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_PN_CHECK_NEEDED, + p->update_pn_check_needed); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_PN_HANDLING_ENABLE, + p->update_pn_hand_enab); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_PN_SIZE, p->update_pn_size); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_SVLD, p->update_svld); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_SSN, p->update_ssn); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_SEQ_2K_ERROR_DETECTED_FLAG, + p->update_seq_2k_err_detect); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_PN_VALID, p->update_pn_valid); + + 
HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + UPDATE_PN, p->update_pn); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + RECEIVE_QUEUE_NUMBER, p->rx_queue_num); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + VLD, p->vld); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + ASSOCIATED_LINK_DESCRIPTOR_COUNTER, + p->assoc_link_desc); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + DISABLE_DUPLICATE_DETECTION, + p->disable_dup_detect); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + SOFT_REORDER_ENABLE, p->soft_reorder_enab); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, AC, p->ac); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + BAR, p->bar); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + CHK_2K_MODE, p->chk_2k_mode); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + RTY, p->rty); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + OOR_MODE, p->oor_mode); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + PN_CHECK_NEEDED, p->pn_check_needed); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + PN_SHALL_BE_EVEN, p->pn_even); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + PN_SHALL_BE_UNEVEN, p->pn_uneven); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + PN_HANDLING_ENABLE, p->pn_hand_enab); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + IGNORE_AMPDU_FLAG, p->ignore_ampdu); + + if (p->ba_window_size < 1) + p->ba_window_size = 1; + /* + * WAR to get 2k exception in Non BA case. 
+ * Setting window size to 2 to get 2k jump exception + * when we receive aggregates in Non BA case + */ + if (p->ba_window_size == 1) + p->ba_window_size++; + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + BA_WINDOW_SIZE, p->ba_window_size - 1); + + if (p->pn_size == 24) + p->pn_size = PN_SIZE_24; + else if (p->pn_size == 48) + p->pn_size = PN_SIZE_48; + else if (p->pn_size == 128) + p->pn_size = PN_SIZE_128; + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + PN_SIZE, p->pn_size); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + SVLD, p->svld); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + SSN, p->ssn); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + PN_ERROR_DETECTED_FLAG, p->pn_err_detect); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + PN_31_0, p->pn_31_0); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + PN_63_32, p->pn_63_32); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + PN_95_64, p->pn_95_64); + + HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, + PN_127_96, p->pn_127_96); + + if (hif_pm_runtime_get(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD, false) == 0) { + hal_srng_access_end(hal_soc_hdl, hal_ring_hdl); + hif_pm_runtime_put(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD); + } else { + hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl); + hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); + hal_srng_inc_flush_cnt(hal_ring_hdl); + } + + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER, + val); +} + +int hal_reo_send_cmd_be(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl, + enum hal_reo_cmd_type cmd, + void *params) +{ + struct hal_reo_cmd_params *cmd_params = + (struct hal_reo_cmd_params *)params; + int num = 0; + + switch (cmd) { + case 
CMD_GET_QUEUE_STATS: + num = hal_reo_cmd_queue_stats_be(hal_ring_hdl, + hal_soc_hdl, cmd_params); + break; + case CMD_FLUSH_QUEUE: + num = hal_reo_cmd_flush_queue_be(hal_ring_hdl, + hal_soc_hdl, cmd_params); + break; + case CMD_FLUSH_CACHE: + num = hal_reo_cmd_flush_cache_be(hal_ring_hdl, + hal_soc_hdl, cmd_params); + break; + case CMD_UNBLOCK_CACHE: + num = hal_reo_cmd_unblock_cache_be(hal_ring_hdl, + hal_soc_hdl, cmd_params); + break; + case CMD_FLUSH_TIMEOUT_LIST: + num = hal_reo_cmd_flush_timeout_list_be(hal_ring_hdl, + hal_soc_hdl, + cmd_params); + break; + case CMD_UPDATE_RX_REO_QUEUE: + num = hal_reo_cmd_update_rx_queue_be(hal_ring_hdl, + hal_soc_hdl, cmd_params); + break; + default: + hal_err("Invalid REO command type: %d", cmd); + return -EINVAL; + }; + + return num; +} + +void +hal_reo_queue_stats_status_be(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct hal_reo_queue_status *st = + (struct hal_reo_queue_status *)st_handle; + uint64_t *reo_desc = (uint64_t *)ring_desc; + uint64_t val; + + /* + * Offsets of descriptor fields defined in HW headers start + * from the field after TLV header + */ + reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr)); + + /* header */ + hal_reo_status_get_header(ring_desc, HAL_REO_QUEUE_STATS_STATUS_TLV, + &(st->header), hal_soc); + + /* SSN */ + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, SSN)]; + st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, SSN, val); + + /* current index */ + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + CURRENT_INDEX)]; + st->curr_idx = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + CURRENT_INDEX, val); + + /* PN bits */ + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + PN_31_0)]; + st->pn_31_0 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + PN_31_0, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + PN_63_32)]; + st->pn_63_32 = + 
HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + PN_63_32, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + PN_95_64)]; + st->pn_95_64 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + PN_95_64, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + PN_127_96)]; + st->pn_127_96 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + PN_127_96, val); + + /* timestamps */ + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + LAST_RX_ENQUEUE_TIMESTAMP)]; + st->last_rx_enq_tstamp = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + LAST_RX_ENQUEUE_TIMESTAMP, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + LAST_RX_DEQUEUE_TIMESTAMP)]; + st->last_rx_deq_tstamp = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + LAST_RX_DEQUEUE_TIMESTAMP, val); + + /* rx bitmap */ + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_31_0)]; + st->rx_bitmap_31_0 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_31_0, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_63_32)]; + st->rx_bitmap_63_32 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_63_32, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_95_64)]; + st->rx_bitmap_95_64 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_95_64, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_127_96)]; + st->rx_bitmap_127_96 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_127_96, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_159_128)]; + st->rx_bitmap_159_128 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_159_128, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_191_160)]; + st->rx_bitmap_191_160 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_191_160, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_223_192)]; + st->rx_bitmap_223_192 = + 
HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_223_192, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_255_224)]; + st->rx_bitmap_255_224 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + RX_BITMAP_255_224, val); + + /* various counts */ + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + CURRENT_MPDU_COUNT)]; + st->curr_mpdu_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + CURRENT_MPDU_COUNT, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + CURRENT_MSDU_COUNT)]; + st->curr_msdu_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + CURRENT_MSDU_COUNT, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + TIMEOUT_COUNT)]; + st->fwd_timeout_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + TIMEOUT_COUNT, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + FORWARD_DUE_TO_BAR_COUNT)]; + st->fwd_bar_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + FORWARD_DUE_TO_BAR_COUNT, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + DUPLICATE_COUNT)]; + st->dup_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + DUPLICATE_COUNT, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + FRAMES_IN_ORDER_COUNT)]; + st->frms_in_order_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + FRAMES_IN_ORDER_COUNT, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + BAR_RECEIVED_COUNT)]; + st->bar_rcvd_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + BAR_RECEIVED_COUNT, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + MPDU_FRAMES_PROCESSED_COUNT)]; + st->mpdu_frms_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + MPDU_FRAMES_PROCESSED_COUNT, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + MSDU_FRAMES_PROCESSED_COUNT)]; + st->msdu_frms_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + MSDU_FRAMES_PROCESSED_COUNT, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + 
TOTAL_PROCESSED_BYTE_COUNT)]; + st->total_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + TOTAL_PROCESSED_BYTE_COUNT, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + LATE_RECEIVE_MPDU_COUNT)]; + st->late_recv_mpdu_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + LATE_RECEIVE_MPDU_COUNT, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + WINDOW_JUMP_2K)]; + st->win_jump_2k = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + WINDOW_JUMP_2K, val); + + val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, + HOLE_COUNT)]; + st->hole_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, + HOLE_COUNT, val); +} + +void +hal_reo_flush_queue_status_be(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct hal_reo_flush_queue_status *st = + (struct hal_reo_flush_queue_status *)st_handle; + uint64_t *reo_desc = (uint64_t *)ring_desc; + uint64_t val; + + /* + * Offsets of descriptor fields defined in HW headers start + * from the field after TLV header + */ + reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr)); + + /* header */ + hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV, + &(st->header), hal_soc); + + /* error bit */ + val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS, ERROR_DETECTED, + val); +} + +void +hal_reo_flush_cache_status_be(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct hal_reo_flush_cache_status *st = + (struct hal_reo_flush_cache_status *)st_handle; + uint64_t *reo_desc = (uint64_t *)ring_desc; + uint64_t val; + + /* + * Offsets of descriptor fields defined in HW headers start + * from the field after TLV header + */ + reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr)); + + /* header */ + hal_reo_status_get_header(ring_desc, 
HAL_REO_FLUSH_CACHE_STATUS_TLV, + &(st->header), hal_soc); + + /* error bit */ + val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS, ERROR_DETECTED, + val); + + /* block error */ + val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS, + BLOCK_ERROR_DETAILS)]; + st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS, + BLOCK_ERROR_DETAILS, + val); + if (!st->block_error) + qdf_set_bit(hal_soc->index, + (unsigned long *)&hal_soc->reo_res_bitmap); + + /* cache flush status */ + val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS, + CACHE_CONTROLLER_FLUSH_STATUS_HIT)]; + st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS, + CACHE_CONTROLLER_FLUSH_STATUS_HIT, + val); + + /* cache flush descriptor type */ + val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS, + CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)]; + st->cache_flush_status_desc_type = + HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS, + CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE, + val); + + /* cache flush count */ + val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS, + CACHE_CONTROLLER_FLUSH_COUNT)]; + st->cache_flush_cnt = + HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS, + CACHE_CONTROLLER_FLUSH_COUNT, + val); +} + +void +hal_reo_unblock_cache_status_be(hal_ring_desc_t ring_desc, + hal_soc_handle_t hal_soc_hdl, + void *st_handle) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct hal_reo_unblk_cache_status *st = + (struct hal_reo_unblk_cache_status *)st_handle; + uint64_t *reo_desc = (uint64_t *)ring_desc; + uint64_t val; + + /* + * Offsets of descriptor fields defined in HW headers start + * from the field after TLV header + */ + reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr)); + + /* header */ + hal_reo_status_get_header(ring_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV, + &st->header, hal_soc); + + /* error bit */ + val = reo_desc[HAL_OFFSET_QW(REO_UNBLOCK_CACHE_STATUS, + ERROR_DETECTED)]; + st->error = 
HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS, + ERROR_DETECTED, + val); + + /* unblock type */ + val = reo_desc[HAL_OFFSET_QW(REO_UNBLOCK_CACHE_STATUS, + UNBLOCK_TYPE)]; + st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS, + UNBLOCK_TYPE, + val); + + if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX)) + qdf_clear_bit(hal_soc->index, + (unsigned long *)&hal_soc->reo_res_bitmap); +} + +void hal_reo_flush_timeout_list_status_be(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct hal_reo_flush_timeout_list_status *st = + (struct hal_reo_flush_timeout_list_status *)st_handle; + uint64_t *reo_desc = (uint64_t *)ring_desc; + uint64_t val; + + /* + * Offsets of descriptor fields defined in HW headers start + * from the field after TLV header + */ + reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr)); + + /* header */ + hal_reo_status_get_header(ring_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV, + &(st->header), hal_soc); + + /* error bit */ + val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS, + ERROR_DETECTED, + val); + + /* list empty */ + val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS, + TIMOUT_LIST_EMPTY)]; + st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS, + TIMOUT_LIST_EMPTY, + val); + + /* release descriptor count */ + val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS, + RELEASE_DESC_COUNT)]; + st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS, + RELEASE_DESC_COUNT, + val); + + /* forward buf count */ + val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS, + FORWARD_BUF_COUNT)]; + st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS, + FORWARD_BUF_COUNT, + val); +} + +void hal_reo_desc_thres_reached_status_be(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = 
(struct hal_soc *)hal_soc_hdl; + struct hal_reo_desc_thres_reached_status *st = + (struct hal_reo_desc_thres_reached_status *)st_handle; + uint64_t *reo_desc = (uint64_t *)ring_desc; + uint64_t val; + + /* + * Offsets of descriptor fields defined in HW headers start + * from the field after TLV header + */ + reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr)); + + /* header */ + hal_reo_status_get_header(ring_desc, + HAL_REO_DESC_THRES_STATUS_TLV, + &(st->header), hal_soc); + + /* threshold index */ + val = reo_desc[HAL_OFFSET_QW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS, + THRESHOLD_INDEX)]; + st->thres_index = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS, + THRESHOLD_INDEX, + val); + + /* link desc counters */ + val = reo_desc[HAL_OFFSET_QW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS, + LINK_DESCRIPTOR_COUNTER0)]; + st->link_desc_counter0 = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS, + LINK_DESCRIPTOR_COUNTER0, + val); + + val = reo_desc[HAL_OFFSET_QW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS, + LINK_DESCRIPTOR_COUNTER1)]; + st->link_desc_counter1 = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS, + LINK_DESCRIPTOR_COUNTER1, + val); + + val = reo_desc[HAL_OFFSET_QW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS, + LINK_DESCRIPTOR_COUNTER2)]; + st->link_desc_counter2 = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS, + LINK_DESCRIPTOR_COUNTER2, + val); + + val = reo_desc[HAL_OFFSET_QW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS, + LINK_DESCRIPTOR_COUNTER_SUM)]; + st->link_desc_counter_sum = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS, + LINK_DESCRIPTOR_COUNTER_SUM, + val); +} + +void +hal_reo_rx_update_queue_status_be(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct hal_reo_update_rx_queue_status *st = + (struct hal_reo_update_rx_queue_status *)st_handle; + uint64_t *reo_desc = (uint64_t *)ring_desc; + + /* 
+ * Offsets of descriptor fields defined in HW headers start + * from the field after TLV header + */ + reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr)); + + /* header */ + hal_reo_status_get_header(ring_desc, + HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV, + &(st->header), hal_soc); +} diff --git a/hal/wifi3.0/be/hal_be_reo.h b/hal/wifi3.0/be/hal_be_reo.h new file mode 100644 index 0000000000..cb9c78b558 --- /dev/null +++ b/hal/wifi3.0/be/hal_be_reo.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2017-2019, 2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _HAL_REO_BE_H_ +#define _HAL_REO_BE_H_ + +#include "hal_be_hw_headers.h" +#include "hal_rx.h" +#include "hal_reo.h" + +#define HAL_REO_QUEUE_EXT_DESC 10 + +/* Proto-types */ +void hal_get_ba_aging_timeout_be(hal_soc_handle_t hal_soc_hdl, uint8_t ac, + uint32_t *value); +void hal_set_ba_aging_timeout_be(hal_soc_handle_t hal_soc_hdl, uint8_t ac, + uint32_t value); +uint32_t hal_get_reo_reg_base_offset_be(void); + +int hal_reo_send_cmd_be(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl, + enum hal_reo_cmd_type cmd, + void *params); + +/* REO status ring routines */ +void +hal_reo_queue_stats_status_be(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl); +void +hal_reo_flush_queue_status_be(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl); +void +hal_reo_flush_cache_status_be(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl); +void +hal_reo_unblock_cache_status_be(hal_ring_desc_t ring_desc, + hal_soc_handle_t hal_soc_hdl, + void *st_handle); +void hal_reo_flush_timeout_list_status_be(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl); +void hal_reo_desc_thres_reached_status_be(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl); +void +hal_reo_rx_update_queue_status_be(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl); + +/** + * hal_reo_init_cmd_ring_be() - Initialize descriptors of REO command SRNG + * with command number + * @hal_soc: Handle to HAL SoC structure + * @hal_ring: Handle to HAL SRNG structure + * + * Return: none + */ +void hal_reo_init_cmd_ring_be(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl); +#endif /* _HAL_REO_BE_H_ */ diff --git a/hal/wifi3.0/hal_reo.c b/hal/wifi3.0/hal_reo.c index b4c9e88c77..e994f3fcff 100644 --- a/hal/wifi3.0/hal_reo.c +++ b/hal/wifi3.0/hal_reo.c @@ -23,1363 +23,8 @@ #include "hal_rx.h" #include "qdf_module.h" -/* TODO: 
See if the following definition is available in HW headers */ -#define HAL_REO_OWNED 4 -#define HAL_REO_QUEUE_DESC 8 -#define HAL_REO_QUEUE_EXT_DESC 9 - -/* TODO: Using associated link desc counter 1 for Rx. Check with FW on - * how these counters are assigned - */ -#define HAL_RX_LINK_DESC_CNTR 1 -/* TODO: Following definition should be from HW headers */ -#define HAL_DESC_REO_OWNED 4 - -/** - * hal_uniform_desc_hdr_setup - setup reo_queue_ext descritpro - * @owner - owner info - * @buffer_type - buffer type - */ -static inline void hal_uniform_desc_hdr_setup(uint32_t *desc, uint32_t owner, - uint32_t buffer_type) -{ - HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, OWNER, - owner); - HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, BUFFER_TYPE, - buffer_type); -} - -#ifndef TID_TO_WME_AC -#define WME_AC_BE 0 /* best effort */ -#define WME_AC_BK 1 /* background */ -#define WME_AC_VI 2 /* video */ -#define WME_AC_VO 3 /* voice */ - -#define TID_TO_WME_AC(_tid) ( \ - (((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ - (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \ - (((_tid) == 4) || ((_tid) == 5)) ? 
WME_AC_VI : \ - WME_AC_VO) -#endif -#define HAL_NON_QOS_TID 16 - -#ifdef HAL_DISABLE_NON_BA_2K_JUMP_ERROR -static inline uint32_t hal_update_non_ba_win_size(int tid, - uint32_t ba_window_size) -{ - return ba_window_size; -} -#else -static inline uint32_t hal_update_non_ba_win_size(int tid, - uint32_t ba_window_size) -{ - if ((ba_window_size == 1) && (tid != HAL_NON_QOS_TID)) - ba_window_size++; - - return ba_window_size; -} -#endif - -/** - * hal_reo_qdesc_setup - Setup HW REO queue descriptor - * - * @hal_soc: Opaque HAL SOC handle - * @ba_window_size: BlockAck window size - * @start_seq: Starting sequence number - * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory - * @hw_qdesc_paddr: Physical address of REO queue descriptor memory - * @tid: TID - * - */ -void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl, int tid, - uint32_t ba_window_size, - uint32_t start_seq, void *hw_qdesc_vaddr, - qdf_dma_addr_t hw_qdesc_paddr, - int pn_type) -{ - uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr; - uint32_t *reo_queue_ext_desc; - uint32_t reg_val; - uint32_t pn_enable; - uint32_t pn_size = 0; - - qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue)); - - hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED, - HAL_REO_QUEUE_DESC); - /* Fixed pattern in reserved bits for debugging */ - HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0, - RESERVED_0A, 0xDDBEEF); - - /* This a just a SW meta data and will be copied to REO destination - * descriptors indicated by hardware. - * TODO: Setting TID in this field. See if we should set something else. 
- */ - HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1, - RECEIVE_QUEUE_NUMBER, tid); - HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, - VLD, 1); - HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, - ASSOCIATED_LINK_DESCRIPTOR_COUNTER, HAL_RX_LINK_DESC_CNTR); - - /* - * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0 - */ - - reg_val = TID_TO_WME_AC(tid); - HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val); - - if (ba_window_size < 1) - ba_window_size = 1; - - /* WAR to get 2k exception in Non BA case. - * Setting window size to 2 to get 2k jump exception - * when we receive aggregates in Non BA case - */ - ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size); - - /* Set RTY bit for non-BA case. Duplicate detection is currently not - * done by HW in non-BA case if RTY bit is not set. - * TODO: This is a temporary War and should be removed once HW fix is - * made to check and discard duplicates even if RTY bit is not set. - */ - if (ba_window_size == 1) - HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1); - - HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE, - ba_window_size - 1); - - switch (pn_type) { - case HAL_PN_WPA: - pn_enable = 1; - pn_size = PN_SIZE_48; - break; - case HAL_PN_WAPI_EVEN: - case HAL_PN_WAPI_UNEVEN: - pn_enable = 1; - pn_size = PN_SIZE_128; - break; - default: - pn_enable = 0; - break; - } - - HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED, - pn_enable); - - if (pn_type == HAL_PN_WAPI_EVEN) - HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, - PN_SHALL_BE_EVEN, 1); - else if (pn_type == HAL_PN_WAPI_UNEVEN) - HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, - PN_SHALL_BE_UNEVEN, 1); - - /* - * TODO: Need to check if PN handling in SW needs to be enabled - * So far this is not a requirement - */ - - HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE, - pn_size); - - /* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set - * 
based on BA window size and/or AMPDU capabilities - */ - HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, - IGNORE_AMPDU_FLAG, 1); - - if (start_seq <= 0xfff) - HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN, - start_seq); - - /* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA, - * but REO is not delivering packets if we set it to 1. Need to enable - * this once the issue is resolved - */ - HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0); - - /* TODO: Check if we should set start PN for WAPI */ - -#ifdef notyet - /* Setup first queue extension if BA window size is more than 1 */ - if (ba_window_size > 1) { - reo_queue_ext_desc = - (uint32_t *)(((struct rx_reo_queue *)reo_queue_desc) + - 1); - qdf_mem_zero(reo_queue_ext_desc, - sizeof(struct rx_reo_queue_ext)); - hal_uniform_desc_hdr_setup(reo_queue_ext_desc, - HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); - } - /* Setup second queue extension if BA window size is more than 105 */ - if (ba_window_size > 105) { - reo_queue_ext_desc = (uint32_t *) - (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); - qdf_mem_zero(reo_queue_ext_desc, - sizeof(struct rx_reo_queue_ext)); - hal_uniform_desc_hdr_setup(reo_queue_ext_desc, - HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); - } - /* Setup third queue extension if BA window size is more than 210 */ - if (ba_window_size > 210) { - reo_queue_ext_desc = (uint32_t *) - (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); - qdf_mem_zero(reo_queue_ext_desc, - sizeof(struct rx_reo_queue_ext)); - hal_uniform_desc_hdr_setup(reo_queue_ext_desc, - HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); - } -#else - /* TODO: HW queue descriptors are currently allocated for max BA - * window size for all QOS TIDs so that same descriptor can be used - * later when ADDBA request is recevied. 
This should be changed to - * allocate HW queue descriptors based on BA window size being - * negotiated (0 for non BA cases), and reallocate when BA window - * size changes and also send WMI message to FW to change the REO - * queue descriptor in Rx peer entry as part of dp_rx_tid_update. - */ - if (tid != HAL_NON_QOS_TID) { - reo_queue_ext_desc = (uint32_t *) - (((struct rx_reo_queue *)reo_queue_desc) + 1); - qdf_mem_zero(reo_queue_ext_desc, 3 * - sizeof(struct rx_reo_queue_ext)); - /* Initialize first reo queue extension descriptor */ - hal_uniform_desc_hdr_setup(reo_queue_ext_desc, - HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); - /* Fixed pattern in reserved bits for debugging */ - HAL_DESC_SET_FIELD(reo_queue_ext_desc, - UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xADBEEF); - /* Initialize second reo queue extension descriptor */ - reo_queue_ext_desc = (uint32_t *) - (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); - hal_uniform_desc_hdr_setup(reo_queue_ext_desc, - HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); - /* Fixed pattern in reserved bits for debugging */ - HAL_DESC_SET_FIELD(reo_queue_ext_desc, - UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xBDBEEF); - /* Initialize third reo queue extension descriptor */ - reo_queue_ext_desc = (uint32_t *) - (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); - hal_uniform_desc_hdr_setup(reo_queue_ext_desc, - HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC); - /* Fixed pattern in reserved bits for debugging */ - HAL_DESC_SET_FIELD(reo_queue_ext_desc, - UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xCDBEEF); - } -#endif -} -qdf_export_symbol(hal_reo_qdesc_setup); - -/** - * hal_get_ba_aging_timeout - Get BA Aging timeout - * - * @hal_soc: Opaque HAL SOC handle - * @ac: Access category - * @value: window size to get - */ -void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac, - uint32_t *value) -{ - struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl; - - switch (ac) { - case WME_AC_BE: - *value = 
HAL_REG_READ(soc, - HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR( - SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; - break; - case WME_AC_BK: - *value = HAL_REG_READ(soc, - HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR( - SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; - break; - case WME_AC_VI: - *value = HAL_REG_READ(soc, - HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR( - SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; - break; - case WME_AC_VO: - *value = HAL_REG_READ(soc, - HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR( - SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; - break; - default: - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - "Invalid AC: %d\n", ac); - } -} - -qdf_export_symbol(hal_get_ba_aging_timeout); - -/** - * hal_set_ba_aging_timeout - Set BA Aging timeout - * - * @hal_soc: Opaque HAL SOC handle - * @ac: Access category - * ac: 0 - Background, 1 - Best Effort, 2 - Video, 3 - Voice - * @value: Input value to set - */ -void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac, - uint32_t value) -{ - struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl; - - switch (ac) { - case WME_AC_BE: - HAL_REG_WRITE(soc, - HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR( - SEQ_WCSS_UMAC_REO_REG_OFFSET), - value * 1000); - break; - case WME_AC_BK: - HAL_REG_WRITE(soc, - HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR( - SEQ_WCSS_UMAC_REO_REG_OFFSET), - value * 1000); - break; - case WME_AC_VI: - HAL_REG_WRITE(soc, - HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR( - SEQ_WCSS_UMAC_REO_REG_OFFSET), - value * 1000); - break; - case WME_AC_VO: - HAL_REG_WRITE(soc, - HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR( - SEQ_WCSS_UMAC_REO_REG_OFFSET), - value * 1000); - break; - default: - QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, - "Invalid AC: %d\n", ac); - } -} - -qdf_export_symbol(hal_set_ba_aging_timeout); - -#define BLOCK_RES_MASK 0xF -static inline uint8_t hal_find_one_bit(uint8_t x) -{ - uint8_t y = (x & (~x + 1)) & BLOCK_RES_MASK; - uint8_t pos; - - for (pos = 0; y; y >>= 1) - pos++; - - return pos-1; -} - -static inline uint8_t 
hal_find_zero_bit(uint8_t x) -{ - uint8_t y = (~x & (x+1)) & BLOCK_RES_MASK; - uint8_t pos; - - for (pos = 0; y; y >>= 1) - pos++; - - return pos-1; -} - -inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc, - enum hal_reo_cmd_type type, - uint32_t paddr_lo, - uint8_t paddr_hi) -{ - switch (type) { - case CMD_GET_QUEUE_STATS: - HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1, - RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo); - HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, - RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi); - break; - case CMD_FLUSH_QUEUE: - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1, - FLUSH_DESC_ADDR_31_0, paddr_lo); - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2, - FLUSH_DESC_ADDR_39_32, paddr_hi); - break; - case CMD_FLUSH_CACHE: - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1, - FLUSH_ADDR_31_0, paddr_lo); - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, - FLUSH_ADDR_39_32, paddr_hi); - break; - case CMD_UPDATE_RX_REO_QUEUE: - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1, - RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo); - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi); - break; - default: - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, - "%s: Invalid REO command type", __func__); - break; - } -} - -inline int hal_reo_cmd_queue_stats(hal_ring_handle_t hal_ring_hdl, - hal_soc_handle_t hal_soc_hdl, - struct hal_reo_cmd_params *cmd) - -{ - uint32_t *reo_desc, val; - struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; - - hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); - reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); - if (!reo_desc) { - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, - "%s: Out of cmd ring entries", __func__); - hal_srng_access_end(hal_soc, hal_ring_hdl); - return -EBUSY; - } - - HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E, - sizeof(struct reo_get_queue_stats)); - - /* Offsets of descriptor fields defined in HW headers start from - * 
the field after TLV header */ - reo_desc += (sizeof(struct tlv_32_hdr) >> 2); - qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), - sizeof(struct reo_get_queue_stats) - - (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); - - HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, - REO_STATUS_REQUIRED, cmd->std.need_status); - - hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS, - cmd->std.addr_lo, - cmd->std.addr_hi); - - HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS, - cmd->u.stats_params.clear); - - if (hif_pm_runtime_get(hal_soc->hif_handle, - RTPM_ID_HAL_REO_CMD, true) == 0) { - hal_srng_access_end(hal_soc_hdl, hal_ring_hdl); - hif_pm_runtime_put(hal_soc->hif_handle, - RTPM_ID_HAL_REO_CMD); - } else { - hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl); - hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); - hal_srng_inc_flush_cnt(hal_ring_hdl); - } - - val = reo_desc[CMD_HEADER_DW_OFFSET]; - return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, - val); -} -qdf_export_symbol(hal_reo_cmd_queue_stats); - -inline int hal_reo_cmd_flush_queue(hal_ring_handle_t hal_ring_hdl, - hal_soc_handle_t hal_soc_hdl, - struct hal_reo_cmd_params *cmd) -{ - uint32_t *reo_desc, val; - struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; - - hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); - reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); - if (!reo_desc) { - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, - "%s: Out of cmd ring entries", __func__); - hal_srng_access_end(hal_soc, hal_ring_hdl); - return -EBUSY; - } - - HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E, - sizeof(struct reo_flush_queue)); - - /* Offsets of descriptor fields defined in HW headers start from - * the field after TLV header */ - reo_desc += (sizeof(struct tlv_32_hdr) >> 2); - qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), - sizeof(struct reo_flush_queue) - - (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); - - 
HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, - REO_STATUS_REQUIRED, cmd->std.need_status); - - hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo, - cmd->std.addr_hi); - - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2, - BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH, - cmd->u.fl_queue_params.block_use_after_flush); - - if (cmd->u.fl_queue_params.block_use_after_flush) { - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2, - BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index); - } - - hal_srng_access_end(hal_soc, hal_ring_hdl); - val = reo_desc[CMD_HEADER_DW_OFFSET]; - return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, - val); -} -qdf_export_symbol(hal_reo_cmd_flush_queue); - -inline int hal_reo_cmd_flush_cache(hal_ring_handle_t hal_ring_hdl, - hal_soc_handle_t hal_soc_hdl, - struct hal_reo_cmd_params *cmd) -{ - uint32_t *reo_desc, val; - struct hal_reo_cmd_flush_cache_params *cp; - uint8_t index = 0; - struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; - - cp = &cmd->u.fl_cache_params; - - hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); - - /* We need a cache block resource for this operation, and REO HW has - * only 4 such blocking resources. These resources are managed using - * reo_res_bitmap, and we return failure if none is available. 
- */ - if (cp->block_use_after_flush) { - index = hal_find_zero_bit(hal_soc->reo_res_bitmap); - if (index > 3) { - qdf_print("No blocking resource available!"); - hal_srng_access_end(hal_soc, hal_ring_hdl); - return -EBUSY; - } - hal_soc->index = index; - } - - reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); - if (!reo_desc) { - hal_srng_access_end(hal_soc, hal_ring_hdl); - hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl)); - return -EBUSY; - } - - HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E, - sizeof(struct reo_flush_cache)); - - /* Offsets of descriptor fields defined in HW headers start from - * the field after TLV header */ - reo_desc += (sizeof(struct tlv_32_hdr) >> 2); - qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), - sizeof(struct reo_flush_cache) - - (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); - - HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, - REO_STATUS_REQUIRED, cmd->std.need_status); - - hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo, - cmd->std.addr_hi); - - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, - FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue); - - /* set it to 0 for now */ - cp->rel_block_index = 0; - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, - RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index); - - if (cp->block_use_after_flush) { - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, - CACHE_BLOCK_RESOURCE_INDEX, index); - } - - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, - FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval); - - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, - BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush); - - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE, - cp->flush_entire_cache); - - if (hif_pm_runtime_get(hal_soc->hif_handle, - RTPM_ID_HAL_REO_CMD, true) == 0) { - hal_srng_access_end(hal_soc_hdl, hal_ring_hdl); - hif_pm_runtime_put(hal_soc->hif_handle, - RTPM_ID_HAL_REO_CMD); - } else { - 
hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl); - hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); - hal_srng_inc_flush_cnt(hal_ring_hdl); - } - - val = reo_desc[CMD_HEADER_DW_OFFSET]; - return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, - val); -} -qdf_export_symbol(hal_reo_cmd_flush_cache); - -inline int hal_reo_cmd_unblock_cache(hal_ring_handle_t hal_ring_hdl, - hal_soc_handle_t hal_soc_hdl, - struct hal_reo_cmd_params *cmd) - -{ - struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; - uint32_t *reo_desc, val; - uint8_t index = 0; - - hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); - - if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) { - index = hal_find_one_bit(hal_soc->reo_res_bitmap); - if (index > 3) { - hal_srng_access_end(hal_soc, hal_ring_hdl); - qdf_print("No blocking resource to unblock!"); - return -EBUSY; - } - } - - reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); - if (!reo_desc) { - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, - "%s: Out of cmd ring entries", __func__); - hal_srng_access_end(hal_soc, hal_ring_hdl); - return -EBUSY; - } - - HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E, - sizeof(struct reo_unblock_cache)); - - /* Offsets of descriptor fields defined in HW headers start from - * the field after TLV header */ - reo_desc += (sizeof(struct tlv_32_hdr) >> 2); - qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), - sizeof(struct reo_unblock_cache) - - (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); - - HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, - REO_STATUS_REQUIRED, cmd->std.need_status); - - HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1, - UNBLOCK_TYPE, cmd->u.unblk_cache_params.type); - - if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) { - HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1, - CACHE_BLOCK_RESOURCE_INDEX, - cmd->u.unblk_cache_params.index); - } - - hal_srng_access_end(hal_soc, hal_ring_hdl); - val = reo_desc[CMD_HEADER_DW_OFFSET]; - 
return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, - val); -} -qdf_export_symbol(hal_reo_cmd_unblock_cache); - -inline int hal_reo_cmd_flush_timeout_list(hal_ring_handle_t hal_ring_hdl, - hal_soc_handle_t hal_soc_hdl, - struct hal_reo_cmd_params *cmd) -{ - struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; - uint32_t *reo_desc, val; - - hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); - reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); - if (!reo_desc) { - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, - "%s: Out of cmd ring entries", __func__); - hal_srng_access_end(hal_soc, hal_ring_hdl); - return -EBUSY; - } - - HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E, - sizeof(struct reo_flush_timeout_list)); - - /* Offsets of descriptor fields defined in HW headers start from - * the field after TLV header */ - reo_desc += (sizeof(struct tlv_32_hdr) >> 2); - qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), - sizeof(struct reo_flush_timeout_list) - - (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); - - HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, - REO_STATUS_REQUIRED, cmd->std.need_status); - - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST, - cmd->u.fl_tim_list_params.ac_list); - - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2, - MINIMUM_RELEASE_DESC_COUNT, - cmd->u.fl_tim_list_params.min_rel_desc); - - HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2, - MINIMUM_FORWARD_BUF_COUNT, - cmd->u.fl_tim_list_params.min_fwd_buf); - - hal_srng_access_end(hal_soc, hal_ring_hdl); - val = reo_desc[CMD_HEADER_DW_OFFSET]; - return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, - val); -} -qdf_export_symbol(hal_reo_cmd_flush_timeout_list); - -inline int hal_reo_cmd_update_rx_queue(hal_ring_handle_t hal_ring_hdl, - hal_soc_handle_t hal_soc_hdl, - struct hal_reo_cmd_params *cmd) -{ - struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; - uint32_t *reo_desc, val; - struct 
hal_reo_cmd_update_queue_params *p; - - p = &cmd->u.upd_queue_params; - - hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); - reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); - if (!reo_desc) { - QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, - "%s: Out of cmd ring entries", __func__); - hal_srng_access_end(hal_soc, hal_ring_hdl); - return -EBUSY; - } - - HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E, - sizeof(struct reo_update_rx_reo_queue)); - - /* Offsets of descriptor fields defined in HW headers start from - * the field after TLV header */ - reo_desc += (sizeof(struct tlv_32_hdr) >> 2); - qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), - sizeof(struct reo_update_rx_reo_queue) - - (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); - - HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, - REO_STATUS_REQUIRED, cmd->std.need_status); - - hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE, - cmd->std.addr_lo, cmd->std.addr_hi); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD, - p->update_vld); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER, - p->update_assoc_link_desc); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_DISABLE_DUPLICATE_DETECTION, - p->update_disable_dup_detect); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_DISABLE_DUPLICATE_DETECTION, - p->update_disable_dup_detect); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_SOFT_REORDER_ENABLE, - p->update_soft_reorder_enab); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_AC, p->update_ac); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_BAR, p->update_bar); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_BAR, p->update_bar); - - 
HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_RTY, p->update_rty); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_CHK_2K_MODE, p->update_chk_2k_mode); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_OOR_MODE, p->update_oor_mode); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_PN_SIZE, p->update_pn_size); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_SVLD, p->update_svld); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_SSN, p->update_ssn); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_SEQ_2K_ERROR_DETECTED_FLAG, - p->update_seq_2k_err_detect); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_PN_VALID, p->update_pn_valid); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, - UPDATE_PN, p->update_pn); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, - RECEIVE_QUEUE_NUMBER, p->rx_queue_num); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, - VLD, p->vld); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, - ASSOCIATED_LINK_DESCRIPTOR_COUNTER, - p->assoc_link_desc); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, - DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect); - - 
HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, - SOFT_REORDER_ENABLE, p->soft_reorder_enab); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, - BAR, p->bar); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, - CHK_2K_MODE, p->chk_2k_mode); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, - RTY, p->rty); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, - OOR_MODE, p->oor_mode); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, - PN_CHECK_NEEDED, p->pn_check_needed); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, - PN_SHALL_BE_EVEN, p->pn_even); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, - PN_SHALL_BE_UNEVEN, p->pn_uneven); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, - PN_HANDLING_ENABLE, p->pn_hand_enab); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, - IGNORE_AMPDU_FLAG, p->ignore_ampdu); - - if (p->ba_window_size < 1) - p->ba_window_size = 1; - /* - * WAR to get 2k exception in Non BA case. 
- * Setting window size to 2 to get 2k jump exception - * when we receive aggregates in Non BA case - */ - if (p->ba_window_size == 1) - p->ba_window_size++; - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, - BA_WINDOW_SIZE, p->ba_window_size - 1); - - if (p->pn_size == 24) - p->pn_size = PN_SIZE_24; - else if (p->pn_size == 48) - p->pn_size = PN_SIZE_48; - else if (p->pn_size == 128) - p->pn_size = PN_SIZE_128; - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, - PN_SIZE, p->pn_size); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, - SVLD, p->svld); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, - SSN, p->ssn); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, - SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, - PN_ERROR_DETECTED_FLAG, p->pn_err_detect); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5, - PN_31_0, p->pn_31_0); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6, - PN_63_32, p->pn_63_32); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7, - PN_95_64, p->pn_95_64); - - HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8, - PN_127_96, p->pn_127_96); - - if (hif_pm_runtime_get(hal_soc->hif_handle, - RTPM_ID_HAL_REO_CMD, false) == 0) { - hal_srng_access_end(hal_soc_hdl, hal_ring_hdl); - hif_pm_runtime_put(hal_soc->hif_handle, - RTPM_ID_HAL_REO_CMD); - } else { - hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl); - hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); - hal_srng_inc_flush_cnt(hal_ring_hdl); - } - - val = reo_desc[CMD_HEADER_DW_OFFSET]; - return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, - val); -} -qdf_export_symbol(hal_reo_cmd_update_rx_queue); - -inline void -hal_reo_queue_stats_status(uint32_t *reo_desc, - struct hal_reo_queue_status *st, - hal_soc_handle_t hal_soc_hdl) -{ - struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; - uint32_t val; - - /* Offsets of descriptor 
fields defined in HW headers start - * from the field after TLV header */ - reo_desc += (sizeof(struct tlv_32_hdr) >> 2); - - /* header */ - hal_reo_status_get_header(reo_desc, HAL_REO_QUEUE_STATS_STATUS_TLV, - &(st->header), hal_soc); - - /* SSN */ - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)]; - st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val); - - /* current index */ - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, - CURRENT_INDEX)]; - st->curr_idx = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, - CURRENT_INDEX, val); - - /* PN bits */ - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3, - PN_31_0)]; - st->pn_31_0 = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3, - PN_31_0, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4, - PN_63_32)]; - st->pn_63_32 = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4, - PN_63_32, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5, - PN_95_64)]; - st->pn_95_64 = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5, - PN_95_64, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6, - PN_127_96)]; - st->pn_127_96 = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6, - PN_127_96, val); - - /* timestamps */ - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7, - LAST_RX_ENQUEUE_TIMESTAMP)]; - st->last_rx_enq_tstamp = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7, - LAST_RX_ENQUEUE_TIMESTAMP, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8, - LAST_RX_DEQUEUE_TIMESTAMP)]; - st->last_rx_deq_tstamp = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8, - LAST_RX_DEQUEUE_TIMESTAMP, val); - - /* rx bitmap */ - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9, - RX_BITMAP_31_0)]; - st->rx_bitmap_31_0 = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9, - RX_BITMAP_31_0, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10, - RX_BITMAP_63_32)]; - st->rx_bitmap_63_32 = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10, - 
RX_BITMAP_63_32, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11, - RX_BITMAP_95_64)]; - st->rx_bitmap_95_64 = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11, - RX_BITMAP_95_64, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12, - RX_BITMAP_127_96)]; - st->rx_bitmap_127_96 = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12, - RX_BITMAP_127_96, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13, - RX_BITMAP_159_128)]; - st->rx_bitmap_159_128 = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13, - RX_BITMAP_159_128, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14, - RX_BITMAP_191_160)]; - st->rx_bitmap_191_160 = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14, - RX_BITMAP_191_160, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15, - RX_BITMAP_223_192)]; - st->rx_bitmap_223_192 = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15, - RX_BITMAP_223_192, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16, - RX_BITMAP_255_224)]; - st->rx_bitmap_255_224 = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16, - RX_BITMAP_255_224, val); - - /* various counts */ - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17, - CURRENT_MPDU_COUNT)]; - st->curr_mpdu_cnt = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17, - CURRENT_MPDU_COUNT, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17, - CURRENT_MSDU_COUNT)]; - st->curr_msdu_cnt = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17, - CURRENT_MSDU_COUNT, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18, - TIMEOUT_COUNT)]; - st->fwd_timeout_cnt = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18, - TIMEOUT_COUNT, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18, - FORWARD_DUE_TO_BAR_COUNT)]; - st->fwd_bar_cnt = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18, - FORWARD_DUE_TO_BAR_COUNT, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18, - DUPLICATE_COUNT)]; - st->dup_cnt 
= - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18, - DUPLICATE_COUNT, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19, - FRAMES_IN_ORDER_COUNT)]; - st->frms_in_order_cnt = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19, - FRAMES_IN_ORDER_COUNT, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19, - BAR_RECEIVED_COUNT)]; - st->bar_rcvd_cnt = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19, - BAR_RECEIVED_COUNT, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20, - MPDU_FRAMES_PROCESSED_COUNT)]; - st->mpdu_frms_cnt = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20, - MPDU_FRAMES_PROCESSED_COUNT, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21, - MSDU_FRAMES_PROCESSED_COUNT)]; - st->msdu_frms_cnt = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21, - MSDU_FRAMES_PROCESSED_COUNT, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22, - TOTAL_PROCESSED_BYTE_COUNT)]; - st->total_cnt = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22, - TOTAL_PROCESSED_BYTE_COUNT, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23, - LATE_RECEIVE_MPDU_COUNT)]; - st->late_recv_mpdu_cnt = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23, - LATE_RECEIVE_MPDU_COUNT, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23, - WINDOW_JUMP_2K)]; - st->win_jump_2k = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23, - WINDOW_JUMP_2K, val); - - val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23, - HOLE_COUNT)]; - st->hole_cnt = - HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23, - HOLE_COUNT, val); -} -qdf_export_symbol(hal_reo_queue_stats_status); - -inline void -hal_reo_flush_queue_status(uint32_t *reo_desc, - struct hal_reo_flush_queue_status *st, - hal_soc_handle_t hal_soc_hdl) -{ - struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; - uint32_t val; - - /* Offsets of descriptor fields defined in HW headers start - * from the field after TLV header */ - reo_desc += (sizeof(struct 
tlv_32_hdr) >> 2); - - /* header */ - hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV, - &(st->header), hal_soc); - - /* error bit */ - val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2, - ERROR_DETECTED)]; - st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED, - val); -} -qdf_export_symbol(hal_reo_flush_queue_status); - -inline void -hal_reo_flush_cache_status(uint32_t *reo_desc, - struct hal_reo_flush_cache_status *st, - hal_soc_handle_t hal_soc_hdl) -{ - struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; - uint32_t val; - - /* Offsets of descriptor fields defined in HW headers start - * from the field after TLV header */ - reo_desc += (sizeof(struct tlv_32_hdr) >> 2); - - /* header */ - hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV, - &(st->header), hal_soc); - - /* error bit */ - val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, - ERROR_DETECTED)]; - st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED, - val); - - /* block error */ - val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, - BLOCK_ERROR_DETAILS)]; - st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, - BLOCK_ERROR_DETAILS, - val); - if (!st->block_error) - qdf_set_bit(hal_soc->index, - (unsigned long *)&hal_soc->reo_res_bitmap); - - /* cache flush status */ - val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, - CACHE_CONTROLLER_FLUSH_STATUS_HIT)]; - st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, - CACHE_CONTROLLER_FLUSH_STATUS_HIT, - val); - - /* cache flush descriptor type */ - val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, - CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)]; - st->cache_flush_status_desc_type = - HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, - CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE, - val); - - /* cache flush count */ - val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, - CACHE_CONTROLLER_FLUSH_COUNT)]; - st->cache_flush_cnt = - HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, - 
CACHE_CONTROLLER_FLUSH_COUNT, - val); - -} -qdf_export_symbol(hal_reo_flush_cache_status); - -inline void hal_reo_unblock_cache_status(uint32_t *reo_desc, - hal_soc_handle_t hal_soc_hdl, - struct hal_reo_unblk_cache_status *st) -{ - struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; - uint32_t val; - - /* Offsets of descriptor fields defined in HW headers start - * from the field after TLV header */ - reo_desc += (sizeof(struct tlv_32_hdr) >> 2); - - /* header */ - hal_reo_status_get_header(reo_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV, - &st->header, hal_soc); - - /* error bit */ - val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2, - ERROR_DETECTED)]; - st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2, - ERROR_DETECTED, - val); - - /* unblock type */ - val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2, - UNBLOCK_TYPE)]; - st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2, - UNBLOCK_TYPE, - val); - - if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX)) - qdf_clear_bit(hal_soc->index, - (unsigned long *)&hal_soc->reo_res_bitmap); -} -qdf_export_symbol(hal_reo_unblock_cache_status); - -inline void hal_reo_flush_timeout_list_status( - uint32_t *reo_desc, - struct hal_reo_flush_timeout_list_status *st, - hal_soc_handle_t hal_soc_hdl) - -{ - struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; - uint32_t val; - - /* Offsets of descriptor fields defined in HW headers start - * from the field after TLV header */ - reo_desc += (sizeof(struct tlv_32_hdr) >> 2); - - /* header */ - hal_reo_status_get_header(reo_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV, - &(st->header), hal_soc); - - /* error bit */ - val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2, - ERROR_DETECTED)]; - st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2, - ERROR_DETECTED, - val); - - /* list empty */ - val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2, - TIMOUT_LIST_EMPTY)]; - st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2, - 
TIMOUT_LIST_EMPTY, - val); - - /* release descriptor count */ - val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3, - RELEASE_DESC_COUNT)]; - st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3, - RELEASE_DESC_COUNT, - val); - - /* forward buf count */ - val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3, - FORWARD_BUF_COUNT)]; - st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3, - FORWARD_BUF_COUNT, - val); -} -qdf_export_symbol(hal_reo_flush_timeout_list_status); - -inline void hal_reo_desc_thres_reached_status( - uint32_t *reo_desc, - struct hal_reo_desc_thres_reached_status *st, - hal_soc_handle_t hal_soc_hdl) -{ - struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; - uint32_t val; - - /* Offsets of descriptor fields defined in HW headers start - * from the field after TLV header */ - reo_desc += (sizeof(struct tlv_32_hdr) >> 2); - - /* header */ - hal_reo_status_get_header(reo_desc, - HAL_REO_DESC_THRES_STATUS_TLV, - &(st->header), hal_soc); - - /* threshold index */ - val = reo_desc[HAL_OFFSET_DW( - REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2, - THRESHOLD_INDEX)]; - st->thres_index = HAL_GET_FIELD( - REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2, - THRESHOLD_INDEX, - val); - - /* link desc counters */ - val = reo_desc[HAL_OFFSET_DW( - REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3, - LINK_DESCRIPTOR_COUNTER0)]; - st->link_desc_counter0 = HAL_GET_FIELD( - REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3, - LINK_DESCRIPTOR_COUNTER0, - val); - - val = reo_desc[HAL_OFFSET_DW( - REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4, - LINK_DESCRIPTOR_COUNTER1)]; - st->link_desc_counter1 = HAL_GET_FIELD( - REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4, - LINK_DESCRIPTOR_COUNTER1, - val); - - val = reo_desc[HAL_OFFSET_DW( - REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5, - LINK_DESCRIPTOR_COUNTER2)]; - st->link_desc_counter2 = HAL_GET_FIELD( - REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5, - LINK_DESCRIPTOR_COUNTER2, - val); - - val = 
reo_desc[HAL_OFFSET_DW( - REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6, - LINK_DESCRIPTOR_COUNTER_SUM)]; - st->link_desc_counter_sum = HAL_GET_FIELD( - REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6, - LINK_DESCRIPTOR_COUNTER_SUM, - val); -} -qdf_export_symbol(hal_reo_desc_thres_reached_status); - -inline void -hal_reo_rx_update_queue_status(uint32_t *reo_desc, - struct hal_reo_update_rx_queue_status *st, - hal_soc_handle_t hal_soc_hdl) -{ - struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; - - /* Offsets of descriptor fields defined in HW headers start - * from the field after TLV header */ - reo_desc += (sizeof(struct tlv_32_hdr) >> 2); - - /* header */ - hal_reo_status_get_header(reo_desc, - HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV, - &(st->header), hal_soc); -} -qdf_export_symbol(hal_reo_rx_update_queue_status); - -/** - * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG - * with command number - * @hal_soc: Handle to HAL SoC structure - * @hal_ring: Handle to HAL SRNG structure - * - * Return: none - */ -inline void hal_reo_init_cmd_ring(hal_soc_handle_t hal_soc_hdl, - hal_ring_handle_t hal_ring_hdl) +void hal_reo_init_cmd_ring(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl) { int cmd_num; uint32_t *desc_addr; @@ -1391,15 +36,15 @@ inline void hal_reo_init_cmd_ring(hal_soc_handle_t hal_soc_hdl, hal_get_srng_params(hal_soc_hdl, hal_ring_hdl, &srng_params); desc_addr = (uint32_t *)(srng_params.ring_base_vaddr); - desc_addr += (sizeof(struct tlv_32_hdr) >> 2); - desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2; + desc_addr += HAL_GET_NUM_DWORDS(sizeof(struct tlv_32_hdr)); + desc_size = HAL_GET_NUM_DWORDS(hal_srng_get_entrysize(soc, REO_CMD)); num_desc = srng_params.num_entries; cmd_num = 1; while (num_desc) { /* Offsets of descriptor fields defined in HW headers start * from the field after TLV header */ - HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0, - REO_CMD_NUMBER, cmd_num); + HAL_DESC_SET_FIELD(desc_addr, 
HAL_UNIFORM_REO_CMD_HEADER, + REO_CMD_NUMBER, cmd_num); desc_addr += desc_size; num_desc--; cmd_num++; } diff --git a/hal/wifi3.0/hal_reo.h b/hal/wifi3.0/hal_reo.h index 092e41b8dd..566911d65f 100644 --- a/hal/wifi3.0/hal_reo.h +++ b/hal/wifi3.0/hal_reo.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2019, 2021 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the @@ -38,6 +38,7 @@ /* SW headers */ #include "hal_api.h" +#include "hal_rx_hw_defines.h" /*--------------------------------------------------------------------------- Preprocessor definitions and constants @@ -77,9 +78,44 @@ #define HAL_GET_TLV(desc) (((struct tlv_32_hdr *) desc)->tlv_tag) #define HAL_OFFSET_DW(_block, _field) (HAL_OFFSET(_block, _field) >> 2) +#define HAL_OFFSET_QW(_block, _field) (HAL_OFFSET(_block, _field) >> 3) /* dword offsets in REO cmd TLV */ #define CMD_HEADER_DW_OFFSET 0 +/* TODO: See if the following definition is available in HW headers */ +#define HAL_REO_OWNED 4 +#define HAL_REO_QUEUE_DESC 8 + +/* TODO: Using associated link desc counter 1 for Rx. Check with FW on + * how these counters are assigned + */ +#define HAL_RX_LINK_DESC_CNTR 1 +/* TODO: Following definition should be from HW headers */ +#define HAL_DESC_REO_OWNED 4 + +#ifndef TID_TO_WME_AC +/** + * enum hal_wme_access_category: Access category enums + * @WME_AC_BE: best effort + * @WME_AC_BK: background + * @WME_AC_VI: video + * @WME_AC_VO: voice + */ +enum hal_wme_access_category { + WME_AC_BE, + WME_AC_BK, + WME_AC_VI, + WME_AC_VO +}; + +#define TID_TO_WME_AC(_tid) ( \ + (((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ + (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \ + (((_tid) == 4) || ((_tid) == 5)) ? 
WME_AC_VI : \ + WME_AC_VO) +#endif +#define HAL_NON_QOS_TID 16 + /** * enum reo_unblock_cache_type: Enum for unblock type in REO unblock command * @UNBLOCK_RES_INDEX: Unblock a block resource @@ -122,25 +158,6 @@ enum reo_cmd_exec_status { HAL_REO_CMD_DRAIN = 0xff }; -/** - * enum hal_reo_cmd_type: Enum for REO command type - * @CMD_GET_QUEUE_STATS: Get REO queue status/stats - * @CMD_FLUSH_QUEUE: Flush all frames in REO queue - * @CMD_FLUSH_CACHE: Flush descriptor entries in the cache - * @CMD_UNBLOCK_CACHE: Unblock a descriptor’s address that was blocked - * earlier with a ‘REO_FLUSH_CACHE’ command - * @CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list - * @CMD_UPDATE_RX_REO_QUEUE: Update REO queue settings - */ -enum hal_reo_cmd_type { - CMD_GET_QUEUE_STATS = 0, - CMD_FLUSH_QUEUE = 1, - CMD_FLUSH_CACHE = 2, - CMD_UNBLOCK_CACHE = 3, - CMD_FLUSH_TIMEOUT_LIST = 4, - CMD_UPDATE_RX_REO_QUEUE = 5 -}; - /** * struct hal_reo_cmd_params_std: Standard REO command parameters * @need_status: Status required for the command @@ -490,57 +507,317 @@ union hal_reo_status { struct hal_reo_update_rx_queue_status rx_queue_status; }; -/* Prototypes */ +#ifdef HAL_DISABLE_NON_BA_2K_JUMP_ERROR +static inline uint32_t hal_update_non_ba_win_size(int tid, + uint32_t ba_window_size) +{ + return ba_window_size; +} +#else +static inline uint32_t hal_update_non_ba_win_size(int tid, + uint32_t ba_window_size) +{ + if ((ba_window_size == 1) && (tid != HAL_NON_QOS_TID)) + ba_window_size++; + + return ba_window_size; +} +#endif + +#define BLOCK_RES_MASK 0xF +static inline uint8_t hal_find_one_bit(uint8_t x) +{ + uint8_t y = (x & (~x + 1)) & BLOCK_RES_MASK; + uint8_t pos; + + for (pos = 0; y; y >>= 1) + pos++; + + return pos-1; +} + +static inline uint8_t hal_find_zero_bit(uint8_t x) +{ + uint8_t y = (~x & (x+1)) & BLOCK_RES_MASK; + uint8_t pos; + + for (pos = 0; y; y >>= 1) + pos++; + + return pos-1; +} + /* REO command ring routines */ -void hal_reo_cmd_set_descr_addr(uint32_t 
*reo_desc, - enum hal_reo_cmd_type type, - uint32_t paddr_lo, - uint8_t paddr_hi); -int hal_reo_cmd_queue_stats(hal_ring_handle_t hal_ring_hdl, - hal_soc_handle_t hal_soc_hdl, - struct hal_reo_cmd_params *cmd); -int hal_reo_cmd_flush_queue(hal_ring_handle_t hal_ring_hdl, - hal_soc_handle_t hal_soc_hdl, - struct hal_reo_cmd_params *cmd); -int hal_reo_cmd_flush_cache(hal_ring_handle_t hal_ring_hdl, - hal_soc_handle_t hal_soc_hdl, - struct hal_reo_cmd_params *cmd); -int hal_reo_cmd_unblock_cache(hal_ring_handle_t hal_ring_hdl, - hal_soc_handle_t hal_soc_hdl, - struct hal_reo_cmd_params *cmd); -int hal_reo_cmd_flush_timeout_list(hal_ring_handle_t hal_ring_hdl, - hal_soc_handle_t hal_soc_hdl, - struct hal_reo_cmd_params *cmd); -int hal_reo_cmd_update_rx_queue(hal_ring_handle_t hal_ring_hdl, - hal_soc_handle_t hal_soc_hdl, - struct hal_reo_cmd_params *cmd); -/* REO status ring routines */ -void hal_reo_queue_stats_status(uint32_t *reo_desc, - struct hal_reo_queue_status *st, - hal_soc_handle_t hal_soc_hdl); -void hal_reo_flush_queue_status(uint32_t *reo_desc, - struct hal_reo_flush_queue_status *st, - hal_soc_handle_t hal_soc_hdl); -void hal_reo_flush_cache_status(uint32_t *reo_desc, - struct hal_reo_flush_cache_status *st, - hal_soc_handle_t hal_soc_hdl); -void hal_reo_unblock_cache_status(uint32_t *reo_desc, - hal_soc_handle_t hal_soc_hdl, - struct hal_reo_unblk_cache_status *st); -void hal_reo_flush_timeout_list_status( - uint32_t *reo_desc, - struct hal_reo_flush_timeout_list_status *st, - hal_soc_handle_t hal_soc_hdl); -void hal_reo_desc_thres_reached_status( - uint32_t *reo_desc, - struct hal_reo_desc_thres_reached_status *st, - hal_soc_handle_t hal_soc_hdl); -void hal_reo_rx_update_queue_status(uint32_t *reo_desc, - struct hal_reo_update_rx_queue_status *st, - hal_soc_handle_t hal_soc_hdl); +/** + * hal_uniform_desc_hdr_setup - setup reo_queue_ext descritpro + * @owner - owner info + * @buffer_type - buffer type + */ +static inline void 
+hal_uniform_desc_hdr_setup(uint32_t *desc, uint32_t owner, uint32_t buffer_type) +{ + HAL_DESC_SET_FIELD(desc, HAL_UNIFORM_DESCRIPTOR_HEADER, OWNER, + owner); + HAL_DESC_SET_FIELD(desc, HAL_UNIFORM_DESCRIPTOR_HEADER, BUFFER_TYPE, + buffer_type); +} +/** + * hal_reo_send_cmd() - Send reo cmd using the params provided. + * @hal_soc_hdl: HAL soc handle + * @hal_soc_hdl: srng handle + * @cmd: cmd ID + * @cmd_params: command params + * + * Return: cmd number + */ +static inline int +hal_reo_send_cmd(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl, + enum hal_reo_cmd_type cmd, + struct hal_reo_cmd_params *cmd_params) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return -EINVAL; + } + + if (hal_soc->ops->hal_reo_send_cmd) + return hal_soc->ops->hal_reo_send_cmd(hal_soc_hdl, hal_ring_hdl, + cmd, cmd_params); + + return -EINVAL; +} + +/* REO Status ring routines */ +static inline void +hal_reo_queue_stats_status(hal_ring_desc_t ring_desc, + struct hal_reo_queue_status *st, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return; + } + + if (hal_soc->ops->hal_reo_queue_stats_status) + return hal_soc->ops->hal_reo_queue_stats_status(ring_desc, st, + hal_soc_hdl); +} + +static inline void +hal_reo_flush_queue_status(hal_ring_desc_t ring_desc, + struct hal_reo_flush_queue_status *st, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return; + } + + if (hal_soc->ops->hal_reo_flush_queue_status) + return hal_soc->ops->hal_reo_flush_queue_status(ring_desc, st, + hal_soc_hdl); +} + +static inline void +hal_reo_flush_cache_status(hal_ring_desc_t ring_desc, + struct hal_reo_flush_cache_status *st, + 
hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return; + } + + if (hal_soc->ops->hal_reo_flush_cache_status) + return hal_soc->ops->hal_reo_flush_cache_status(ring_desc, st, + hal_soc_hdl); +} + +static inline void +hal_reo_unblock_cache_status(hal_ring_desc_t ring_desc, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_unblk_cache_status *st) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return; + } + + if (hal_soc->ops->hal_reo_unblock_cache_status) + return hal_soc->ops->hal_reo_unblock_cache_status(ring_desc, + hal_soc_hdl, + st); +} + +static inline void hal_reo_flush_timeout_list_status( + hal_ring_desc_t ring_desc, + struct hal_reo_flush_timeout_list_status *st, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return; + } + + if (hal_soc->ops->hal_reo_flush_timeout_list_status) + return hal_soc->ops->hal_reo_flush_timeout_list_status( + ring_desc, st, + hal_soc_hdl); +} + +static inline void hal_reo_desc_thres_reached_status( + hal_ring_desc_t ring_desc, + struct hal_reo_desc_thres_reached_status *st, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return; + } + + if (hal_soc->ops->hal_reo_desc_thres_reached_status) + return hal_soc->ops->hal_reo_desc_thres_reached_status( + ring_desc, st, + hal_soc_hdl); +} + +static inline void +hal_reo_rx_update_queue_status(hal_ring_desc_t ring_desc, + struct hal_reo_update_rx_queue_status *st, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + 
hal_err("hal handle is NULL"); + QDF_BUG(0); + return; + } + + if (hal_soc->ops->hal_reo_rx_update_queue_status) + return hal_soc->ops->hal_reo_rx_update_queue_status( + ring_desc, st, hal_soc_hdl); +} + +static inline void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl, int tid, + uint32_t ba_window_size, + uint32_t start_seq, void *hw_qdesc_vaddr, + qdf_dma_addr_t hw_qdesc_paddr, + int pn_type) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return; + } + + if (hal_soc->ops->hal_reo_qdesc_setup) + hal_soc->ops->hal_reo_qdesc_setup(hal_soc_hdl, tid, + ba_window_size, start_seq, + hw_qdesc_vaddr, + hw_qdesc_paddr, pn_type); +} + +/** + * hal_get_ba_aging_timeout - Retrieve BA aging timeout + * + * @hal_soc: Opaque HAL SOC handle + * @ac: Access category + * @value: timeout duration in millisec + */ +static inline void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, + uint8_t ac, + uint32_t *value) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_get_ba_aging_timeout(hal_soc_hdl, ac, value); +} + +/** + * hal_set_ba_aging_timeout - Set BA aging timeout + * + * @hal_soc: Opaque HAL SOC handle + * @ac: Access category + * @value: timeout duration in millisec + */ +static inline void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, + uint8_t ac, + uint32_t value) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_soc->ops->hal_set_ba_aging_timeout(hal_soc_hdl, ac, value); +} + +/** + * hal_get_reo_reg_base_offset() - Get REO register base offset + * @hal_soc_hdl: HAL soc handle + * + * Return: REO register base + */ +static inline uint32_t hal_get_reo_reg_base_offset(hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + return hal_soc->ops->hal_get_reo_reg_base_offset(); +} + +static inline uint32_t +hal_gen_reo_remap_val(hal_soc_handle_t hal_soc_hdl, + enum 
hal_reo_remap_reg remap_reg, + uint8_t *ix0_map) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + if (!hal_soc || !hal_soc->ops) { + hal_err("hal handle is NULL"); + QDF_BUG(0); + return 0; + } + + if (hal_soc->ops->hal_gen_reo_remap_val) + return hal_soc->ops->hal_gen_reo_remap_val(remap_reg, ix0_map); + + return 0; +} + +/* Function Proto-types */ + +/** + * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG + * with command number + * @hal_soc: Handle to HAL SoC structure + * @hal_ring: Handle to HAL SRNG structure + * + * Return: none + */ void hal_reo_init_cmd_ring(hal_soc_handle_t hal_soc_hdl, hal_ring_handle_t hal_ring_hdl); - #endif /* _HAL_REO_H */ diff --git a/hal/wifi3.0/li/hal_li_reo.c b/hal/wifi3.0/li/hal_li_reo.c new file mode 100644 index 0000000000..ccaec5bad6 --- /dev/null +++ b/hal/wifi3.0/li/hal_li_reo.c @@ -0,0 +1,1350 @@ +/* + * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "qdf_module.h" +#include "hal_li_hw_headers.h" +#include "hal_reo.h" +#include "hal_li_reo.h" +#include "hal_li_api.h" + +uint32_t hal_get_reo_reg_base_offset_li(void) +{ + return SEQ_WCSS_UMAC_REO_REG_OFFSET; +} + +/** + * hal_reo_qdesc_setup - Setup HW REO queue descriptor + * + * @hal_soc: Opaque HAL SOC handle + * @ba_window_size: BlockAck window size + * @start_seq: Starting sequence number + * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory + * @hw_qdesc_paddr: Physical address of REO queue descriptor memory + * @tid: TID + * + */ +void hal_reo_qdesc_setup_li(hal_soc_handle_t hal_soc_hdl, int tid, + uint32_t ba_window_size, + uint32_t start_seq, void *hw_qdesc_vaddr, + qdf_dma_addr_t hw_qdesc_paddr, + int pn_type) +{ + uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr; + uint32_t *reo_queue_ext_desc; + uint32_t reg_val; + uint32_t pn_enable; + uint32_t pn_size = 0; + + qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue)); + + hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED, + HAL_REO_QUEUE_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0, + RESERVED_0A, 0xDDBEEF); + + /* This a just a SW meta data and will be copied to REO destination + * descriptors indicated by hardware. + * TODO: Setting TID in this field. See if we should set something else. + */ + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1, + RECEIVE_QUEUE_NUMBER, tid); + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + VLD, 1); + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + ASSOCIATED_LINK_DESCRIPTOR_COUNTER, + HAL_RX_LINK_DESC_CNTR); + + /* + * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0 + */ + + reg_val = TID_TO_WME_AC(tid); + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val); + + if (ba_window_size < 1) + ba_window_size = 1; + + /* WAR to get 2k exception in Non BA case. 
+ * Setting window size to 2 to get 2k jump exception + * when we receive aggregates in Non BA case + */ + ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size); + + /* Set RTY bit for non-BA case. Duplicate detection is currently not + * done by HW in non-BA case if RTY bit is not set. + * TODO: This is a temporary War and should be removed once HW fix is + * made to check and discard duplicates even if RTY bit is not set. + */ + if (ba_window_size == 1) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1); + + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE, + ba_window_size - 1); + + switch (pn_type) { + case HAL_PN_WPA: + pn_enable = 1; + pn_size = PN_SIZE_48; + break; + case HAL_PN_WAPI_EVEN: + case HAL_PN_WAPI_UNEVEN: + pn_enable = 1; + pn_size = PN_SIZE_128; + break; + default: + pn_enable = 0; + break; + } + + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED, + pn_enable); + + if (pn_type == HAL_PN_WAPI_EVEN) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + PN_SHALL_BE_EVEN, 1); + else if (pn_type == HAL_PN_WAPI_UNEVEN) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + PN_SHALL_BE_UNEVEN, 1); + + /* + * TODO: Need to check if PN handling in SW needs to be enabled + * So far this is not a requirement + */ + + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE, + pn_size); + + /* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set + * based on BA window size and/or AMPDU capabilities + */ + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, + IGNORE_AMPDU_FLAG, 1); + + if (start_seq <= 0xfff) + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN, + start_seq); + + /* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA, + * but REO is not delivering packets if we set it to 1. 
Need to enable + * this once the issue is resolved + */ + HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0); + + /* TODO: Check if we should set start PN for WAPI */ + + /* TODO: HW queue descriptors are currently allocated for max BA + * window size for all QOS TIDs so that same descriptor can be used + * later when ADDBA request is received. This should be changed to + * allocate HW queue descriptors based on BA window size being + * negotiated (0 for non BA cases), and reallocate when BA window + * size changes and also send WMI message to FW to change the REO + * queue descriptor in Rx peer entry as part of dp_rx_tid_update. + */ + if (tid == HAL_NON_QOS_TID) + return; + + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue *)reo_queue_desc) + 1); + qdf_mem_zero(reo_queue_ext_desc, 3 * + sizeof(struct rx_reo_queue_ext)); + /* Initialize first reo queue extension descriptor */ + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, + HAL_REO_QUEUE_EXT_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_ext_desc, + UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, + 0xADBEEF); + /* Initialize second reo queue extension descriptor */ + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, + HAL_REO_QUEUE_EXT_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_ext_desc, + UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, + 0xBDBEEF); + /* Initialize third reo queue extension descriptor */ + reo_queue_ext_desc = (uint32_t *) + (((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1); + hal_uniform_desc_hdr_setup(reo_queue_ext_desc, + HAL_DESC_REO_OWNED, + HAL_REO_QUEUE_EXT_DESC); + /* Fixed pattern in reserved bits for debugging */ + HAL_DESC_SET_FIELD(reo_queue_ext_desc, + UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, + 0xCDBEEF); +} +/* Export the _li implementation defined in this TU; hal_reo_qdesc_setup is + * only a static-inline dispatcher in the header and cannot be exported. + */ +qdf_export_symbol(hal_reo_qdesc_setup_li); 
+ +/** + * hal_get_ba_aging_timeout_li - Get BA Aging timeout + * + * @hal_soc: Opaque HAL SOC handle + * @ac: Access category + * @value: window size to get + */ +void hal_get_ba_aging_timeout_li(hal_soc_handle_t hal_soc_hdl, uint8_t ac, + uint32_t *value) +{ + struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl; + + switch (ac) { + case WME_AC_BE: + *value = HAL_REG_READ(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; + break; + case WME_AC_BK: + *value = HAL_REG_READ(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; + break; + case WME_AC_VI: + *value = HAL_REG_READ(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; + break; + case WME_AC_VO: + *value = HAL_REG_READ(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000; + break; + default: + QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid AC: %d\n", ac); + } +} +qdf_export_symbol(hal_get_ba_aging_timeout_li); + +/** + * hal_set_ba_aging_timeout_li - Set BA Aging timeout + * + * @hal_soc: Opaque HAL SOC handle + * @ac: Access category + * ac: 0 - Background, 1 - Best Effort, 2 - Video, 3 - Voice + * @value: Input value to set + */ +void hal_set_ba_aging_timeout_li(hal_soc_handle_t hal_soc_hdl, uint8_t ac, + uint32_t value) +{ + struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl; + + switch (ac) { + case WME_AC_BE: + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + value * 1000); + break; + case WME_AC_BK: + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + value * 1000); + break; + case WME_AC_VI: + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + value * 1000); + break; + case WME_AC_VO: + HAL_REG_WRITE(soc, + HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR( + SEQ_WCSS_UMAC_REO_REG_OFFSET), + value * 1000); + break; + default: + 
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, + "Invalid AC: %d\n", ac); + } +} +qdf_export_symbol(hal_set_ba_aging_timeout_li); + +static inline void +hal_reo_cmd_set_descr_addr_li(uint32_t *reo_desc, enum hal_reo_cmd_type type, + uint32_t paddr_lo, uint8_t paddr_hi) +{ + switch (type) { + case CMD_GET_QUEUE_STATS: + HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1, + RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo); + HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, + RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi); + break; + case CMD_FLUSH_QUEUE: + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1, + FLUSH_DESC_ADDR_31_0, paddr_lo); + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2, + FLUSH_DESC_ADDR_39_32, paddr_hi); + break; + case CMD_FLUSH_CACHE: + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1, + FLUSH_ADDR_31_0, paddr_lo); + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, + FLUSH_ADDR_39_32, paddr_hi); + break; + case CMD_UPDATE_RX_REO_QUEUE: + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1, + RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo); + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi); + break; + default: + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR, + "%s: Invalid REO command type", __func__); + break; + } +} + +static inline int +hal_reo_cmd_queue_stats_li(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) +{ + uint32_t *reo_desc, val; + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E, + sizeof(struct reo_get_queue_stats)); + + /* + * Offsets of descriptor fields defined in HW headers start from + * 
the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_get_queue_stats) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, + REO_STATUS_REQUIRED, cmd->std.need_status); + + hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_GET_QUEUE_STATS, + cmd->std.addr_lo, + cmd->std.addr_hi); + + HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS, + cmd->u.stats_params.clear); + + if (hif_pm_runtime_get(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD, false) == 0) { + hal_srng_access_end(hal_soc_hdl, hal_ring_hdl); + hif_pm_runtime_put(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD); + } else { + hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl); + hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); + hal_srng_inc_flush_cnt(hal_ring_hdl); + } + + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, + val); +} + +static inline int +hal_reo_cmd_flush_queue_li(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) +{ + uint32_t *reo_desc, val; + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E, + sizeof(struct reo_flush_queue)); + + /* + * Offsets of descriptor fields defined in HW headers start from + * the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_flush_queue) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_SET_FIELD(reo_desc, 
UNIFORM_REO_CMD_HEADER_0, + REO_STATUS_REQUIRED, cmd->std.need_status); + + hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_FLUSH_QUEUE, + cmd->std.addr_lo, cmd->std.addr_hi); + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2, + BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH, + cmd->u.fl_queue_params.block_use_after_flush); + + if (cmd->u.fl_queue_params.block_use_after_flush) { + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2, + BLOCK_RESOURCE_INDEX, + cmd->u.fl_queue_params.index); + } + + hal_srng_access_end(hal_soc, hal_ring_hdl); + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, + val); +} + +static inline int +hal_reo_cmd_flush_cache_li(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) +{ + uint32_t *reo_desc, val; + struct hal_reo_cmd_flush_cache_params *cp; + uint8_t index = 0; + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + + cp = &cmd->u.fl_cache_params; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + + /* We need a cache block resource for this operation, and REO HW has + * only 4 such blocking resources. These resources are managed using + * reo_res_bitmap, and we return failure if none is available. 
+ */ + if (cp->block_use_after_flush) { + index = hal_find_zero_bit(hal_soc->reo_res_bitmap); + if (index > 3) { + qdf_print("No blocking resource available!"); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + hal_soc->index = index; + } + + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + hal_srng_access_end(hal_soc, hal_ring_hdl); + hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl)); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E, + sizeof(struct reo_flush_cache)); + + /* + * Offsets of descriptor fields defined in HW headers start from + * the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_flush_cache) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, + REO_STATUS_REQUIRED, cmd->std.need_status); + + hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_FLUSH_CACHE, + cmd->std.addr_lo, cmd->std.addr_hi); + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, + FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue); + + /* set it to 0 for now */ + cp->rel_block_index = 0; + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, + RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index); + + if (cp->block_use_after_flush) { + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, + CACHE_BLOCK_RESOURCE_INDEX, index); + } + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, + FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval); + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, + BLOCK_CACHE_USAGE_AFTER_FLUSH, + cp->block_use_after_flush); + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE, + cp->flush_entire_cache); + + if (hif_pm_runtime_get(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD, false) == 0) { + hal_srng_access_end(hal_soc_hdl, hal_ring_hdl); + hif_pm_runtime_put(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD); + } else { + 
hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl); + hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); + hal_srng_inc_flush_cnt(hal_ring_hdl); + } + + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, + val); +} + +static inline int +hal_reo_cmd_unblock_cache_li(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) + +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t *reo_desc, val; + uint8_t index = 0; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + + if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) { + index = hal_find_one_bit(hal_soc->reo_res_bitmap); + if (index > 3) { + hal_srng_access_end(hal_soc, hal_ring_hdl); + qdf_print("No blocking resource to unblock!"); + return -EBUSY; + } + } + + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E, + sizeof(struct reo_unblock_cache)); + + /* + * Offsets of descriptor fields defined in HW headers start from + * the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_unblock_cache) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, + REO_STATUS_REQUIRED, cmd->std.need_status); + + HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1, + UNBLOCK_TYPE, cmd->u.unblk_cache_params.type); + + if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) { + HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1, + CACHE_BLOCK_RESOURCE_INDEX, + cmd->u.unblk_cache_params.index); + } + + hal_srng_access_end(hal_soc, hal_ring_hdl); + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return 
HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, + val); +} + +static inline int +hal_reo_cmd_flush_timeout_list_li(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t *reo_desc, val; + + hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E, + sizeof(struct reo_flush_timeout_list)); + + /* + * Offsets of descriptor fields defined in HW headers start from + * the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_flush_timeout_list) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, + REO_STATUS_REQUIRED, cmd->std.need_status); + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST, + cmd->u.fl_tim_list_params.ac_list); + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2, + MINIMUM_RELEASE_DESC_COUNT, + cmd->u.fl_tim_list_params.min_rel_desc); + + HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2, + MINIMUM_FORWARD_BUF_COUNT, + cmd->u.fl_tim_list_params.min_fwd_buf); + + hal_srng_access_end(hal_soc, hal_ring_hdl); + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, + val); +} + +static inline int +hal_reo_cmd_update_rx_queue_li(hal_ring_handle_t hal_ring_hdl, + hal_soc_handle_t hal_soc_hdl, + struct hal_reo_cmd_params *cmd) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + uint32_t *reo_desc, val; + struct hal_reo_cmd_update_queue_params *p; + + p = &cmd->u.upd_queue_params; + + 
hal_srng_access_start(hal_soc_hdl, hal_ring_hdl); + reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl); + if (!reo_desc) { + QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG, + "%s: Out of cmd ring entries", __func__); + hal_srng_access_end(hal_soc, hal_ring_hdl); + return -EBUSY; + } + + HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E, + sizeof(struct reo_update_rx_reo_queue)); + + /* + * Offsets of descriptor fields defined in HW headers start from + * the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER), + sizeof(struct reo_update_rx_reo_queue) - + (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2)); + + HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0, + REO_STATUS_REQUIRED, cmd->std.need_status); + + hal_reo_cmd_set_descr_addr_li(reo_desc, CMD_UPDATE_RX_REO_QUEUE, + cmd->std.addr_lo, cmd->std.addr_hi); + + /* Each UPDATE_* flag below is programmed exactly once (duplicate + * writes of UPDATE_DISABLE_DUPLICATE_DETECTION and UPDATE_BAR + * removed). + */ + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD, + p->update_vld); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER, + p->update_assoc_link_desc); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_DISABLE_DUPLICATE_DETECTION, + p->update_disable_dup_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_SOFT_REORDER_ENABLE, + p->update_soft_reorder_enab); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_AC, p->update_ac); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_BAR, p->update_bar); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_RTY, 
p->update_rty); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_CHK_2K_MODE, p->update_chk_2k_mode); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_OOR_MODE, p->update_oor_mode); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_SIZE, p->update_pn_size); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_SVLD, p->update_svld); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_SSN, p->update_ssn); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_SEQ_2K_ERROR_DETECTED_FLAG, + p->update_seq_2k_err_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN_VALID, p->update_pn_valid); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, + UPDATE_PN, p->update_pn); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + RECEIVE_QUEUE_NUMBER, p->rx_queue_num); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + VLD, p->vld); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + ASSOCIATED_LINK_DESCRIPTOR_COUNTER, + p->assoc_link_desc); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + SOFT_REORDER_ENABLE, 
p->soft_reorder_enab); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + BAR, p->bar); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + CHK_2K_MODE, p->chk_2k_mode); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + RTY, p->rty); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + OOR_MODE, p->oor_mode); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + PN_CHECK_NEEDED, p->pn_check_needed); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + PN_SHALL_BE_EVEN, p->pn_even); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + PN_SHALL_BE_UNEVEN, p->pn_uneven); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + PN_HANDLING_ENABLE, p->pn_hand_enab); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, + IGNORE_AMPDU_FLAG, p->ignore_ampdu); + + if (p->ba_window_size < 1) + p->ba_window_size = 1; + /* + * WAR to get 2k exception in Non BA case. 
+ * Setting window size to 2 to get 2k jump exception + * when we receive aggregates in Non BA case + */ + if (p->ba_window_size == 1) + p->ba_window_size++; + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + BA_WINDOW_SIZE, p->ba_window_size - 1); + + if (p->pn_size == 24) + p->pn_size = PN_SIZE_24; + else if (p->pn_size == 48) + p->pn_size = PN_SIZE_48; + else if (p->pn_size == 128) + p->pn_size = PN_SIZE_128; + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + PN_SIZE, p->pn_size); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + SVLD, p->svld); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + SSN, p->ssn); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4, + PN_ERROR_DETECTED_FLAG, p->pn_err_detect); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5, + PN_31_0, p->pn_31_0); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6, + PN_63_32, p->pn_63_32); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7, + PN_95_64, p->pn_95_64); + + HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8, + PN_127_96, p->pn_127_96); + + if (hif_pm_runtime_get(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD, false) == 0) { + hal_srng_access_end(hal_soc_hdl, hal_ring_hdl); + hif_pm_runtime_put(hal_soc->hif_handle, + RTPM_ID_HAL_REO_CMD); + } else { + hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl); + hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT); + hal_srng_inc_flush_cnt(hal_ring_hdl); + } + + val = reo_desc[CMD_HEADER_DW_OFFSET]; + return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, + val); +} + +int hal_reo_send_cmd_li(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl, + enum hal_reo_cmd_type cmd, + void *params) +{ + struct hal_reo_cmd_params *cmd_params = + (struct hal_reo_cmd_params *)params; + int num = 0; + + switch (cmd) { + case CMD_GET_QUEUE_STATS: + 
num = hal_reo_cmd_queue_stats_li(hal_ring_hdl, + hal_soc_hdl, cmd_params); + break; + case CMD_FLUSH_QUEUE: + num = hal_reo_cmd_flush_queue_li(hal_ring_hdl, + hal_soc_hdl, cmd_params); + break; + case CMD_FLUSH_CACHE: + num = hal_reo_cmd_flush_cache_li(hal_ring_hdl, + hal_soc_hdl, cmd_params); + break; + case CMD_UNBLOCK_CACHE: + num = hal_reo_cmd_unblock_cache_li(hal_ring_hdl, + hal_soc_hdl, cmd_params); + break; + case CMD_FLUSH_TIMEOUT_LIST: + num = hal_reo_cmd_flush_timeout_list_li(hal_ring_hdl, + hal_soc_hdl, + cmd_params); + break; + case CMD_UPDATE_RX_REO_QUEUE: + num = hal_reo_cmd_update_rx_queue_li(hal_ring_hdl, + hal_soc_hdl, cmd_params); + break; + default: + hal_err("Invalid REO command type: %d", cmd); + return -EINVAL; + } + + return num; +} + +void +hal_reo_queue_stats_status_li(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct hal_reo_queue_status *st = + (struct hal_reo_queue_status *)st_handle; + uint32_t *reo_desc = (uint32_t *)ring_desc; + uint32_t val; + + /* + * Offsets of descriptor fields defined in HW headers start + * from the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + hal_reo_status_get_header(ring_desc, HAL_REO_QUEUE_STATS_STATUS_TLV, + &(st->header), hal_soc); + + /* SSN */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)]; + st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val); + + /* current index */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, + CURRENT_INDEX)]; + st->curr_idx = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, + CURRENT_INDEX, val); + + /* PN bits */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3, + PN_31_0)]; + st->pn_31_0 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3, + PN_31_0, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4, + PN_63_32)]; + st->pn_63_32 = + 
HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4, + PN_63_32, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5, + PN_95_64)]; + st->pn_95_64 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5, + PN_95_64, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6, + PN_127_96)]; + st->pn_127_96 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6, + PN_127_96, val); + + /* timestamps */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7, + LAST_RX_ENQUEUE_TIMESTAMP)]; + st->last_rx_enq_tstamp = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7, + LAST_RX_ENQUEUE_TIMESTAMP, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8, + LAST_RX_DEQUEUE_TIMESTAMP)]; + st->last_rx_deq_tstamp = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8, + LAST_RX_DEQUEUE_TIMESTAMP, val); + + /* rx bitmap */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9, + RX_BITMAP_31_0)]; + st->rx_bitmap_31_0 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9, + RX_BITMAP_31_0, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10, + RX_BITMAP_63_32)]; + st->rx_bitmap_63_32 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10, + RX_BITMAP_63_32, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11, + RX_BITMAP_95_64)]; + st->rx_bitmap_95_64 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11, + RX_BITMAP_95_64, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12, + RX_BITMAP_127_96)]; + st->rx_bitmap_127_96 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12, + RX_BITMAP_127_96, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13, + RX_BITMAP_159_128)]; + st->rx_bitmap_159_128 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13, + RX_BITMAP_159_128, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14, + RX_BITMAP_191_160)]; + st->rx_bitmap_191_160 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14, + RX_BITMAP_191_160, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15, + 
RX_BITMAP_223_192)]; + st->rx_bitmap_223_192 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15, + RX_BITMAP_223_192, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16, + RX_BITMAP_255_224)]; + st->rx_bitmap_255_224 = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16, + RX_BITMAP_255_224, val); + + /* various counts */ + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17, + CURRENT_MPDU_COUNT)]; + st->curr_mpdu_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17, + CURRENT_MPDU_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17, + CURRENT_MSDU_COUNT)]; + st->curr_msdu_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17, + CURRENT_MSDU_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18, + TIMEOUT_COUNT)]; + st->fwd_timeout_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18, + TIMEOUT_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18, + FORWARD_DUE_TO_BAR_COUNT)]; + st->fwd_bar_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18, + FORWARD_DUE_TO_BAR_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18, + DUPLICATE_COUNT)]; + st->dup_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18, + DUPLICATE_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19, + FRAMES_IN_ORDER_COUNT)]; + st->frms_in_order_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19, + FRAMES_IN_ORDER_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19, + BAR_RECEIVED_COUNT)]; + st->bar_rcvd_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19, + BAR_RECEIVED_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20, + MPDU_FRAMES_PROCESSED_COUNT)]; + st->mpdu_frms_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20, + MPDU_FRAMES_PROCESSED_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21, + MSDU_FRAMES_PROCESSED_COUNT)]; + st->msdu_frms_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21, + 
MSDU_FRAMES_PROCESSED_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22, + TOTAL_PROCESSED_BYTE_COUNT)]; + st->total_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22, + TOTAL_PROCESSED_BYTE_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23, + LATE_RECEIVE_MPDU_COUNT)]; + st->late_recv_mpdu_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23, + LATE_RECEIVE_MPDU_COUNT, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23, + WINDOW_JUMP_2K)]; + st->win_jump_2k = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23, + WINDOW_JUMP_2K, val); + + val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23, + HOLE_COUNT)]; + st->hole_cnt = + HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23, + HOLE_COUNT, val); +} + +void +hal_reo_flush_queue_status_li(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct hal_reo_flush_queue_status *st = + (struct hal_reo_flush_queue_status *)st_handle; + uint32_t *reo_desc = (uint32_t *)ring_desc; + uint32_t val; + + /* + * Offsets of descriptor fields defined in HW headers start + * from the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV, + &(st->header), hal_soc); + + /* error bit */ + val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED, + val); +} + +void +hal_reo_flush_cache_status_li(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct hal_reo_flush_cache_status *st = + (struct hal_reo_flush_cache_status *)st_handle; + uint32_t *reo_desc = (uint32_t *)ring_desc; + uint32_t val; + + /* + * Offsets of descriptor fields defined in HW headers start + * from the field after TLV header + */ + 
reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV, + &(st->header), hal_soc); + + /* error bit */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED, + val); + + /* block error */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + BLOCK_ERROR_DETAILS)]; + st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, + BLOCK_ERROR_DETAILS, + val); + if (!st->block_error) + qdf_set_bit(hal_soc->index, + (unsigned long *)&hal_soc->reo_res_bitmap); + + /* cache flush status */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_STATUS_HIT)]; + st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_STATUS_HIT, + val); + + /* cache flush descriptor type */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)]; + st->cache_flush_status_desc_type = + HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE, + val); + + /* cache flush count */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_COUNT)]; + st->cache_flush_cnt = + HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, + CACHE_CONTROLLER_FLUSH_COUNT, + val); +} + +void +hal_reo_unblock_cache_status_li(hal_ring_desc_t ring_desc, + hal_soc_handle_t hal_soc_hdl, + void *st_handle) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct hal_reo_unblk_cache_status *st = + (struct hal_reo_unblk_cache_status *)st_handle; + uint32_t *reo_desc = (uint32_t *)ring_desc; + uint32_t val; + + /* + * Offsets of descriptor fields defined in HW headers start + * from the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + hal_reo_status_get_header(ring_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV, + &st->header, hal_soc); + + /* error bit */ + val = 
reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2, + ERROR_DETECTED, + val); + + /* unblock type */ + val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2, + UNBLOCK_TYPE)]; + st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2, + UNBLOCK_TYPE, + val); + + if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX)) + qdf_clear_bit(hal_soc->index, + (unsigned long *)&hal_soc->reo_res_bitmap); +} + +void hal_reo_flush_timeout_list_status_li(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct hal_reo_flush_timeout_list_status *st = + (struct hal_reo_flush_timeout_list_status *)st_handle; + uint32_t *reo_desc = (uint32_t *)ring_desc; + uint32_t val; + + /* + * Offsets of descriptor fields defined in HW headers start + * from the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + hal_reo_status_get_header(ring_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV, + &(st->header), hal_soc); + + /* error bit */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2, + ERROR_DETECTED)]; + st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2, + ERROR_DETECTED, + val); + + /* list empty */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2, + TIMOUT_LIST_EMPTY)]; + st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2, + TIMOUT_LIST_EMPTY, + val); + + /* release descriptor count */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3, + RELEASE_DESC_COUNT)]; + st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3, + RELEASE_DESC_COUNT, + val); + + /* forward buf count */ + val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3, + FORWARD_BUF_COUNT)]; + st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3, + FORWARD_BUF_COUNT, + val); +} + +void hal_reo_desc_thres_reached_status_li(hal_ring_desc_t 
ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct hal_reo_desc_thres_reached_status *st = + (struct hal_reo_desc_thres_reached_status *)st_handle; + uint32_t *reo_desc = (uint32_t *)ring_desc; + uint32_t val; + + /* + * Offsets of descriptor fields defined in HW headers start + * from the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + hal_reo_status_get_header(ring_desc, + HAL_REO_DESC_THRES_STATUS_TLV, + &(st->header), hal_soc); + + /* threshold index */ + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2, + THRESHOLD_INDEX)]; + st->thres_index = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2, + THRESHOLD_INDEX, + val); + + /* link desc counters */ + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3, + LINK_DESCRIPTOR_COUNTER0)]; + st->link_desc_counter0 = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3, + LINK_DESCRIPTOR_COUNTER0, + val); + + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4, + LINK_DESCRIPTOR_COUNTER1)]; + st->link_desc_counter1 = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4, + LINK_DESCRIPTOR_COUNTER1, + val); + + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5, + LINK_DESCRIPTOR_COUNTER2)]; + st->link_desc_counter2 = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5, + LINK_DESCRIPTOR_COUNTER2, + val); + + val = reo_desc[HAL_OFFSET_DW( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6, + LINK_DESCRIPTOR_COUNTER_SUM)]; + st->link_desc_counter_sum = HAL_GET_FIELD( + REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6, + LINK_DESCRIPTOR_COUNTER_SUM, + val); +} + +void +hal_reo_rx_update_queue_status_li(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl) +{ + struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl; + struct hal_reo_update_rx_queue_status *st = + 
(struct hal_reo_update_rx_queue_status *)st_handle; + uint32_t *reo_desc = (uint32_t *)ring_desc; + + /* + * Offsets of descriptor fields defined in HW headers start + * from the field after TLV header + */ + reo_desc += (sizeof(struct tlv_32_hdr) >> 2); + + /* header */ + hal_reo_status_get_header(ring_desc, + HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV, + &(st->header), hal_soc); +} diff --git a/hal/wifi3.0/li/hal_li_reo.h b/hal/wifi3.0/li/hal_li_reo.h new file mode 100644 index 0000000000..e968d8c7bb --- /dev/null +++ b/hal/wifi3.0/li/hal_li_reo.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2017-2019, 2021 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _HAL_LI_REO_H_ +#define _HAL_LI_REO_H_ + +#include "hal_rx.h" +#include "hal_reo.h" + +#define HAL_REO_QUEUE_EXT_DESC 9 + +/* Proto-types */ +void hal_get_ba_aging_timeout_li(hal_soc_handle_t hal_soc_hdl, uint8_t ac, + uint32_t *value); +void hal_set_ba_aging_timeout_li(hal_soc_handle_t hal_soc_hdl, uint8_t ac, + uint32_t value); +uint32_t hal_get_reo_reg_base_offset_li(void); + +int hal_reo_send_cmd_li(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl, + enum hal_reo_cmd_type cmd, + void *params); + +/* REO status ring routines */ +void +hal_reo_queue_stats_status_li(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl); +void +hal_reo_flush_queue_status_li(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl); +void +hal_reo_flush_cache_status_li(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl); +void +hal_reo_unblock_cache_status_li(hal_ring_desc_t ring_desc, + hal_soc_handle_t hal_soc_hdl, + void *st_handle); +void hal_reo_flush_timeout_list_status_li(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl); +void hal_reo_desc_thres_reached_status_li(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl); +void +hal_reo_rx_update_queue_status_li(hal_ring_desc_t ring_desc, + void *st_handle, + hal_soc_handle_t hal_soc_hdl); + +/** + * hal_reo_init_cmd_ring_li() - Initialize descriptors of REO command SRNG + * with command number + * @hal_soc: Handle to HAL SoC structure + * @hal_ring: Handle to HAL SRNG structure + * + * Return: none + */ +void hal_reo_init_cmd_ring_li(hal_soc_handle_t hal_soc_hdl, + hal_ring_handle_t hal_ring_hdl); + +#endif /* _HAL_LI_REO_H_ */