
qcacmn: REO command access APIs

Implementation of APIs for Lithium REO command/status rings
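
A usage sketch (illustrative only; dp_stats_cb, qdesc_paddr and cb_ctxt are
placeholders): a DP-layer caller issues a GET_QUEUE_STATS command and
receives the result through the status ring handler callback:

    static void dp_stats_cb(struct dp_soc *soc, void *cb_ctxt,
                            union hal_reo_status *reo_status)
    {
            /* inspect reo_status->queue_status here */
    }

    /* e.g. from a stats request path */
    struct hal_reo_cmd_params params = {0};

    params.std.need_status = true;
    params.std.addr_lo = qdesc_paddr & 0xffffffff;
    params.std.addr_hi = (uint64_t)qdesc_paddr >> 32;
    params.u.stats_params.clear = true;
    dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS, &params, dp_stats_cb, cb_ctxt);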

Change-Id: Ib428dd995bd597f7fbfbfc458dade6e7e258000c
Manoj Ekbote 8 years ago
parent
commit
4f0c6b1732

+ 5 - 0
dp/wifi3.0/dp_main.c

@@ -833,6 +833,10 @@ static int dp_soc_cmn_setup(struct dp_soc *soc)
 		goto fail1;
 	}
 
+	hal_reo_init_cmd_ring(soc->hal_soc, soc->reo_cmd_ring.hal_srng);
+	TAILQ_INIT(&soc->rx.reo_cmd_list);
+	qdf_spinlock_create(&soc->rx.reo_cmd_lock);
+
 	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
 		REO_STATUS_RING_SIZE)) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
@@ -1118,6 +1122,7 @@ static void dp_soc_detach_wifi3(void *txrx_soc)
 	/* REO command and status rings */
 	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
 	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
+	qdf_spinlock_destroy(&soc->rx.reo_cmd_lock);
 
 	qdf_spinlock_destroy(&soc->peer_ref_mutex);
 	htt_soc_detach(soc->htt_handle);

+ 169 - 0
dp/wifi3.0/dp_reo.c

@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "dp_types.h"
+#include "hal_reo.h"
+
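+/**
+ * dp_reo_send_cmd() - Enqueue a REO command on the REO command ring and
+ *	optionally register a callback to be invoked when the corresponding
+ *	status is processed on the REO status ring
+ * @soc: DP SoC handle
+ * @type: REO command type
+ * @params: Parameters for the command
+ * @callback_fn: Callback to be invoked with the command status (may be NULL)
+ * @data: Opaque cookie passed back to @callback_fn
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the command
+ * could not be sent, QDF_STATUS_E_NOMEM if the tracking entry could not be
+ * allocated
+ */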
+QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type,
+		     struct hal_reo_cmd_params *params,
+		     void (*callback_fn)(struct dp_soc *, void *,
+					 union hal_reo_status *),
+		     void *data)
+{
+	struct dp_reo_cmd_info *reo_cmd;
+	int num;
+
+	switch (type) {
+	case CMD_GET_QUEUE_STATS:
+		num = hal_reo_cmd_queue_stats(soc->reo_cmd_ring.hal_srng,
+					      soc->hal_soc, params);
+		break;
+	case CMD_FLUSH_QUEUE:
+		num = hal_reo_cmd_flush_queue(soc->reo_cmd_ring.hal_srng,
+					      soc->hal_soc, params);
+		break;
+	case CMD_FLUSH_CACHE:
+		num = hal_reo_cmd_flush_cache(soc->reo_cmd_ring.hal_srng,
+					      soc->hal_soc, params);
+		break;
+	case CMD_UNBLOCK_CACHE:
+		num = hal_reo_cmd_unblock_cache(soc->reo_cmd_ring.hal_srng,
+						soc->hal_soc, params);
+		break;
+	case CMD_FLUSH_TIMEOUT_LIST:
+		num = hal_reo_cmd_flush_timeout_list(soc->reo_cmd_ring.hal_srng,
+						     soc->hal_soc, params);
+		break;
+	case CMD_UPDATE_RX_REO_QUEUE:
+		num = hal_reo_cmd_update_rx_queue(soc->reo_cmd_ring.hal_srng,
+						  soc->hal_soc, params);
+		break;
+	default:
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: Invalid REO command type\n", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (num == QDF_STATUS_E_FAILURE) {
+		qdf_print("%s: Error sending REO command\n", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (callback_fn) {
+		reo_cmd = qdf_mem_malloc(sizeof(*reo_cmd));
+		if (!reo_cmd) {
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"%s: alloc failed for REO cmd:%d!!\n",
+				__func__, type);
+			return QDF_STATUS_E_NOMEM;
+		}
+
+		reo_cmd->cmd = num;
+		reo_cmd->cmd_type = type;
+		reo_cmd->handler = callback_fn;
+		reo_cmd->data = data;
+		qdf_spin_lock_bh(&soc->rx.reo_cmd_lock);
+		TAILQ_INSERT_TAIL(&soc->rx.reo_cmd_list, reo_cmd,
+				  reo_cmd_list_elem);
+		qdf_spin_unlock_bh(&soc->rx.reo_cmd_lock);
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
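+/**
+ * dp_reo_status_ring_handler() - Process REO status ring entries, match each
+ *	status against the pending command list by command number and invoke
+ *	the registered callback
+ * @soc: DP SoC handle
+ *
+ * Return: none
+ */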
+void dp_reo_status_ring_handler(struct dp_soc *soc)
+{
+	uint32_t *reo_desc;
+	struct dp_reo_cmd_info *reo_cmd = NULL;
+	union hal_reo_status reo_status;
+	int num;
+
+	if (hal_srng_access_start(soc->hal_soc,
+		soc->reo_status_ring.hal_srng)) {
+		return;
+	}
+	reo_desc = hal_srng_dst_get_next(soc->hal_soc,
+					soc->reo_status_ring.hal_srng);
+
+	while (reo_desc) {
+		uint16_t tlv = HAL_GET_TLV(reo_desc);
+
+		switch (tlv) {
+		case HAL_REO_QUEUE_STATS_STATUS_TLV:
+			hal_reo_queue_stats_status(reo_desc,
+					   &reo_status.queue_status);
+			num = reo_status.queue_status.header.cmd_num;
+			break;
+		case HAL_REO_FLUSH_QUEUE_STATUS_TLV:
+			hal_reo_flush_queue_status(reo_desc,
+						   &reo_status.fl_queue_status);
+			num = reo_status.fl_queue_status.header.cmd_num;
+			break;
+		case HAL_REO_FLUSH_CACHE_STATUS_TLV:
+			hal_reo_flush_cache_status(reo_desc, soc->hal_soc,
+						   &reo_status.fl_cache_status);
+			num = reo_status.fl_cache_status.header.cmd_num;
+			break;
+		case HAL_REO_UNBLK_CACHE_STATUS_TLV:
+			hal_reo_unblock_cache_status(reo_desc, soc->hal_soc,
+						&reo_status.unblk_cache_status);
+			num = reo_status.unblk_cache_status.header.cmd_num;
+			break;
+		case HAL_REO_TIMOUT_LIST_STATUS_TLV:
+			hal_reo_flush_timeout_list_status(reo_desc,
+						&reo_status.fl_timeout_status);
+			num = reo_status.fl_timeout_status.header.cmd_num;
+			break;
+		case HAL_REO_DESC_THRES_STATUS_TLV:
+			hal_reo_desc_thres_reached_status(reo_desc,
+						&reo_status.thres_status);
+			num = reo_status.thres_status.header.cmd_num;
+			break;
+		case HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV:
+			hal_reo_rx_update_queue_status(reo_desc,
+						&reo_status.rx_queue_status);
+			num = reo_status.rx_queue_status.header.cmd_num;
+			break;
+		default:
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
+				"%s, no handler for TLV:%d\n", __func__, tlv);
+			goto next;
+		} /* switch */
+
+		qdf_spin_lock_bh(&soc->rx.reo_cmd_lock);
+		TAILQ_FOREACH(reo_cmd, &soc->rx.reo_cmd_list,
+			reo_cmd_list_elem) {
+			if (reo_cmd->cmd == num) {
+				TAILQ_REMOVE(&soc->rx.reo_cmd_list, reo_cmd,
+				reo_cmd_list_elem);
+				break;
+			}
+		}
+		qdf_spin_unlock_bh(&soc->rx.reo_cmd_lock);
+
+		if (reo_cmd) {
+			reo_cmd->handler(soc, reo_cmd->data,
+					&reo_status);
+			qdf_mem_free(reo_cmd);
+		}
+
+next:
+		reo_desc = hal_srng_dst_get_next(soc->hal_soc,
+						soc->reo_status_ring.hal_srng);
+	} /* while */
+
+	hal_srng_access_end(soc->hal_soc, soc->reo_status_ring.hal_srng);
+}

+ 10 - 0
dp/wifi3.0/dp_types.h

@@ -32,6 +32,7 @@
 #endif
 
 #include <hal_tx.h>
+#include <hal_reo.h>
 
 #define MAX_PDEV_CNT 3
 #define MAX_LINK_DESC_BANKS 8
@@ -202,6 +203,13 @@ struct dp_rx_reorder_array_elem {
 
 #define DP_RX_BA_INACTIVE 0
 #define DP_RX_BA_ACTIVE 1
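+
+/**
+ * struct dp_reo_cmd_info - REO command sent to hardware, tracked until the
+ *	corresponding status arrives on the REO status ring
+ * @cmd: Command number returned by the HAL REO command API
+ * @cmd_type: REO command type
+ * @data: Opaque cookie passed back to @handler
+ * @handler: Callback invoked from the REO status ring handler
+ * @reo_cmd_list_elem: List element for the per-soc pending command list
+ */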
+struct dp_reo_cmd_info {
+	uint16_t cmd;
+	enum hal_reo_cmd_type cmd_type;
+	void *data;
+	void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
+	TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
+};
 
 /* Rx TID */
 struct dp_rx_tid {
@@ -386,6 +394,8 @@ struct dp_soc {
 			int defrag_timeout_check;
 			int dup_check;
 		} flags;
+		TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
+		qdf_spinlock_t reo_cmd_lock;
 	} rx;
 
 	/* optional rx processing function */

+ 5 - 1
hal/wifi3.0/hal_internal.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -306,6 +306,10 @@ struct hal_soc {
 	/* Shared memory for ring pointer updates from host to FW */
 	uint32_t *shadow_wrptr_mem_vaddr;
 	qdf_dma_addr_t shadow_wrptr_mem_paddr;
+
+	/* REO blocking resource management: bitmap of the 4 cache blocking
+	 * resources in use, and the index picked by the most recent
+	 * flush/unblock cache command */
+	uint8_t reo_res_bitmap;
+	uint8_t index;
 };
 
 /* TODO: Check if the following can be provided directly by HW headers */

+ 915 - 0
hal/wifi3.0/hal_reo.c

@@ -0,0 +1,915 @@
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hal_reo.h"
+#include "hal_tx.h"
+
+#define BLOCK_RES_MASK		0xF
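+/* Returns the position of the least significant set bit within the 4-bit
+ * blocking resource bitmap (e.g. 0x6 -> 1). Returns 0xff when no bit in
+ * the lower nibble is set.
+ */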
+static inline uint8_t hal_find_one_bit(uint8_t x)
+{
+	uint8_t y = (x & (~x + 1)) & BLOCK_RES_MASK;
+	uint8_t pos;
+
+	for (pos = 0; y; y >>= 1)
+		pos++;
+
+	return pos-1;
+}
+
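+/* Returns the position of the least significant clear bit within the 4-bit
+ * blocking resource bitmap (e.g. 0x7 -> 3). Returns 0xff when all four
+ * bits are already set.
+ */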
+static inline uint8_t hal_find_zero_bit(uint8_t x)
+{
+	uint8_t y = (~x & (x+1)) & BLOCK_RES_MASK;
+	uint8_t pos;
+
+	for (pos = 0; y; y >>= 1)
+		pos++;
+
+	return pos-1;
+}
+
+inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
+				       enum hal_reo_cmd_type type,
+				       uint32_t paddr_lo,
+				       uint8_t paddr_hi)
+{
+	switch (type) {
+	case CMD_GET_QUEUE_STATS:
+		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
+			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
+		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
+				    RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
+		break;
+	case CMD_FLUSH_QUEUE:
+		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
+					FLUSH_DESC_ADDR_31_0, paddr_lo);
+		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
+					FLUSH_DESC_ADDR_39_32, paddr_hi);
+		break;
+	case CMD_FLUSH_CACHE:
+		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
+					FLUSH_ADDR_31_0, paddr_lo);
+		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
+					FLUSH_ADDR_39_32, paddr_hi);
+		break;
+	case CMD_UPDATE_RX_REO_QUEUE:
+		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
+					RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
+		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+					RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
+		break;
+	default:
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: Invalid REO command type\n", __func__);
+		break;
+	}
+}
+
+inline int hal_reo_cmd_queue_stats(void *reo_ring, struct hal_soc *soc,
+				    struct hal_reo_cmd_params *cmd)
+
+{
+	uint32_t *reo_desc, val;
+
+	hal_srng_access_start(soc, reo_ring);
+	reo_desc = hal_srng_src_get_next(soc, reo_ring);
+
+	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
+			     sizeof(struct reo_get_queue_stats));
+
+	/* Offsets of descriptor fields defined in HW headers start from
+	 * the field after TLV header */
+	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
+
+	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
+		REO_STATUS_REQUIRED, cmd->std.need_status);
+
+	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
+				   cmd->std.addr_lo,
+				   cmd->std.addr_hi);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
+			      cmd->u.stats_params.clear);
+
+	hal_srng_access_end(soc, reo_ring);
+
+	val = reo_desc[CMD_HEADER_DW_OFFSET];
+	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
+				     val);
+}
+
+inline int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc,
+				    struct hal_reo_cmd_params *cmd)
+{
+	uint32_t *reo_desc, val;
+
+	hal_srng_access_start(soc, reo_ring);
+	reo_desc = hal_srng_src_get_next(soc, reo_ring);
+
+	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
+			     sizeof(struct reo_flush_queue));
+
+	/* Offsets of descriptor fields defined in HW headers start from
+	 * the field after TLV header */
+	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
+
+	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
+		REO_STATUS_REQUIRED, cmd->std.need_status);
+
+	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
+		cmd->std.addr_hi);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
+		BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
+		cmd->u.fl_queue_params.use_after_flush);
+
+	if (cmd->u.fl_queue_params.use_after_flush) {
+		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
+			BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
+	}
+
+	hal_srng_access_end(soc, reo_ring);
+	val = reo_desc[CMD_HEADER_DW_OFFSET];
+	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
+				     val);
+}
+
+inline int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc,
+				    struct hal_reo_cmd_params *cmd)
+{
+	uint32_t *reo_desc, val;
+	struct hal_reo_cmd_flush_cache_params *cp;
+	uint8_t index;
+
+	cp = &cmd->u.fl_cache_params;
+
+	hal_srng_access_start(soc, reo_ring);
+
+	index = hal_find_zero_bit(soc->reo_res_bitmap);
+	/* We need a cache block resource for this operation, and REO HW has
+	 * only 4 such blocking resources. These resources are managed using
+	 * reo_res_bitmap, and we return failure if none is available.
+	 */
+	if (index > 3) {
+		qdf_print("%s, No blocking resource available!\n", __func__);
+		hal_srng_access_end(soc, reo_ring);
+		return QDF_STATUS_E_FAILURE;
+	}
+	soc->index = index;
+
+	reo_desc = hal_srng_src_get_next(soc, reo_ring);
+
+	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
+			     sizeof(struct reo_flush_cache));
+
+	/* Offsets of descriptor fields defined in HW headers start from
+	 * the field after TLV header */
+	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
+
+	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
+		REO_STATUS_REQUIRED, cmd->std.need_status);
+
+	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
+				   cmd->std.addr_hi);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
+		FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);
+
+	/* set it to 0 for now */
+	cp->rel_block_index = 0;
+	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
+		RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
+		CACHE_BLOCK_RESOURCE_INDEX, index);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
+		FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
+		BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->use_after_flush);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
+		cp->flush_all);
+
+	hal_srng_access_end(soc, reo_ring);
+	val = reo_desc[CMD_HEADER_DW_OFFSET];
+	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
+				     val);
+}
+
+inline int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
+				      struct hal_reo_cmd_params *cmd)
+
+{
+	uint32_t *reo_desc, val;
+	uint8_t index;
+
+	hal_srng_access_start(soc, reo_ring);
+
+	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
+		index = hal_find_one_bit(soc->reo_res_bitmap);
+		if (index > 3) {
+			hal_srng_access_end(soc, reo_ring);
+			qdf_print("%s: No blocking resource to unblock!\n",
+				  __func__);
+			return QDF_STATUS_E_FAILURE;
+		}
+	}
+
+	reo_desc = hal_srng_src_get_next(soc, reo_ring);
+
+	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
+			     sizeof(struct reo_unblock_cache));
+
+	/* Offsets of descriptor fields defined in HW headers start from
+	 * the field after TLV header */
+	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
+
+	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
+		REO_STATUS_REQUIRED, cmd->std.need_status);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
+		UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);
+
+	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
+		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
+			CACHE_BLOCK_RESOURCE_INDEX, index);
+		soc->index = index;
+	}
+
+	hal_srng_access_end(soc, reo_ring);
+	val = reo_desc[CMD_HEADER_DW_OFFSET];
+	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
+				     val);
+}
+
+inline int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc,
+					   struct hal_reo_cmd_params *cmd)
+{
+	uint32_t *reo_desc, val;
+
+	hal_srng_access_start(soc, reo_ring);
+	reo_desc = hal_srng_src_get_next(soc, reo_ring);
+
+	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
+			     sizeof(struct reo_flush_timeout_list));
+
+	/* Offsets of descriptor fields defined in HW headers start from
+	 * the field after TLV header */
+	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
+
+	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
+		REO_STATUS_REQUIRED, cmd->std.need_status);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
+		cmd->u.fl_tim_list_params.ac_list);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
+		MINIMUM_RELEASE_DESC_COUNT,
+		cmd->u.fl_tim_list_params.min_rel_desc);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
+		MINIMUM_FORWARD_BUF_COUNT,
+		cmd->u.fl_tim_list_params.min_fwd_buf);
+
+	hal_srng_access_end(soc, reo_ring);
+	val = reo_desc[CMD_HEADER_DW_OFFSET];
+	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
+				     val);
+}
+
+inline int hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc,
+					struct hal_reo_cmd_params *cmd)
+{
+	uint32_t *reo_desc, val;
+	struct hal_reo_cmd_update_queue_params *p;
+
+	p = &cmd->u.upd_queue_params;
+
+	hal_srng_access_start(soc, reo_ring);
+	reo_desc = hal_srng_src_get_next(soc, reo_ring);
+
+	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
+			     sizeof(struct reo_update_rx_reo_queue));
+
+	/* Offsets of descriptor fields defined in HW headers start from
+	 * the field after TLV header */
+	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
+
+	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
+		REO_STATUS_REQUIRED, cmd->std.need_status);
+
+	hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
+		cmd->std.addr_lo, cmd->std.addr_hi);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
+			      p->update_vld);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
+		p->update_assoc_link_desc);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_DISABLE_DUPLICATE_DETECTION,
+		p->update_disable_dup_detect);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_SOFT_REORDER_ENABLE,
+		p->update_soft_reorder_enab);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_AC, p->update_ac);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_BAR, p->update_bar);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_RTY, p->update_rty);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_OOR_MODE, p->update_oor_mode);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_PN_SIZE, p->update_pn_size);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_SVLD, p->update_svld);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_SSN, p->update_ssn);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
+		p->update_seq_2k_err_detect);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_PN_VALID, p->update_pn_valid);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
+		UPDATE_PN, p->update_pn);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
+		RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
+		VLD, p->vld);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
+		ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
+		p->assoc_link_desc);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
+		DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
+		SOFT_REORDER_ENABLE, p->soft_reorder_enab);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
+		BAR, p->bar);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
+		CHK_2K_MODE, p->chk_2k_mode);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
+		RTY, p->rty);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
+		OOR_MODE, p->oor_mode);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
+		PN_CHECK_NEEDED, p->pn_check_needed);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
+		PN_SHALL_BE_EVEN, p->pn_even);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
+		PN_SHALL_BE_UNEVEN, p->pn_uneven);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
+		PN_HANDLING_ENABLE, p->pn_hand_enab);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
+		IGNORE_AMPDU_FLAG, p->ignore_ampdu);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
+		BA_WINDOW_SIZE, p->ba_window_size);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
+		PN_SIZE, p->pn_size);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
+		SVLD, p->svld);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
+		SSN, p->ssn);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
+		SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
+		PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
+		PN_31_0, p->pn_31_0);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
+		PN_63_32, p->pn_63_32);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
+		PN_95_64, p->pn_95_64);
+
+	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
+		PN_127_96, p->pn_127_96);
+
+	hal_srng_access_end(soc, reo_ring);
+	val = reo_desc[CMD_HEADER_DW_OFFSET];
+	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
+				     val);
+}
+
+inline void hal_reo_queue_stats_status(uint32_t *reo_desc,
+			     struct hal_reo_queue_status *st)
+{
+	uint32_t val;
+
+	/* Offsets of descriptor fields defined in HW headers start
+	 * from the field after TLV header */
+	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
+
+	/* header */
+	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_GET_QUEUE_STATS, st->header);
+
+	/* SSN */
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
+	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);
+
+	/* current index */
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
+					 CURRENT_INDEX)];
+	st->curr_idx =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
+			      CURRENT_INDEX, val);
+
+	/* PN bits */
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
+					 PN_31_0)];
+	st->pn_31_0 =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
+			      PN_31_0, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
+					 PN_63_32)];
+	st->pn_63_32 =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
+			      PN_63_32, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
+					 PN_95_64)];
+	st->pn_95_64 =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
+			      PN_95_64, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
+					 PN_127_96)];
+	st->pn_127_96 =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
+			      PN_127_96, val);
+
+	/* timestamps */
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
+					 LAST_RX_ENQUEUE_TIMESTAMP)];
+	st->last_rx_enq_tstamp =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
+			      LAST_RX_ENQUEUE_TIMESTAMP, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
+					 LAST_RX_DEQUEUE_TIMESTAMP)];
+	st->last_rx_deq_tstamp =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
+			      LAST_RX_DEQUEUE_TIMESTAMP, val);
+
+	/* rx bitmap */
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
+					 RX_BITMAP_31_0)];
+	st->rx_bitmap_31_0 =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
+			      RX_BITMAP_31_0, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
+					 RX_BITMAP_63_32)];
+	st->rx_bitmap_63_32 =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
+			      RX_BITMAP_63_32, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
+					 RX_BITMAP_95_64)];
+	st->rx_bitmap_95_64 =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
+			      RX_BITMAP_95_64, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
+					 RX_BITMAP_127_96)];
+	st->rx_bitmap_127_96 =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
+			      RX_BITMAP_127_96, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
+					 RX_BITMAP_159_128)];
+	st->rx_bitmap_159_128 =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
+			      RX_BITMAP_159_128, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
+					 RX_BITMAP_191_160)];
+	st->rx_bitmap_191_160 =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
+			      RX_BITMAP_191_160, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
+					 RX_BITMAP_223_192)];
+	st->rx_bitmap_223_192 =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
+			      RX_BITMAP_223_192, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
+					 RX_BITMAP_255_224)];
+	st->rx_bitmap_255_224 =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
+			      RX_BITMAP_255_224, val);
+
+	/* various counts */
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
+					 CURRENT_MPDU_COUNT)];
+	st->curr_mpdu_cnt =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
+			      CURRENT_MPDU_COUNT, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
+					 CURRENT_MSDU_COUNT)];
+	st->curr_msdu_cnt =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
+			      CURRENT_MSDU_COUNT, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
+					 TIMEOUT_COUNT)];
+	st->fwd_timeout_cnt =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
+			      TIMEOUT_COUNT, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
+					 FORWARD_DUE_TO_BAR_COUNT)];
+	st->fwd_bar_cnt =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
+			      FORWARD_DUE_TO_BAR_COUNT, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
+					 DUPLICATE_COUNT)];
+	st->dup_cnt =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
+			      DUPLICATE_COUNT, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
+					 FRAMES_IN_ORDER_COUNT)];
+	st->frms_in_order_cnt =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
+			      FRAMES_IN_ORDER_COUNT, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
+					 BAR_RECEIVED_COUNT)];
+	st->bar_rcvd_cnt =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
+			      BAR_RECEIVED_COUNT, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
+					 MPDU_FRAMES_PROCESSED_COUNT)];
+	st->mpdu_frms_cnt =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
+			      MPDU_FRAMES_PROCESSED_COUNT, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
+					 MSDU_FRAMES_PROCESSED_COUNT)];
+	st->msdu_frms_cnt =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
+			      MSDU_FRAMES_PROCESSED_COUNT, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
+					 TOTAL_PROCESSED_BYTE_COUNT)];
+	st->total_cnt =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
+			      TOTAL_PROCESSED_BYTE_COUNT, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
+					 LATE_RECEIVE_MPDU_COUNT)];
+	st->late_recv_mpdu_cnt =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
+			      LATE_RECEIVE_MPDU_COUNT, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
+					 WINDOW_JUMP_2K)];
+	st->win_jump_2k =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
+			      WINDOW_JUMP_2K, val);
+
+	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
+					 HOLE_COUNT)];
+	st->hole_cnt =
+		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
+			      HOLE_COUNT, val);
+}
+
+inline void hal_reo_flush_queue_status(uint32_t *reo_desc,
+				    struct hal_reo_flush_queue_status *st)
+{
+	uint32_t val;
+
+	/* Offsets of descriptor fields defined in HW headers start
+	 * from the field after TLV header */
+	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
+
+	/* header */
+	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_QUEUE, st->header);
+
+	/* error bit */
+	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_2,
+					 ERROR_DETECTED)];
+	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
+				  val);
+}
+
+inline void hal_reo_flush_cache_status(uint32_t *reo_desc, struct hal_soc *soc,
+				    struct hal_reo_flush_cache_status *st)
+{
+	uint32_t val;
+
+	/* Offsets of descriptor fields defined in HW headers start
+	 * from the field after TLV header */
+	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
+
+	/* header */
+	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_CACHE, st->header);
+
+	/* error bit */
+	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
+					 ERROR_DETECTED)];
+	st->error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, ERROR_DETECTED,
+				  val);
+
+	/* block error */
+	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
+					 BLOCK_ERROR_DETAILS)];
+	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
+					BLOCK_ERROR_DETAILS,
+					val);
+	if (!st->block_error)
+		qdf_set_bit(soc->index, &soc->reo_res_bitmap);
+
+	/* cache flush status */
+	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
+					 CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
+	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
+					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
+					val);
+
+	/* cache flush descriptor type */
+	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
+				  CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
+	st->cache_flush_status_desc_type =
+		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
+			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
+			      val);
+
+	/* cache flush count */
+	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
+				  CACHE_CONTROLLER_FLUSH_COUNT)];
+	st->cache_flush_cnt =
+		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
+			      CACHE_CONTROLLER_FLUSH_COUNT,
+			      val);
+
+}
+
+inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
+					 struct hal_soc *soc,
+					 struct hal_reo_unblk_cache_status *st)
+{
+	uint32_t val;
+
+	/* Offsets of descriptor fields defined in HW headers start
+	 * from the field after TLV header */
+	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
+
+	/* header */
+	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_UNBLOCK_CACHE, st->header);
+
+	/* error bit */
+	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
+				  ERROR_DETECTED)];
+	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
+				  ERROR_DETECTED,
+				  val);
+
+	/* unblock type */
+	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
+				  UNBLOCK_TYPE)];
+	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
+					 UNBLOCK_TYPE,
+					 val);
+
+	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
+		qdf_clear_bit(soc->index, &soc->reo_res_bitmap);
+}
+
+inline void hal_reo_flush_timeout_list_status(
+			 uint32_t *reo_desc,
+			 struct hal_reo_flush_timeout_list_status *st)
+
+{
+	uint32_t val;
+
+	/* Offsets of descriptor fields defined in HW headers start
+	 * from the field after TLV header */
+	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
+
+	/* header */
+	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_TIMEOUT_LIST, st->header);
+
+	/* error bit */
+	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
+					 ERROR_DETECTED)];
+	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
+				  ERROR_DETECTED,
+				  val);
+
+	/* list empty */
+	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
+					 TIMOUT_LIST_EMPTY)];
+	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
+					TIMOUT_LIST_EMPTY,
+					val);
+
+	/* release descriptor count */
+	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
+					 RELEASE_DESC_COUNT)];
+	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
+				       RELEASE_DESC_COUNT,
+				       val);
+
+	/* forward buf count */
+	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
+					 FORWARD_BUF_COUNT)];
+	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
+				       FORWARD_BUF_COUNT,
+				       val);
+}
+
+inline void hal_reo_desc_thres_reached_status(
+			 uint32_t *reo_desc,
+			 struct hal_reo_desc_thres_reached_status *st)
+{
+	uint32_t val;
+
+	/* Offsets of descriptor fields defined in HW headers start
+	 * from the field after TLV header */
+	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
+
+	/* header */
+	HAL_REO_STATUS_GET_HEADER(reo_desc,
+			      REO_DESCRIPTOR_THRESHOLD_REACHED, st->header);
+
+	/* threshold index */
+	val = reo_desc[HAL_OFFSET_DW(
+				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
+				 THRESHOLD_INDEX)];
+	st->thres_index = HAL_GET_FIELD(
+				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
+				THRESHOLD_INDEX,
+				val);
+
+	/* link desc counters */
+	val = reo_desc[HAL_OFFSET_DW(
+				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
+				 LINK_DESCRIPTOR_COUNTER0)];
+	st->link_desc_counter0 = HAL_GET_FIELD(
+				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
+				LINK_DESCRIPTOR_COUNTER0,
+				val);
+
+	val = reo_desc[HAL_OFFSET_DW(
+				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
+				 LINK_DESCRIPTOR_COUNTER1)];
+	st->link_desc_counter1 = HAL_GET_FIELD(
+				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
+				LINK_DESCRIPTOR_COUNTER1,
+				val);
+
+	val = reo_desc[HAL_OFFSET_DW(
+				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
+				 LINK_DESCRIPTOR_COUNTER2)];
+	st->link_desc_counter2 = HAL_GET_FIELD(
+				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
+				LINK_DESCRIPTOR_COUNTER2,
+				val);
+
+	val = reo_desc[HAL_OFFSET_DW(
+				 REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
+				 LINK_DESCRIPTOR_COUNTER_SUM)];
+	st->link_desc_counter_sum = HAL_GET_FIELD(
+				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
+				LINK_DESCRIPTOR_COUNTER_SUM,
+				val);
+}
+
+inline void hal_reo_rx_update_queue_status(uint32_t *reo_desc,
+				      struct hal_reo_update_rx_queue_status *st)
+{
+	/* Offsets of descriptor fields defined in HW headers start
+	 * from the field after TLV header */
+	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
+
+	/* header */
+	HAL_REO_STATUS_GET_HEADER(reo_desc,
+			      REO_UPDATE_RX_REO_QUEUE, st->header);
+}
+
+/**
+ * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
+ * with command number
+ * @soc: Handle to HAL SoC structure
+ * @hal_srng: Handle to HAL SRNG structure
+ *
+ * Return: none
+ */
+inline void hal_reo_init_cmd_ring(struct hal_soc *soc, void *hal_srng)
+{
+	int cmd_num;
+	uint32_t *desc_addr;
+	struct hal_srng_params srng_params;
+	uint32_t desc_size;
+	uint32_t num_desc;
+
+	hal_get_srng_params(soc, hal_srng, &srng_params);
+
+	desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
+	desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
+	desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
+	num_desc = srng_params.num_entries;
+	cmd_num = 1;
+	while (num_desc) {
+		/* Offsets of descriptor fields defined in HW headers start
+		 * from the field after TLV header */
+		HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
+			REO_CMD_NUMBER, cmd_num);
+		desc_addr += desc_size;
+		num_desc--; cmd_num++;
+	}
+
+	soc->reo_res_bitmap = 0;
+}

+ 544 - 0
hal/wifi3.0/hal_reo.h

@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HAL_REO_H_
+#define _HAL_REO_H_
+
+/* HW headers */
+#include <reo_descriptor_threshold_reached_status.h>
+#include <reo_flush_queue.h>
+#include <reo_flush_timeout_list_status.h>
+#include <reo_unblock_cache.h>
+#include <reo_flush_cache.h>
+#include <reo_flush_queue_status.h>
+#include <reo_get_queue_stats.h>
+#include <reo_unblock_cache_status.h>
+#include <reo_flush_cache_status.h>
+#include <reo_flush_timeout_list.h>
+#include <reo_get_queue_stats_status.h>
+#include <reo_update_rx_reo_queue.h>
+#include <reo_update_rx_reo_queue_status.h>
+#include <tlv_tag_def.h>
+
+/* SW headers */
+#include "hal_api.h"
+
+/*---------------------------------------------------------------------------
+  Preprocessor definitions and constants
+  ---------------------------------------------------------------------------*/
+
+/* TLV values */
+#define HAL_REO_GET_QUEUE_STATS_TLV	WIFIREO_GET_QUEUE_STATS_E
+#define HAL_REO_FLUSH_QUEUE_TLV		WIFIREO_FLUSH_QUEUE_E
+#define HAL_REO_FLUSH_CACHE_TLV		WIFIREO_FLUSH_CACHE_E
+#define HAL_REO_UNBLOCK_CACHE_TLV	WIFIREO_UNBLOCK_CACHE_E
+#define HAL_REO_FLUSH_TIMEOUT_LIST_TLV	WIFIREO_FLUSH_TIMEOUT_LIST_E
+#define HAL_REO_RX_UPDATE_QUEUE_TLV     WIFIREO_UPDATE_RX_REO_QUEUE_E
+
+#define HAL_REO_QUEUE_STATS_STATUS_TLV	WIFIREO_GET_QUEUE_STATS_STATUS_E
+#define HAL_REO_FLUSH_QUEUE_STATUS_TLV	WIFIREO_FLUSH_QUEUE_STATUS_E
+#define HAL_REO_FLUSH_CACHE_STATUS_TLV	WIFIREO_FLUSH_CACHE_STATUS_E
+#define HAL_REO_UNBLK_CACHE_STATUS_TLV	WIFIREO_UNBLOCK_CACHE_STATUS_E
+#define HAL_REO_TIMOUT_LIST_STATUS_TLV	WIFIREO_FLUSH_TIMEOUT_LIST_STATUS_E
+#define HAL_REO_DESC_THRES_STATUS_TLV	\
+	WIFIREO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_E
+#define HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV WIFIREO_UPDATE_RX_REO_QUEUE_STATUS_E
+
+#define HAL_SET_FIELD(block, field, value) \
+	((value << (block ## _ ## field ## _LSB)) &	\
+	 (block ## _ ## field ## _MASK))
+
+#define HAL_GET_FIELD(block, field, value)		\
+	((value & (block ## _ ## field ## _MASK)) >>	\
+	 (block ## _ ## field ## _LSB))
+
+#define HAL_SET_TLV_HDR(desc, tag, len) \
+	do {						\
+		((struct tlv_32_hdr *) desc)->tlv_tag = tag;	\
+		((struct tlv_32_hdr *) desc)->tlv_len = len;	\
+	} while (0)
+
+#define HAL_GET_TLV(desc)	(((struct tlv_32_hdr *) desc)->tlv_tag)
+
+#define HAL_OFFSET_DW(_block, _field) (HAL_OFFSET(_block, _field) >> 2)
+/* dword offsets in REO cmd TLV */
+#define CMD_HEADER_DW_OFFSET	0
+
+#define HAL_REO_STATUS_GET_HEADER(d, b, h) do {				\
+	uint32_t val1 = d[HAL_OFFSET_DW(b ##_STATUS_0,			\
+			UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)];	\
+	h.cmd_num =							\
+		HAL_GET_FIELD(						\
+			      UNIFORM_REO_STATUS_HEADER_0, REO_STATUS_NUMBER, \
+			      val1);					\
+	h.exec_time =							\
+		HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0,		\
+			      CMD_EXECUTION_TIME, val1);		\
+	h.status =							\
+		HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_0,		\
+			      REO_CMD_EXECUTION_STATUS, val1);		\
+	val1 = d[HAL_OFFSET_DW(b ##_STATUS_1,				\
+			   UNIFORM_REO_STATUS_HEADER_STATUS_HEADER)];   \
+	h.tstamp =							\
+		HAL_GET_FIELD(UNIFORM_REO_STATUS_HEADER_1, TIMESTAMP, val1); \
+} while (0)
+
+/**
+ * enum reo_unblock_cache_type: Enum for unblock type in REO unblock command
+ * @UNBLOCK_RES_INDEX: Unblock a block resource
+ * @UNBLOCK_CACHE: Unblock cache
+ */
+enum reo_unblock_cache_type {
+	UNBLOCK_RES_INDEX	= 0,
+	UNBLOCK_CACHE		= 1
+};
+
+/**
+ * enum reo_thres_index_reg: Enum for reo descriptor usage counter for
+ *	which threshold status is being indicated.
+ * @reo_desc_counter0_threshold: counter0 reached threshold
+ * @reo_desc_counter1_threshold: counter1 reached threshold
+ * @reo_desc_counter2_threshold: counter2 reached threshold
+ * @reo_desc_counter_sum_threshold: Total count reached threshold
+ */
+enum reo_thres_index_reg {
+	reo_desc_counter0_threshold = 0,
+	reo_desc_counter1_threshold = 1,
+	reo_desc_counter2_threshold = 2,
+	reo_desc_counter_sum_threshold = 3
+};
+
+/**
+ * enum reo_cmd_exec_status: Enum for execution status of REO command
+ *
+ * @HAL_REO_CMD_SUCCESS: Command has been executed successfully
+ * @HAL_REO_CMD_BLOCKED: Command could not be executed as the queue or cache
+ *	was blocked
+ * @HAL_REO_CMD_FAILED: Command has encountered problems when executing, like
+ *	the queue descriptor not being valid
+ */
+enum reo_cmd_exec_status {
+	HAL_REO_CMD_SUCCESS = 0,
+	HAL_REO_CMD_BLOCKED = 1,
+	HAL_REO_CMD_FAILED = 2
+};
+
+/**
+ * enum hal_reo_cmd_type: Enum for REO command type
+ * @CMD_GET_QUEUE_STATS: Get REO queue status/stats
+ * @CMD_FLUSH_QUEUE: Flush all frames in REO queue
+ * @CMD_FLUSH_CACHE: Flush descriptor entries in the cache
+ * @CMD_UNBLOCK_CACHE: Unblock a descriptor’s address that was blocked
+ *	earlier with a ‘REO_FLUSH_CACHE’ command
+ * @CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
+ * @CMD_UPDATE_RX_REO_QUEUE: Update REO queue settings
+ */
+enum hal_reo_cmd_type {
+	CMD_GET_QUEUE_STATS	= 0,
+	CMD_FLUSH_QUEUE		= 1,
+	CMD_FLUSH_CACHE		= 2,
+	CMD_UNBLOCK_CACHE	= 3,
+	CMD_FLUSH_TIMEOUT_LIST	= 4,
+	CMD_UPDATE_RX_REO_QUEUE = 5
+};
+
+/**
+ * struct hal_reo_cmd_params_std: Standard REO command parameters
+ * @need_status: Status required for the command
+ * @addr_lo: Lower 32 bits of REO queue descriptor address
+ * @addr_hi: Upper 8 bits of REO queue descriptor address
+ */
+struct hal_reo_cmd_params_std {
+	bool need_status;
+	uint32_t addr_lo;
+	uint8_t addr_hi;
+};
+
+/**
+ * struct hal_reo_cmd_get_queue_stats_params: Parameters to
+ *	CMD_GET_QUEUE_STATS command
+ * @clear: Clear stats after retrieving
+ */
+struct hal_reo_cmd_get_queue_stats_params {
+	bool clear;
+};
+
+/**
+ * struct hal_reo_cmd_flush_queue_params: Parameters to CMD_FLUSH_QUEUE
+ * @use_after_flush: Block usage after flush till unblock command
+ * @index: Blocking resource to be used
+ */
+struct hal_reo_cmd_flush_queue_params {
+	bool use_after_flush;
+	uint8_t index;
+};
+
+/**
+ * struct hal_reo_cmd_flush_cache_params: Parameters to CMD_FLUSH_CACHE
+ * @fwd_mpdus_in_queue: Forward MPDUs before flushing descriptor
+ * @rel_block_index: Release blocking resource used earlier
+ * @cache_block_res_index: Blocking resource to be used
+ * @flush_no_inval: Flush without invalidating descriptor
+ * @use_after_flush: Block usage after flush till unblock command
+ * @flush_all: Flush entire REO cache
+ */
+struct hal_reo_cmd_flush_cache_params {
+	bool fwd_mpdus_in_queue;
+	bool rel_block_index;
+	uint8_t cache_block_res_index;
+	bool flush_no_inval;
+	bool use_after_flush;
+	bool flush_all;
+};
+
+/**
+ * struct hal_reo_cmd_unblock_cache_params: Parameters to CMD_UNBLOCK_CACHE
+ * @type: Unblock type (enum reo_unblock_cache_type)
+ * @index: Blocking index to be released
+ */
+struct hal_reo_cmd_unblock_cache_params {
+	enum reo_unblock_cache_type type;
+	uint8_t index;
+};
+
+/**
+ * struct hal_reo_cmd_flush_timeout_list_params: Parameters to
+ *		CMD_FLUSH_TIMEOUT_LIST
+ * @ac_list: AC timeout list to be flushed
+ * @min_rel_desc: Min. number of link descriptors to be released
+ * @min_fwd_buf: Min. number of buffers to be forwarded
+ */
+struct hal_reo_cmd_flush_timeout_list_params {
+	uint8_t ac_list;
+	uint16_t min_rel_desc;
+	uint16_t min_fwd_buf;
+};
+
+/**
+ * struct hal_reo_cmd_update_queue_params: Parameters to CMD_UPDATE_RX_REO_QUEUE
+ * @update_rx_queue_num: Update receive queue number
+ * @update_vld: Update valid bit
+ * @update_assoc_link_desc: Update associated link descriptor
+ * @update_disable_dup_detect: Update duplicate detection
+ * @update_soft_reorder_enab: Update soft reorder enable
+ * @update_ac: Update access category
+ * @update_bar: Update BAR received bit
+ * @update_rty: Update retry bit
+ * @update_chk_2k_mode: Update chk_2k_mode setting
+ * @update_oor_mode: Update OOR mode setting
+ * @update_ba_window_size: Update BA window size
+ * @update_pn_check_needed: Update pn_check_needed
+ * @update_pn_even: Update pn_even
+ * @update_pn_uneven: Update pn_uneven
+ * @update_pn_hand_enab: Update pn_handling_enable
+ * @update_pn_size: Update pn_size
+ * @update_ignore_ampdu: Update ignore_ampdu
+ * @update_svld: update svld
+ * @update_ssn: Update SSN
+ * @update_seq_2k_err_detect: Update seq_2k_err_detected flag
+ * @update_pn_err_detect: Update pn_err_detected flag
+ * @update_pn_valid: Update pn_valid
+ * @update_pn: Update PN
+ * @rx_queue_num: rx_queue_num to be updated
+ * @vld: valid bit to be updated
+ * @assoc_link_desc: assoc_link_desc counter
+ * @disable_dup_detect: disable_dup_detect to be updated
+ * @soft_reorder_enab: soft_reorder_enab to be updated
+ * @ac: AC to be updated
+ * @bar: BAR flag to be updated
+ * @rty: RTY flag to be updated
+ * @chk_2k_mode: check_2k_mode setting to be updated
+ * @oor_mode: oor_mode to be updated
+ * @pn_check_needed: pn_check_needed to be updated
+ * @pn_even: pn_even to be updated
+ * @pn_uneven: pn_uneven to be updated
+ * @pn_hand_enab: pn_handling_enable to be updated
+ * @ignore_ampdu: ignore_ampdu to be updated
+ * @ba_window_size: BA window size to be updated
+ * @pn_size: pn_size to be updated
+ * @svld: svld flag to be updated
+ * @ssn: SSN to be updated
+ * @seq_2k_err_detect: seq_2k_err_detected flag to be updated
+ * @pn_err_detect: pn_err_detected flag to be updated
+ * @pn_31_0: PN bits 31-0
+ * @pn_63_32: PN bits 63-32
+ * @pn_95_64: PN bits 95-64
+ * @pn_127_96: PN bits 127-96
+ */
+struct hal_reo_cmd_update_queue_params {
+	uint32_t update_rx_queue_num:1,
+		update_vld:1,
+		update_assoc_link_desc:1,
+		update_disable_dup_detect:1,
+		update_soft_reorder_enab:1,
+		update_ac:1,
+		update_bar:1,
+		update_rty:1,
+		update_chk_2k_mode:1,
+		update_oor_mode:1,
+		update_ba_window_size:1,
+		update_pn_check_needed:1,
+		update_pn_even:1,
+		update_pn_uneven:1,
+		update_pn_hand_enab:1,
+		update_pn_size:1,
+		update_ignore_ampdu:1,
+		update_svld:1,
+		update_ssn:1,
+		update_seq_2k_err_detect:1,
+		update_pn_err_detect:1,
+		update_pn_valid:1,
+		update_pn:1;
+	uint32_t rx_queue_num:16,
+		vld:1,
+		assoc_link_desc:2,
+		disable_dup_detect:1,
+		soft_reorder_enab:1,
+		ac:2,
+		bar:1,
+		rty:1,
+		chk_2k_mode:1,
+		oor_mode:1,
+		pn_check_needed:1,
+		pn_even:1,
+		pn_uneven:1,
+		pn_hand_enab:1,
+		ignore_ampdu:1;
+	uint32_t ba_window_size:8,
+		pn_size:2,
+		svld:1,
+		ssn:12,
+		seq_2k_err_detect:1,
+		pn_err_detect:1;
+	uint32_t pn_31_0:32;
+	uint32_t pn_63_32:32;
+	uint32_t pn_95_64:32;
+	uint32_t pn_127_96:32;
+};
+
+/**
+ * struct hal_reo_cmd_params: Common structure to pass REO command parameters
+ * @hal_reo_cmd_params_std: Standard parameters
+ * @u: Union of various REO command parameters
+ */
+struct hal_reo_cmd_params {
+	struct hal_reo_cmd_params_std std;
+	union {
+		struct hal_reo_cmd_get_queue_stats_params stats_params;
+		struct hal_reo_cmd_flush_queue_params fl_queue_params;
+		struct hal_reo_cmd_flush_cache_params fl_cache_params;
+		struct hal_reo_cmd_unblock_cache_params unblk_cache_params;
+		struct hal_reo_cmd_flush_timeout_list_params fl_tim_list_params;
+		struct hal_reo_cmd_update_queue_params upd_queue_params;
+	} u;
+};
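+
+/*
+ * Example (illustrative only; qdesc_paddr, reo_cmd_srng and hal_soc are
+ * placeholders): request queue statistics for a REO queue descriptor and
+ * clear them after reading:
+ *
+ *	struct hal_reo_cmd_params params = {0};
+ *
+ *	params.std.need_status = true;
+ *	params.std.addr_lo = qdesc_paddr & 0xffffffff;
+ *	params.std.addr_hi = (uint64_t)qdesc_paddr >> 32;
+ *	params.u.stats_params.clear = true;
+ *	hal_reo_cmd_queue_stats(reo_cmd_srng, hal_soc, &params);
+ */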
+
+/**
+ * struct hal_reo_status_header: Common REO status header
+ * @cmd_num: Command number
+ * @exec_time: execution time
+ * @status: command execution status
+ * @tstamp: Timestamp at which the status was updated
+ */
+struct hal_reo_status_header {
+	uint16_t cmd_num;
+	uint16_t exec_time;
+	enum reo_cmd_exec_status status;
+	uint32_t tstamp;
+};
+
+/**
+ * struct hal_reo_queue_status: REO queue status structure
+ * @header: Common REO status header
+ * @ssn: SSN of current BA window
+ * @curr_idx: last forwarded pkt
+ * @pn_31_0, pn_63_32, pn_95_64, pn_127_96:
+ *	PN number bits extracted from IV field
+ * @last_rx_enq_tstamp: Last enqueue timestamp
+ * @last_rx_deq_tstamp: Last dequeue timestamp
+ * @rx_bitmap_31_0, rx_bitmap_63_32, rx_bitmap_95_64
+ * @rx_bitmap_127_96, rx_bitmap_159_128, rx_bitmap_191_160
+ * @rx_bitmap_223_192, rx_bitmap_255_224: Each bit corresponds to a frame
+ *	held in re-order queue
+ * @curr_mpdu_cnt, curr_msdu_cnt: Number of MPDUs and MSDUs in the queue
+ * @fwd_timeout_cnt: Frames forwarded due to timeout
+ * @fwd_bar_cnt: Frames forwarded due to BAR frame
+ * @dup_cnt: duplicate frames detected
+ * @frms_in_order_cnt: Frames received in order
+ * @bar_rcvd_cnt: BAR frame count
+ * @mpdu_frms_cnt, msdu_frms_cnt, total_cnt: MPDU, MSDU, total frames
+ *	processed by REO
+ * @late_recv_mpdu_cnt: MPDUs received after the window had moved on
+ * @win_jump_2k: 2K jump count
+ * @hole_cnt: sequence hole count
+ */
+struct hal_reo_queue_status {
+	struct hal_reo_status_header header;
+	uint16_t ssn;
+	uint8_t curr_idx;
+	uint32_t pn_31_0, pn_63_32, pn_95_64, pn_127_96;
+	uint32_t last_rx_enq_tstamp, last_rx_deq_tstamp;
+	uint32_t rx_bitmap_31_0, rx_bitmap_63_32, rx_bitmap_95_64;
+	uint32_t rx_bitmap_127_96, rx_bitmap_159_128, rx_bitmap_191_160;
+	uint32_t rx_bitmap_223_192, rx_bitmap_255_224;
+	uint8_t curr_mpdu_cnt, curr_msdu_cnt;
+	uint8_t fwd_timeout_cnt, fwd_bar_cnt;
+	uint16_t dup_cnt;
+	uint32_t frms_in_order_cnt;
+	uint8_t bar_rcvd_cnt;
+	uint32_t mpdu_frms_cnt, msdu_frms_cnt, total_cnt;
+	uint16_t late_recv_mpdu_cnt;
+	uint8_t win_jump_2k;
+	uint16_t hole_cnt;
+};
+
+/**
+ * struct hal_reo_flush_queue_status: FLUSH_QUEUE status structure
+ * @header: Common REO status header
+ * @error: Error detected
+ */
+struct hal_reo_flush_queue_status {
+	struct hal_reo_status_header header;
+	bool error;
+};
+
+/**
+ * struct hal_reo_flush_cache_status: FLUSH_CACHE status structure
+ * @header: Common REO status header
+ * @error: Error detected
+ * @block_error: Blocking related error
+ * @cache_flush_status: Cache hit/miss
+ * @cache_flush_status_desc_type: type of descriptor flushed
+ * @cache_flush_cnt: number of lines actually flushed
+ */
+struct hal_reo_flush_cache_status {
+	struct hal_reo_status_header header;
+	bool error;
+	uint8_t block_error;
+	bool cache_flush_status;
+	uint8_t cache_flush_status_desc_type;
+	uint8_t cache_flush_cnt;
+};
+
+/**
+ * struct hal_reo_unblk_cache_status: UNBLOCK_CACHE status structure
+ * @header: Common REO status header
+ * @error: error detected
+ * @unblock_type: resource or cache
+ */
+struct hal_reo_unblk_cache_status {
+	struct hal_reo_status_header header;
+	bool error;
+	enum reo_unblock_cache_type unblock_type;
+};
+
+/**
+ * struct hal_reo_flush_timeout_list_status: FLUSH_TIMEOUT_LIST status structure
+ * @header: Common REO status header
+ * @error: error detected
+ * @list_empty: timeout list empty
+ * @rel_desc_cnt: number of link descriptors released
+ * @fwd_buf_cnt: number of buffers forwarded to REO destination ring
+ */
+struct hal_reo_flush_timeout_list_status {
+	struct hal_reo_status_header header;
+	bool error;
+	bool list_empty;
+	uint16_t rel_desc_cnt;
+	uint16_t fwd_buf_cnt;
+};
+
+/**
+ * struct hal_reo_desc_thres_reached_status: desc_thres_reached status structure
+ * @header: Common REO status header
+ * @thres_index: Index of descriptor threshold counter
+ * @link_desc_counter0, link_desc_counter1, link_desc_counter2: descriptor
+ *	counter values
+ * @link_desc_counter_sum: overall descriptor count
+ */
+struct hal_reo_desc_thres_reached_status {
+	struct hal_reo_status_header header;
+	enum reo_thres_index_reg thres_index;
+	uint32_t link_desc_counter0, link_desc_counter1, link_desc_counter2;
+	uint32_t link_desc_counter_sum;
+};
+
+/**
+ * struct hal_reo_update_rx_queue_status: UPDATE_RX_QUEUE status structure
+ * @header: Common REO status header
+ */
+struct hal_reo_update_rx_queue_status {
+	struct hal_reo_status_header header;
+};
+
+/**
+ * union hal_reo_status: Union to pass REO status to callbacks
+ * @queue_status: Refer to struct hal_reo_queue_status
+ * @fl_cache_status: Refer to struct hal_reo_flush_cache_status
+ * @fl_queue_status: Refer to struct hal_reo_flush_queue_status
+ * @fl_timeout_status: Refer to struct hal_reo_flush_timeout_list_status
+ * @unblk_cache_status: Refer to struct hal_reo_unblk_cache_status
+ * @thres_status: Refer to struct hal_reo_desc_thres_reached_status
+ * @rx_queue_status: Refer to struct hal_reo_update_rx_queue_status
+ */
+union hal_reo_status {
+	struct hal_reo_queue_status queue_status;
+	struct hal_reo_flush_cache_status fl_cache_status;
+	struct hal_reo_flush_queue_status fl_queue_status;
+	struct hal_reo_flush_timeout_list_status fl_timeout_status;
+	struct hal_reo_unblk_cache_status unblk_cache_status;
+	struct hal_reo_desc_thres_reached_status thres_status;
+	struct hal_reo_update_rx_queue_status rx_queue_status;
+};
+
+/* Prototypes */
+/* REO command ring routines */
+int hal_reo_cmd_queue_stats(void *reo_ring, struct hal_soc *soc,
+			    struct hal_reo_cmd_params *cmd);
+int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc,
+			    struct hal_reo_cmd_params *cmd);
+int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc,
+			    struct hal_reo_cmd_params *cmd);
+int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
+			      struct hal_reo_cmd_params *cmd);
+int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc,
+				   struct hal_reo_cmd_params *cmd);
+int hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc,
+				struct hal_reo_cmd_params *cmd);
+
+/* REO status ring routines */
+void hal_reo_queue_stats_status(uint32_t *reo_desc,
+				struct hal_reo_queue_status *st);
+void hal_reo_flush_queue_status(uint32_t *reo_desc,
+				    struct hal_reo_flush_queue_status *st);
+void hal_reo_flush_cache_status(uint32_t *reo_desc, struct hal_soc *soc,
+				    struct hal_reo_flush_cache_status *st);
+void hal_reo_unblock_cache_status(uint32_t *reo_desc, struct hal_soc *soc,
+				      struct hal_reo_unblk_cache_status *st);
+void hal_reo_flush_timeout_list_status(
+			   uint32_t *reo_desc,
+			   struct hal_reo_flush_timeout_list_status *st);
+void hal_reo_desc_thres_reached_status(
+				uint32_t *reo_desc,
+				struct hal_reo_desc_thres_reached_status *st);
+void hal_reo_rx_update_queue_status(uint32_t *reo_desc,
+				    struct hal_reo_update_rx_queue_status *st);
+
+void hal_reo_init_cmd_ring(struct hal_soc *soc, void *hal_srng);
+
+#endif /* _HAL_REO_H_ */