
qcacmn: Lithium SRNG HAL

Initial version of HAL SRNG initialization and access functions.

Change-Id: Ic36b73bdf5053a06ca8cb2bc3a7e3edb53fc02e1
Karunakar Dasineni 8 years ago
parent
commit
8fbfeea05f
3 changed files with 2020 additions and 0 deletions
  1. 764 0
      hal/wifi3.0/hal_api.h
  2. 309 0
      hal/wifi3.0/hal_internal.h
  3. 947 0
      hal/wifi3.0/hal_srng.c

+ 764 - 0
hal/wifi3.0/hal_api.h

@@ -0,0 +1,764 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _HAL_API_H_
+#define _HAL_API_H_
+
+#include "qdf_types.h"
+#include "hal_internal.h"
+#include "hif_io32.h"
+
+/**
+ * hal_attach - Initialize HAL layer
+ * @hif_handle: Opaque HIF handle
+ * @qdf_dev: QDF device
+ *
+ * Return: Opaque HAL SOC handle
+ *		 NULL on failure
+ *
+ * This function should be called as part of HIF initialization (for accessing
+ * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
+ */
+extern void *hal_attach(void *hif_handle, qdf_device_t qdf_dev);
+
+/**
+ * hal_detach - Detach HAL layer
+ * @hal_soc: HAL SOC handle
+ *
+ * This function should be called as part of HIF detach
+ *
+ */
+extern void hal_detach(void *hal_soc);
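
A minimal lifecycle sketch, assuming hif_ctx and qdf_dev come from the usual HIF/QDF init path (names and error handling are illustrative, not part of this change):

	void *hal_soc = hal_attach(hif_ctx, qdf_dev);

	if (!hal_soc)
		return QDF_STATUS_E_FAILURE;	/* allocation or shadow memory setup failed */

	/* ... set up SRNG rings and run the data path ... */

	hal_detach(hal_soc);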
+
+/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
+enum hal_ring_type {
+	REO_DST,
+	REO_EXCEPTION,
+	REO_REINJECT,
+	REO_CMD,
+	REO_STATUS,
+	TCL_DATA,
+	TCL_CMD,
+	TCL_STATUS,
+	CE_SRC,
+	CE_DST,
+	CE_DST_STATUS,
+	WBM_IDLE_LINK,
+	SW2WBM_RELEASE,
+	WBM2SW_RELEASE,
+	RXDMA_BUF,
+	RXDMA_DST,
+	RXDMA_MONITOR_BUF,
+	RXDMA_MONITOR_STATUS,
+	RXDMA_MONITOR_DST,
+	MAX_RING_TYPES
+};
+
+/* SRNG flags passed in hal_srng_params.flags */
+#define HAL_SRNG_MSI_SWAP				0x00000008
+#define HAL_SRNG_RING_PTR_SWAP			0x00000010
+#define HAL_SRNG_DATA_TLV_SWAP			0x00000020
+#define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
+#define HAL_SRNG_MSI_INTR				0x00020000
+
+/**
+ * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
+ * used by callers for calculating the size of memory to be allocated before
+ * calling hal_srng_setup to setup the ring
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @ring_type: one of the types from hal_ring_type
+ *
+ */
+extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
+
+/* SRNG parameters to be passed to hal_srng_setup */
+struct hal_srng_params {
+	/* Physical base address of the ring */
+	qdf_dma_addr_t ring_base_paddr;
+	/* Virtual base address of the ring */
+	void *ring_base_vaddr;
+	/* Number of entries in ring */
+	uint32_t num_entries;
+	/* MSI Address */
+	qdf_dma_addr_t msi_addr;
+	/* MSI data */
+	uint32_t msi_data;
+	/* Interrupt timer threshold – in micro seconds */
+	uint32_t intr_timer_thres_us;
+	/* Interrupt batch counter threshold – in number of ring entries */
+	uint32_t intr_batch_cntr_thres_entries;
+	/* Low threshold – in number of ring entries
+	 * (valid for src rings only)
+	 */
+	uint32_t low_threshold;
+	/* Misc flags */
+	uint32_t flags;
+};
+
+/**
+ * hal_srng_setup - Initialize HW SRNG ring.
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @ring_type: one of the types from hal_ring_type
+ * @ring_num: Ring number if there are multiple rings of
+ *		same type (starting from 0)
+ * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
+ * @ring_params: SRNG ring params in hal_srng_params structure.
+ *
+ * Callers are expected to allocate contiguous ring memory of size
+ * 'num_entries * entry_size' bytes and pass the physical and virtual base
+ * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
+ * structure. Ring base address should be 8 byte aligned and size of each ring
+ * entry should be queried using the API hal_srng_get_entrysize
+ *
+ * Return: Opaque pointer to ring on success
+ *		 NULL on failure (if given ring is not available)
+ */
+extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
+	int mac_id, struct hal_srng_params *ring_params);
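
As a hedged illustration of the contract above, a caller might size and create one REO destination ring roughly as follows (the ring depth, interrupt thresholds and surrounding variables are assumptions, not part of this change):

	struct hal_srng_params params = {0};
	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
	uint32_t num_entries = 1024;	/* illustrative ring depth */
	qdf_dma_addr_t ring_paddr;
	void *ring_vaddr;
	void *hal_ring;

	/* Contiguous, 8-byte aligned ring memory of num_entries * entry_size */
	ring_vaddr = qdf_mem_alloc_consistent(qdf_dev, NULL,
			num_entries * entry_size, &ring_paddr);

	params.ring_base_vaddr = ring_vaddr;
	params.ring_base_paddr = ring_paddr;
	params.num_entries = num_entries;
	params.intr_timer_thres_us = 32;	/* illustrative thresholds */
	params.intr_batch_cntr_thres_entries = 1;

	hal_ring = hal_srng_setup(hal_soc, REO_DST, 0 /* ring_num */,
			0 /* mac_id */, &params);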
+
+/**
+ * hal_srng_cleanup - Deinitialize HW SRNG ring.
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_srng: Opaque HAL SRNG pointer
+ */
+extern void hal_srng_cleanup(void *hal_soc, void *hal_srng);
+
+/**
+ * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
+ * hal_srng_access_start if locked access is required
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Ring pointer (Source or Destination ring)
+ *
+ * Return: 0 on success; error on failure
+ */
+static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+
+	if (srng->ring_dir == HAL_SRNG_SRC_RING)
+		srng->u.src_ring.cached_tp =
+			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
+	else
+		srng->u.dst_ring.cached_hp =
+			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
+
+	return 0;
+}
+
+/**
+ * hal_srng_access_start - Start (locked) ring access
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Ring pointer (Source or Destination ring)
+ *
+ * Return: 0 on success; error on failure
+ */
+static inline int hal_srng_access_start(void *hal_soc, void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+
+	SRNG_LOCK(&(srng->lock));
+
+	return hal_srng_access_start_unlocked(hal_soc, hal_ring);
+}
+
+/**
+ * hal_srng_dst_get_next - Get next entry from a destination ring and move
+ * cached tail pointer
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Destination ring pointer
+ *
+ * Return: Opaque pointer for next ring entry; NULL on failure
+ */
+static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+	uint32_t *desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
+	uint32_t desc_loop_cnt;
+
+	desc_loop_cnt = (desc[srng->entry_size - 1] & SRNG_LOOP_CNT_MASK)
+		>> SRNG_LOOP_CNT_LSB;
+
+	if (srng->u.dst_ring.loop_cnt == desc_loop_cnt) {
+		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) &
+			srng->ring_size_mask;
+		srng->u.dst_ring.loop_cnt = (srng->u.dst_ring.loop_cnt +
+			!srng->u.dst_ring.tp) &
+			(SRNG_LOOP_CNT_MASK >> SRNG_LOOP_CNT_LSB);
+		/* TODO: Confirm if loop count mask is same for all rings */
+		return (void *)desc;
+	}
+	return NULL;
+}
+
+/**
+ * hal_srng_dst_peek - Get next entry from a ring without moving tail pointer.
+ * hal_srng_dst_get_next should be called subsequently to move the tail pointer
+ * TODO: See if we need an optimized version of get_next that doesn't check for
+ * loop_cnt
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Destination ring pointer
+ *
+ * Return: Opaque pointer for next ring entry; NULL on failure
+ */
+static inline void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+	uint32_t *desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
+	uint32_t desc_loop_cnt;
+
+	desc_loop_cnt = (desc[srng->entry_size - 1] & SRNG_LOOP_CNT_MASK)
+		>> SRNG_LOOP_CNT_LSB;
+
+	if (srng->u.dst_ring.loop_cnt == desc_loop_cnt)
+		return (void *)desc;
+	return NULL;
+}
+
+/**
+ * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
+ * by SW) in destination ring
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Destination ring pointer
+ * @sync_hw_ptr: Sync cached head pointer with HW
+ *
+ */
+static inline uint32_t hal_srng_dst_num_valid(void *hal_soc, void *hal_ring,
+	int sync_hw_ptr)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+	uint32_t hp;
+	uint32_t tp = srng->u.dst_ring.tp;
+
+	if (sync_hw_ptr) {
+		hp = *(srng->u.dst_ring.hp_addr);
+		srng->u.dst_ring.cached_hp = hp;
+	} else {
+		hp = srng->u.dst_ring.cached_hp;
+	}
+
+	if (hp >= tp)
+		return (hp - tp) / srng->entry_size;
+	else
+		return (srng->ring_size - tp + hp) / srng->entry_size;
+}
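
Putting the destination-ring accessors together, a typical completion-processing loop would look like the sketch below (process_entry() is a hypothetical caller-side handler):

	uint32_t *desc;

	if (hal_srng_access_start(hal_soc, hal_ring))
		return;

	while ((desc = (uint32_t *)hal_srng_dst_get_next(hal_soc, hal_ring)))
		process_entry(desc);	/* hypothetical per-entry handler */

	/* Publish the updated tail pointer to HW and drop the SRNG lock */
	hal_srng_access_end(hal_soc, hal_ring);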
+
+/**
+ * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
+ * pointer. This can be used to release any buffers associated with completed
+ * ring entries. Note that this should not be used for posting new descriptor
+ * entries. Posting of new entries should be done only using
+ * hal_srng_src_get_next_reaped when this function is used for reaping.
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Source ring pointer
+ *
+ * Return: Opaque pointer for next ring entry; NULL on failure
+ */
+static inline void *hal_srng_src_reap_next(void *hal_soc, void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+	uint32_t *desc;
+	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) &
+		srng->ring_size_mask;
+
+	if (next_reap_hp != srng->u.src_ring.cached_tp) {
+		desc = &(srng->ring_base_vaddr[next_reap_hp]);
+		srng->u.src_ring.reap_hp = next_reap_hp;
+		return (void *)desc;
+	}
+
+	return NULL;
+}
+
+/**
+ * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
+ * already reaped using hal_srng_src_reap_next, for posting new entries to
+ * the ring
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Source ring pointer
+ *
+ * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
+ */
+static inline void *hal_srng_src_get_next_reaped(void *hal_soc, void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+	uint32_t *desc;
+
+	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
+		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
+		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) &
+			srng->ring_size_mask;
+		return (void *)desc;
+	}
+
+	return NULL;
+}
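
A hedged sketch of the reap-then-repost pattern these two APIs enable (free_tx_buffer() and fill_tx_desc() are hypothetical caller helpers):

	uint32_t *desc;

	/* Release buffers tied to entries that HW has already consumed */
	while ((desc = (uint32_t *)hal_srng_src_reap_next(hal_soc, hal_ring)))
		free_tx_buffer(desc);

	/* Reuse the reaped slots for new descriptors */
	while ((desc = (uint32_t *)hal_srng_src_get_next_reaped(hal_soc,
					hal_ring)))
		fill_tx_desc(desc);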
+
+/**
+ * hal_srng_src_done_val - Get count of entries consumed by HW, pending reap
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Source ring pointer
+ *
+ * Return: Number of ring entries consumed by HW that are yet to be reaped by SW
+ */
+static inline uint32_t hal_srng_src_done_val(void *hal_soc, void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) &
+		srng->ring_size_mask;
+
+	if (next_reap_hp == srng->u.src_ring.cached_tp)
+		return 0;
+
+	if (srng->u.src_ring.cached_tp > next_reap_hp)
+		return (srng->u.src_ring.cached_tp - next_reap_hp) /
+			srng->entry_size;
+	else
+		return ((srng->ring_size - next_reap_hp) +
+			srng->u.src_ring.cached_tp) / srng->entry_size;
+}
+/**
+ * hal_srng_src_get_next - Get next entry from a source ring and move cached tail pointer
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Source ring pointer
+ *
+ * Return: Opaque pointer for next ring entry; NULL on failure
+ */
+static inline void *hal_srng_src_get_next(void *hal_soc, void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+	uint32_t *desc;
+	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) &
+		srng->ring_size_mask;
+
+	if (next_hp != srng->u.src_ring.cached_tp) {
+		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
+		srng->u.src_ring.hp = next_hp;
+		/* TODO: Since reap function is not used by all rings, we can
+		 * remove the following update of reap_hp in this function
+		 * if we can ensure that only hal_srng_src_get_next_reaped
+		 * is used for the rings requiring reap functionality
+		 */
+		srng->u.src_ring.reap_hp = next_hp;
+		return (void *)desc;
+	}
+
+	return NULL;
+}
+
+/**
+ * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
+ * hal_srng_src_get_next should be called subsequently to move the head pointer
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Source ring pointer
+ *
+ * Return: Opaque pointer for next ring entry; NULL on failure
+ */
+static inline void *hal_srng_src_peek(void *hal_soc, void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+	uint32_t *desc;
+
+	if (((srng->u.src_ring.hp + srng->entry_size) &
+		srng->ring_size_mask) != srng->u.src_ring.cached_tp) {
+		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
+		return (void *)desc;
+	}
+
+	return NULL;
+}
+
+/**
+ * hal_srng_src_num_avail - Returns number of available entries in src ring
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Source ring pointer
+ * @sync_hw_ptr: Sync cached tail pointer with HW
+ *
+ */
+static inline uint32_t hal_srng_src_num_avail(void *hal_soc,
+	void *hal_ring, int sync_hw_ptr)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+	uint32_t tp;
+	uint32_t hp = srng->u.src_ring.hp;
+
+	if (sync_hw_ptr) {
+		tp = *(srng->u.src_ring.tp_addr);
+		srng->u.src_ring.cached_tp = tp;
+	} else {
+		tp = srng->u.src_ring.cached_tp;
+	}
+
+	if (tp > hp)
+		return ((tp - hp) / srng->entry_size) - 1;
+	else
+		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
+}
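
For the posting side, a hedged sketch of filling a source ring (e.g. a TCL data ring) under the ring lock; build_tx_desc() and the count of frames to post are assumptions:

	uint32_t avail, i;
	uint32_t *desc;

	if (hal_srng_access_start(hal_soc, hal_ring))
		return;

	avail = hal_srng_src_num_avail(hal_soc, hal_ring, 1 /* sync_hw_ptr */);

	for (i = 0; i < count && i < avail; i++) {
		desc = (uint32_t *)hal_srng_src_get_next(hal_soc, hal_ring);
		if (!desc)
			break;
		build_tx_desc(desc);	/* hypothetical descriptor fill */
	}

	/* Write the updated head pointer to HW and drop the SRNG lock */
	hal_srng_access_end(hal_soc, hal_ring);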
+
+/**
+ * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
+ * ring head/tail pointers to HW.
+ * This should be used only if hal_srng_access_start_unlocked was used to start
+ * ring access
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Ring pointer (Source or Destination ring)
+ *
+ * Return: None
+ */
+static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+
+	/* TODO: See if we need a write memory barrier here */
+	if (srng->flags & HAL_SRNG_LMAC_RING) {
+		/* For LMAC rings, ring pointer updates are done through FW and
+		 * hence written to a shared memory location that is read by FW
+		 */
+		if (srng->ring_dir == HAL_SRNG_SRC_RING)
+			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
+		else
+			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
+	} else {
+		if (srng->ring_dir == HAL_SRNG_SRC_RING)
+			hif_write32_mb(srng->u.src_ring.hp_addr,
+				srng->u.src_ring.hp);
+		else
+			hif_write32_mb(srng->u.dst_ring.tp_addr,
+				srng->u.dst_ring.tp);
+	}
+}
+
+/**
+ * hal_srng_access_end - Unlock ring access and update cached ring head/tail
+ * pointers to HW
+ * This should be used only if hal_srng_access_start was used to start ring access
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Ring pointer (Source or Destination ring)
+ *
+ * Return: None
+ */
+static inline void hal_srng_access_end(void *hal_soc, void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+
+	hal_srng_access_end_unlocked(hal_soc, hal_ring);
+	SRNG_UNLOCK(&(srng->lock));
+}
+
+
+/* TODO: Check if the following definitions are available in HW headers */
+#define WBM_IDLE_DESC_LIST 1
+#define WBM_IDLE_SCATTER_BUF_SIZE 32704
+#define NUM_MPDUS_PER_LINK_DESC 6
+#define NUM_MSDUS_PER_LINK_DESC 7
+#define REO_QUEUE_DESC_ALIGN 128
+
+#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2)
+#define LINK_DESC_ALIGN 128
+
+/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
+ * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
+ */
+#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
+
+/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
+ * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
+ * should be specified in 16 word units. But the number of bits defined for
+ * this field in HW header files is 5.
+ */
+#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
+
+/**
+ * hal_set_link_desc_addr - Setup link descriptor in a buffer_addr_info
+ * HW structure
+ *
+ * @desc: Descriptor entry (from WBM_IDLE_LINK ring)
+ * @cookie: SW cookie for the buffer/descriptor
+ * @link_desc_paddr: Physical address of link descriptor entry
+ *
+ */
+static inline void hal_set_link_desc_addr(void *desc, uint32_t cookie,
+	qdf_dma_addr_t link_desc_paddr)
+{
+	uint32_t *buf_addr = (uint32_t *)desc;
+	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_0, BUFFER_ADDR_31_0,
+		link_desc_paddr & 0xffffffff);
+	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32,
+		(uint64_t)link_desc_paddr >> 32);
+	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, RETURN_BUFFER_MANAGER,
+		WBM_IDLE_DESC_LIST);
+	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, SW_BUFFER_COOKIE,
+		cookie);
+}
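
A hedged sketch of how the DP layer might combine this with the source-ring APIs while seeding the WBM idle link ring (the cookie scheme and the link_desc_paddr array are assumptions):

	uint32_t i;
	void *desc;

	hal_srng_access_start(hal_soc, wbm_idle_link_ring);
	for (i = 0; i < num_link_descs; i++) {
		desc = hal_srng_src_get_next(hal_soc, wbm_idle_link_ring);
		if (!desc)
			break;
		hal_set_link_desc_addr(desc, i /* SW cookie */,
				link_desc_paddr[i]);
	}
	hal_srng_access_end(hal_soc, wbm_idle_link_ring);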
+
+/**
+ * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
+ * in an idle list
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ *
+ */
+static inline uint32_t hal_idle_list_scatter_buf_size(void *hal_soc)
+{
+	return WBM_IDLE_SCATTER_BUF_SIZE;
+}
+
+/**
+ * hal_get_link_desc_size - Get the size of each link descriptor
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ *
+ */
+static inline uint32_t hal_get_link_desc_size(void *hal_soc)
+{
+	return LINK_DESC_SIZE;
+}
+
+/**
+ * hal_get_link_desc_align - Get the required start address alignment for
+ * link descriptors
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ *
+ */
+static inline uint32_t hal_get_link_desc_align(void *hal_soc)
+{
+	return LINK_DESC_ALIGN;
+}
+
+/**
+ * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ *
+ */
+static inline uint32_t hal_num_mpdus_per_link_desc(void *hal_soc)
+{
+	return NUM_MPDUS_PER_LINK_DESC;
+}
+
+/**
+ * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ *
+ */
+static inline uint32_t hal_num_msdus_per_link_desc(void *hal_soc)
+{
+	return NUM_MSDUS_PER_LINK_DESC;
+}
+
+/**
+ * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
+ * descriptor can hold
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ *
+ */
+static inline uint32_t hal_num_mpdu_links_per_queue_desc(void *hal_soc)
+{
+	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
+}
+
+/**
+ * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
+ * that can be held in the given scatter buffer size
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @scatter_buf_size: Size of scatter buffer
+ *
+ */
+static inline uint32_t hal_idle_scatter_buf_num_entries(void *hal_soc,
+	uint32_t scatter_buf_size)
+{
+	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
+		hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK);
+}
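
For example, with the default 32704-byte scatter buffer and the 8-byte next-pointer reservation above, an assumed WBM_IDLE_LINK entry size of 8 bytes would yield (32704 - 8) / 8 = 4087 link-pointer entries per scatter buffer; the real divisor is whatever hal_srng_get_entrysize() reports for WBM_IDLE_LINK.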
+
+/**
+ * hal_setup_link_idle_list - Setup scattered idle list using the buffer list
+ * provided
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @scatter_bufs_base_paddr: Array of physical base addresses
+ * @scatter_bufs_base_vaddr: Array of virtual base addresses
+ * @num_scatter_bufs: Number of scatter buffers in the above lists
+ * @scatter_buf_size: Size of each scatter buffer
+ * @last_buf_end_offset: Offset of the last entry within the last scatter buffer
+ *
+ */
+extern void hal_setup_link_idle_list(void *hal_soc,
+	qdf_dma_addr_t scatter_bufs_base_paddr[],
+	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
+	uint32_t scatter_buf_size, uint32_t last_buf_end_offset);
+
+/**
+ * hal_reo_setup - Initialize HW REO block
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ */
+extern void hal_reo_setup(void *hal_soc);
+
+enum hal_pn_type {
+	HAL_PN_NONE,
+	HAL_PN_WPA,
+	HAL_PN_WAPI_EVEN,
+	HAL_PN_WAPI_UNEVEN,
+};
+
+#define HAL_RX_MAX_BA_WINDOW 256
+/**
+ * hal_get_reo_qdesc_size - Get size of reo queue descriptor
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @ba_window_size: BlockAck window size
+ *
+ */
+static inline uint32_t hal_get_reo_qdesc_size(void *hal_soc,
+	uint32_t ba_window_size)
+{
+	if (ba_window_size <= 1)
+		return sizeof(struct rx_reo_queue);
+
+	if (ba_window_size <= 105)
+		return sizeof(struct rx_reo_queue) +
+			sizeof(struct rx_reo_queue_ext);
+
+	if (ba_window_size <= 210)
+		return sizeof(struct rx_reo_queue) +
+			(2 * sizeof(struct rx_reo_queue_ext));
+
+	return sizeof(struct rx_reo_queue) +
+		(3 * sizeof(struct rx_reo_queue_ext));
+}
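
For example, a BlockAck window of 64 falls into the <= 105 bucket and needs the base rx_reo_queue plus one rx_reo_queue_ext, while the maximum window of 256 requires the base descriptor plus all three extension structures.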
+
+/**
+ * hal_get_reo_qdesc_align - Get start address alignment for reo
+ * queue descriptors
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ *
+ */
+static inline uint32_t hal_get_reo_qdesc_align(void *hal_soc)
+{
+	return REO_QUEUE_DESC_ALIGN;
+}
+
+/**
+ * hal_reo_qdesc_setup - Setup HW REO queue descriptor
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @tid: TID of the REO queue being set up
+ * @ba_window_size: BlockAck window size
+ * @start_seq: Starting sequence number
+ * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
+ * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
+ * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
+ *
+ */
+extern void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
+	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
+	int pn_type);
+
+/**
+ * hal_srng_get_hp_addr - Get head pointer physical address
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Ring pointer (Source or Destination ring)
+ *
+ */
+static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+	struct hal_soc *hal = (struct hal_soc *)hal_soc;
+
+	if (!(srng->flags & HAL_SRNG_LMAC_RING)) {
+		/* Currently this interface is required only for LMAC rings */
+		return (qdf_dma_addr_t)NULL;
+	}
+
+	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
+		return hal->shadow_wrptr_mem_paddr + (srng->u.src_ring.hp_addr -
+			hal->shadow_wrptr_mem_vaddr);
+	} else {
+		return hal->shadow_rdptr_mem_paddr + (srng->u.dst_ring.hp_addr -
+			hal->shadow_rdptr_mem_vaddr);
+	}
+}
+
+/**
+ * hal_srng_get_tp_addr - Get tail pointer physical address
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Ring pointer (Source or Destination ring)
+ *
+ */
+static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+	struct hal_soc *hal = (struct hal_soc *)hal_soc;
+
+	if (!(srng->flags & HAL_SRNG_LMAC_RING)) {
+		/* Currently this interface is required only for LMAC rings */
+		return (qdf_dma_addr_t)NULL;
+	}
+
+	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
+		return hal->shadow_rdptr_mem_paddr +
+			((unsigned long)(srng->u.src_ring.tp_addr) -
+			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
+	} else {
+		return hal->shadow_wrptr_mem_paddr +
+			((unsigned long)(srng->u.dst_ring.tp_addr) -
+			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
+	}
+}
+
+/**
+ * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Ring pointer (Source or Destination ring)
+ * @ring_params: SRNG parameters will be returned through this structure
+ */
+extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
+	struct hal_srng_params *ring_params);
+#endif /* _HAL_API_H_ */

+ 309 - 0
hal/wifi3.0/hal_internal.h

@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _HAL_INTERNAL_H_
+#define _HAL_INTERNAL_H_
+
+#include "qdf_types.h"
+#include "qdf_lock.h"
+#include "hw/reg_header/wcss/wcss_seq_hwiobase.h"
+#include "hw/reg_header/wcss/wcss_seq_hwioreg.h"
+#include "hw/data/common/sw_xml_headers.h"
+#include "hw/data/datastruct/reo_destination_ring.h"
+#include "hw/data/tlv_32/tcl_data_cmd.h"
+#include "hw/data/common/tlv_hdr.h"
+
+/* TBD: This should be moved to a shared HW header file */
+enum hal_srng_ring_id {
+	/* UMAC rings */
+	HAL_SRNG_REO2SW1 = 0,
+	HAL_SRNG_REO2SW2 = 1,
+	HAL_SRNG_REO2SW3 = 2,
+	HAL_SRNG_REO2SW4 = 3,
+	HAL_SRNG_REO2TCL = 4,
+	HAL_SRNG_SW2REO = 5,
+	/* 6-7 unused */
+	HAL_SRNG_REO_CMD = 8,
+	HAL_SRNG_REO_STATUS = 9,
+	/* 10-15 unused */
+	HAL_SRNG_SW2TCL1 = 16,
+	HAL_SRNG_SW2TCL2 = 17,
+	HAL_SRNG_SW2TCL3 = 18,
+	HAL_SRNG_SW2TCL4 = 19, /* FW2TCL ring */
+	/* 20-23 unused */
+	HAL_SRNG_SW2TCL_CMD = 24,
+	HAL_SRNG_TCL_STATUS = 25,
+	/* 26-31 unused */
+	HAL_SRNG_CE_0_SRC = 32,
+	HAL_SRNG_CE_1_SRC = 33,
+	HAL_SRNG_CE_2_SRC = 34,
+	HAL_SRNG_CE_3_SRC = 35,
+	HAL_SRNG_CE_4_SRC = 36,
+	HAL_SRNG_CE_5_SRC = 37,
+	HAL_SRNG_CE_6_SRC = 38,
+	HAL_SRNG_CE_7_SRC = 39,
+	HAL_SRNG_CE_8_SRC = 40,
+	HAL_SRNG_CE_9_SRC = 41,
+	HAL_SRNG_CE_10_SRC = 42,
+	HAL_SRNG_CE_11_SRC = 43,
+	/* 44-55 unused */
+	HAL_SRNG_CE_0_DST = 56,
+	HAL_SRNG_CE_1_DST = 57,
+	HAL_SRNG_CE_2_DST = 58,
+	HAL_SRNG_CE_3_DST = 59,
+	HAL_SRNG_CE_4_DST = 60,
+	HAL_SRNG_CE_5_DST = 61,
+	HAL_SRNG_CE_6_DST = 62,
+	HAL_SRNG_CE_7_DST = 63,
+	HAL_SRNG_CE_8_DST = 64,
+	HAL_SRNG_CE_9_DST = 65,
+	HAL_SRNG_CE_10_DST = 66,
+	HAL_SRNG_CE_11_DST = 67,
+	/* 68-79 unused */
+	HAL_SRNG_CE_0_DST_STATUS = 80,
+	HAL_SRNG_CE_1_DST_STATUS = 81,
+	HAL_SRNG_CE_2_DST_STATUS = 82,
+	HAL_SRNG_CE_3_DST_STATUS = 83,
+	HAL_SRNG_CE_4_DST_STATUS = 84,
+	HAL_SRNG_CE_5_DST_STATUS = 85,
+	HAL_SRNG_CE_6_DST_STATUS = 86,
+	HAL_SRNG_CE_7_DST_STATUS = 87,
+	HAL_SRNG_CE_8_DST_STATUS = 88,
+	HAL_SRNG_CE_9_DST_STATUS = 89,
+	HAL_SRNG_CE_10_DST_STATUS = 90,
+	HAL_SRNG_CE_11_DST_STATUS = 91,
+	/* 92-103 unused */
+	HAL_SRNG_WBM_IDLE_LINK = 104,
+	HAL_SRNG_WBM_SW_RELEASE = 105,
+	HAL_SRNG_WBM2SW0_RELEASE = 106,
+	HAL_SRNG_WBM2SW1_RELEASE = 107,
+	HAL_SRNG_WBM2SW2_RELEASE = 108,
+	HAL_SRNG_WBM2SW3_RELEASE = 109,
+	/* 110-127 unused */
+	HAL_SRNG_UMAC_ID_END = 127,
+	/* LMAC rings - The following set will be replicated for each LMAC */
+	HAL_SRNG_LMAC1_ID_START = 128,
+	HAL_SRNG_WMAC1_SW2RXDMA0_BUF = HAL_SRNG_LMAC1_ID_START,
+	HAL_SRNG_WMAC1_SW2RXDMA1_BUF = 129,
+	HAL_SRNG_WMAC1_SW2RXDMA0_STATBUF = 130,
+	HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF = 131,
+	HAL_SRNG_WMAC1_RXDMA2SW0 = 132,
+	HAL_SRNG_WMAC1_RXDMA2SW1 = 133,
+	/* 134-142 unused */
+	HAL_SRNG_LMAC1_ID_END = 143
+};
+
+#define HAL_SRNG_REO_EXCEPTION HAL_SRNG_REO2SW1
+
+#define HAL_MAX_LMACS 3
+#define HAL_MAX_RINGS_PER_LMAC (HAL_SRNG_LMAC1_ID_END - HAL_SRNG_LMAC1_ID_START)
+#define HAL_MAX_LMAC_RINGS (HAL_MAX_LMACS * HAL_MAX_RINGS_PER_LMAC)
+
+#define HAL_SRNG_ID_MAX (HAL_SRNG_UMAC_ID_END + HAL_MAX_LMAC_RINGS)
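
With HAL_MAX_LMACS of 3 and 15 ring IDs reserved per LMAC (143 - 128), this evaluates to 127 + 3 * 15 = 172 slots in the per-SOC srng_list table.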
+
+enum hal_srng_dir {
+	HAL_SRNG_SRC_RING,
+	HAL_SRNG_DST_RING
+};
+
+/* Lock wrappers for SRNG */
+#define hal_srng_lock_t qdf_spinlock_t
+#define SRNG_LOCK_INIT(_lock) qdf_spinlock_create(_lock)
+#define SRNG_LOCK(_lock) qdf_spinlock_acquire(_lock)
+#define SRNG_UNLOCK(_lock) qdf_spinlock_release(_lock)
+#define SRNG_LOCK_DESTROY(_lock) qdf_spinlock_destroy(_lock)
+
+#define MAX_SRNG_REG_GROUPS 2
+
+/* Common SRNG ring structure for source and destination rings */
+struct hal_srng {
+	/* Unique SRNG ring ID */
+	uint8_t ring_id;
+
+	/* Ring initialization done */
+	uint8_t initialized;
+
+	/* Interrupt/MSI value assigned to this ring */
+	int irq;
+
+	/* Physical base address of the ring */
+	qdf_dma_addr_t ring_base_paddr;
+
+	/* Virtual base address of the ring */
+	uint32_t *ring_base_vaddr;
+
+	/* Number of entries in ring */
+	uint32_t num_entries;
+
+	/* Ring size */
+	uint32_t ring_size;
+
+	/* Ring size mask */
+	uint32_t ring_size_mask;
+
+	/* Size of ring entry */
+	uint32_t entry_size;
+
+	/* Interrupt timer threshold – in micro seconds */
+	uint32_t intr_timer_thres_us;
+
+	/* Interrupt batch counter threshold – in number of ring entries */
+	uint32_t intr_batch_cntr_thres_entries;
+
+	/* MSI Address */
+	qdf_dma_addr_t msi_addr;
+
+	/* MSI data */
+	uint32_t msi_data;
+
+	/* Misc flags */
+	uint32_t flags;
+
+	/* Lock for serializing ring index updates */
+	hal_srng_lock_t lock;
+
+	/* Start offset of SRNG register groups for this ring
+	 * TBD: See if this is required - register address can be derived
+	 * from ring ID
+	 */
+	void *hwreg_base[MAX_SRNG_REG_GROUPS];
+
+	/* Source or Destination ring */
+	enum hal_srng_dir ring_dir;
+
+	union {
+		struct {
+			/* SW tail pointer */
+			uint32_t tp;
+
+			/* Shadow head pointer location to be updated by HW */
+			uint32_t *hp_addr;
+
+			/* Cached head pointer */
+			uint32_t cached_hp;
+
+			/* Tail pointer location to be updated by SW – This
+			 * will be a register address and need not be
+			 * accessed through SW structure */
+			uint32_t *tp_addr;
+
+			/* Current SW loop cnt */
+			int loop_cnt;
+		} dst_ring;
+
+		struct {
+			/* SW head pointer */
+			uint32_t hp;
+
+			/* SW reap head pointer */
+			uint32_t reap_hp;
+
+			/* Shadow tail pointer location to be updated by HW */
+			uint32_t *tp_addr;
+
+			/* Cached tail pointer */
+			uint32_t cached_tp;
+
+			/* Head pointer location to be updated by SW – This
+			 * will be a register address and need not be accessed
+			 * through SW structure */
+			uint32_t *hp_addr;
+
+			/* Low threshold – in number of ring entries */
+			uint32_t low_threshold;
+		} src_ring;
+	} u;
+};
+
+/* HW SRNG configuration table */
+struct hal_hw_srng_config {
+	int start_ring_id;
+	uint16_t max_rings;
+	uint16_t entry_size;
+	uint32_t reg_start[MAX_SRNG_REG_GROUPS];
+	uint16_t reg_size[MAX_SRNG_REG_GROUPS];
+	uint8_t lmac_ring;
+	enum hal_srng_dir ring_dir;
+};
+
+/**
+ * HAL context to be used to access SRNG APIs (currently used by data path
+ * and transport (CE) modules)
+ */
+struct hal_soc {
+	/* HIF handle to access HW registers */
+	void *hif_handle;
+
+	/* QDF device handle */
+	qdf_device_t qdf_dev;
+
+	/* Device base address */
+	void *dev_base_addr;
+
+	/* HAL internal state for all SRNG rings.
+	 * TODO: See if this is required
+	 */
+	struct hal_srng srng_list[HAL_SRNG_ID_MAX];
+
+	/* Remote pointer memory for HW/FW updates */
+	uint32_t *shadow_rdptr_mem_vaddr;
+	qdf_dma_addr_t shadow_rdptr_mem_paddr;
+
+	/* Shared memory for ring pointer updates from host to FW */
+	uint32_t *shadow_wrptr_mem_vaddr;
+	qdf_dma_addr_t shadow_wrptr_mem_paddr;
+};
+
+/* TODO: Check if the following can be provided directly by HW headers */
+#define SRNG_LOOP_CNT_MASK REO_DESTINATION_RING_15_LOOPING_COUNT_MASK
+#define SRNG_LOOP_CNT_LSB REO_DESTINATION_RING_15_LOOPING_COUNT_LSB
+
+#define HAL_SRNG_LMAC_RING 0x80000000
+
+#define HAL_DEFAULT_REO_TIMEOUT_MS 40 /* milliseconds */
+
+#define HAL_DESC_SET_FIELD(_desc, _word, _fld, _value) \
+	((_desc)[(_word ## _ ## _fld ## _OFFSET) >> 2] |= \
+		((_value) << _word ## _ ## _fld ## _LSB))
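
For instance, the call HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, SW_BUFFER_COOKIE, cookie) used in hal_api.h expands to roughly the following (the _OFFSET/_LSB tokens are assumed to come from the generated HW headers):

	buf_addr[BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_OFFSET >> 2] |=
		(cookie << BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB);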
+
+#define HAL_SM(_reg, _fld, _val) \
+	(((_val) << (_reg ## _ ## _fld ## _SHFT)) & \
+		(_reg ## _ ## _fld ## _BMSK))
+
+#define HAL_MS(_reg, _fld, _val) \
+	(((_val) & (_reg ## _ ## _fld ## _BMSK)) >> \
+		(_reg ## _ ## _fld ## _SHFT))
+
+#define HAL_REG_WRITE(_soc, _reg, _value) \
+	hif_write32_mb((_soc)->dev_base_addr + (_reg), (_value))
+
+#define HAL_REG_READ(_soc, _offset) \
+	hif_read32_mb((_soc)->dev_base_addr + (_offset))
+
+#endif /* _HAL_INTERNAL_H_ */

+ 947 - 0
hal/wifi3.0/hal_srng.c

@@ -0,0 +1,947 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "hw/reg_header/wcss/wcss_seq_hwiobase.h"
+#include "hw/reg_header/wcss/wcss_seq_hwioreg.h"
+#include "hw/data/common/sw_xml_headers.h"
+#include "hw/data/datastruct/reo_destination_ring.h"
+#include "hw/data/tlv_32/tcl_data_cmd.h"
+#include "hw/data/common/tlv_hdr.h"
+#include "hal_api.h"
+
+/**
+ * Common SRNG register access macros:
+ * The SRNG registers are distributed across various UMAC and LMAC HW blocks,
+ * but the register group and format are exactly the same for all rings, with some
+ * difference between producer rings (these are 'producer rings' with respect
+ * to HW and referred to as 'destination rings' in SW) and consumer rings (these
+ * are 'consumer rings' with respect to HW and referred to as 'source rings' in SW).
+ * The following macros provide uniform access to all SRNG rings.
+ */
+
+/* SRNG registers are split among two groups R0 and R2 and the following
+ * definitions identify the group to which each register belongs
+ */
+#define R0_INDEX 0
+#define R2_INDEX 1
+
+#define HWREG_INDEX(_reg_group) _reg_group ## _ ## INDEX
+
+/* Registers in R0 group */
+#define BASE_LSB_GROUP R0
+#define BASE_MSB_GROUP R0
+#define ID_GROUP R0
+#define STATUS_GROUP R0
+#define MISC_GROUP R0
+#define HP_ADDR_LSB_GROUP R0
+#define HP_ADDR_MSB_GROUP R0
+#define PRODUCER_INT_SETUP_GROUP R0
+#define PRODUCER_INT_STATUS_GROUP R0
+#define PRODUCER_FULL_COUNTER_GROUP R0
+#define MSI1_BASE_LSB_GROUP R0
+#define MSI1_BASE_MSB_GROUP R0
+#define MSI1_DATA_GROUP R0
+#define HP_TP_SW_OFFSET_GROUP R0
+#define TP_ADDR_LSB_GROUP R0
+#define TP_ADDR_MSB_GROUP R0
+#define CONSUMER_INT_SETUP_IX0_GROUP R0
+#define CONSUMER_INT_SETUP_IX1_GROUP R0
+#define CONSUMER_INT_STATUS_GROUP R0
+#define CONSUMER_EMPTY_COUNTER_GROUP R0
+#define CONSUMER_PREFETCH_TIMER_GROUP R0
+#define CONSUMER_PREFETCH_STATUS_GROUP R0
+
+/* Registers in R2 group */
+#define HP_GROUP R2
+#define TP_GROUP R2
+
+/**
+ * Register definitions for all SRNG based rings are the same, except for a few
+ * differences between source (HW consumer) and destination (HW producer)
+ * registers. The following macro definitions provide generic access to all
+ * SRNG based rings.
+ * For source rings, we will use the register/field definitions of SW2TCL1
+ * ring defined in the HW header file mac_tcl_reg_seq_hwioreg.h. To setup
+ * individual fields, SRNG_SM macros should be used with fields specified
+ * using SRNG_SRC_FLD(<register>, <field>), Register writes should be done
+ * using SRNG_SRC_REG_WRITE(<hal_srng>, <register>, <value>).
+ * Similarly, for destination rings we will use the definitions of the REO2SW1
+ * ring defined in the HW header file reo_destination_ring.h. To setup individual
+ * fields SRNG_SM macros should be used with fields specified using
+ * SRNG_DST_FLD(<register>, <field>). Register writes should be done using
+ * SRNG_DST_REG_WRITE(<hal_srng>, <register>, <value>).
+ */
+
+#define SRNG_DST_REG_OFFSET(_reg, _reg_group) \
+	HWIO_REO_ ## _reg_group ## _REO2SW1_RING_ ## _reg##_ADDR(0)
+
+#define SRNG_SRC_REG_OFFSET(_reg, _reg_group) \
+	HWIO_TCL_ ## _reg_group ## _SW2TCL1_RING_ ## _reg ## _ADDR(0)
+
+#define _SRNG_DST_FLD(_reg_group, _reg_fld) \
+	HWIO_REO_ ## _reg_group ## _REO2SW1_RING_ ## _reg_fld
+#define _SRNG_SRC_FLD(_reg_group, _reg_fld) \
+	HWIO_TCL_ ## _reg_group ## _SW2TCL1_RING_ ## _reg_fld
+
+#define _SRNG_FLD(_reg_group, _reg_fld, _dir) \
+	_SRNG_ ## _dir ## _FLD(_reg_group, _reg_fld)
+
+#define SRNG_DST_FLD(_reg, _f) _SRNG_FLD(_reg ## _GROUP, _reg ## _ ## _f, DST)
+#define SRNG_SRC_FLD(_reg, _f) _SRNG_FLD(_reg ## _GROUP, _reg ## _ ## _f, SRC)
+
+#define SRNG_SRC_R0_START_OFFSET SRNG_SRC_REG_OFFSET(BASE_LSB, R0)
+#define SRNG_DST_R0_START_OFFSET SRNG_DST_REG_OFFSET(BASE_LSB, R0)
+
+#define SRNG_SRC_R2_START_OFFSET SRNG_SRC_REG_OFFSET(HP, R2)
+#define SRNG_DST_R2_START_OFFSET SRNG_DST_REG_OFFSET(HP, R2)
+
+#define SRNG_SRC_START_OFFSET(_reg_group) \
+	SRNG_SRC_ ## _reg_group ## _START_OFFSET
+#define SRNG_DST_START_OFFSET(_reg_group) \
+	SRNG_DST_ ## _reg_group ## _START_OFFSET
+
+#define SRNG_REG_ADDR(_srng, _reg, _reg_group, _dir) \
+	((_srng)->hwreg_base[HWREG_INDEX(_reg_group)] + \
+		SRNG_ ## _dir ## _REG_OFFSET(_reg, _reg_group) - \
+		SRNG_ ## _dir ## _START_OFFSET(_reg_group))
+
+#define SRNG_DST_ADDR(_srng, _reg) \
+	SRNG_REG_ADDR(_srng, _reg, _reg ## _GROUP, DST)
+
+#define SRNG_SRC_ADDR(_srng, _reg) \
+	SRNG_REG_ADDR(_srng, _reg, _reg ## _GROUP, SRC)
+
+#define SRNG_REG_WRITE(_srng, _reg, _value, _dir) \
+	hif_write32_mb(SRNG_ ## _dir ## _ADDR(_srng, _reg), (_value))
+
+#define SRNG_REG_READ(_srng, _reg, _dir) \
+	hif_read32_mb(SRNG_ ## _dir ## _ADDR(_srng, _reg))
+
+#define SRNG_SRC_REG_WRITE(_srng, _reg, _value) \
+	SRNG_REG_WRITE(_srng, _reg, _value, SRC)
+
+#define SRNG_DST_REG_WRITE(_srng, _reg, _value) \
+	SRNG_REG_WRITE(_srng, _reg, _value, DST)
+
+#define SRNG_SRC_REG_READ(_srng, _reg) \
+	SRNG_REG_READ(_srng, _reg, SRC)
+
+#define _SRNG_FM(_reg_fld) _reg_fld ## _BMSK
+#define _SRNG_FS(_reg_fld) _reg_fld ## _SHFT
+
+#define SRNG_SM(_reg_fld, _val) \
+	(((_val) << _SRNG_FS(_reg_fld)) & _SRNG_FM(_reg_fld))
+
+#define SRNG_MS(_reg_fld, _val) \
+	(((_val) & _SRNG_FM(_reg_fld)) >> _SRNG_FS(_reg_fld))
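
As an example of the token pasting, SRNG_SM(SRNG_SRC_FLD(ID, ENTRY_SIZE), val) resolves through the SW2TCL1 definitions to roughly the following (the HWIO_* _SHFT/_BMSK tokens are assumed to come from the generated register headers):

	((val << HWIO_TCL_R0_SW2TCL1_RING_ID_ENTRY_SIZE_SHFT) &
		HWIO_TCL_R0_SW2TCL1_RING_ID_ENTRY_SIZE_BMSK)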
+
+/**
+ * HW ring configuration table to identify hardware ring attributes like
+ * register addresses, number of rings, ring entry size etc., for each type
+ * of SRNG ring.
+ *
+ * Currently there is just one HW ring table, but there could be multiple
+ * configurations in future based on HW variants from the same wifi3.0 family
+ * and hence need to be attached with hal_soc based on HW type
+ */
+#define HAL_SRNG_CONFIG(_hal_soc, _ring_type) (&hw_srng_table[_ring_type])
+
+static struct hal_hw_srng_config hw_srng_table[] = {
+	/* TODO: max_rings can be populated by querying HW capabilities */
+	{ /* REO_DST */
+		.start_ring_id = HAL_SRNG_REO2SW1,
+		.max_rings = 4,
+		.entry_size = sizeof(struct reo_destination_ring) >> 2,
+		.lmac_ring = FALSE,
+		.ring_dir = HAL_SRNG_DST_RING,
+		.reg_start = {
+			HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(
+				SEQ_WCSS_UMAC_REO_REG_OFFSET),
+			HWIO_REO_R2_REO2SW1_RING_HP_ADDR(
+				SEQ_WCSS_UMAC_REO_REG_OFFSET)
+		},
+		.reg_size = {
+			HWIO_REO_R0_REO2SW2_RING_BASE_LSB_ADDR(0) -
+				HWIO_REO_R0_REO2SW1_RING_BASE_LSB_ADDR(0),
+			HWIO_REO_R2_REO2SW1_RING_HP_ADDR(0) -
+				HWIO_REO_R2_REO2SW2_RING_HP_ADDR(0),
+		},
+	},
+	{ /* REO_EXCEPTION */
+		/* Designating REO2TCL ring as exception ring. This ring is
+		 * similar to other REO2SW rings though it is named as REO2TCL.
+		 * Any of the REO2SW rings can be used as exception ring.
+		 */
+		.start_ring_id = HAL_SRNG_REO2TCL,
+		.max_rings = 1,
+		.entry_size = sizeof(struct reo_destination_ring) >> 2,
+		.lmac_ring = FALSE,
+		.ring_dir = HAL_SRNG_DST_RING,
+		.reg_start = {
+			HWIO_REO_R0_REO2TCL_RING_BASE_LSB_ADDR(
+				SEQ_WCSS_UMAC_REO_REG_OFFSET),
+			HWIO_REO_R2_REO2TCL_RING_HP_ADDR(
+				SEQ_WCSS_UMAC_REO_REG_OFFSET)
+		},
+		/* Single ring - provide ring size if multiple rings of this
+		 * type are supported */
+		.reg_size = {},
+	},
+	{ /* REO_REINJECT */
+		.start_ring_id = HAL_SRNG_SW2REO,
+		.max_rings = 1,
+		.entry_size = sizeof(struct reo_entrance_ring) >> 2,
+		.lmac_ring = FALSE,
+		.ring_dir = HAL_SRNG_SRC_RING,
+		.reg_start = {
+			HWIO_REO_R0_SW2REO_RING_BASE_LSB_ADDR(
+				SEQ_WCSS_UMAC_REO_REG_OFFSET),
+			HWIO_REO_R2_SW2REO_RING_HP_ADDR(
+				SEQ_WCSS_UMAC_REO_REG_OFFSET)
+		},
+		/* Single ring - provide ring size if multiple rings of this
+		 * type are supported */
+		.reg_size = {},
+	},
+	{ /* REO_CMD */
+		.start_ring_id = HAL_SRNG_REO_CMD,
+		.max_rings = 1,
+		.entry_size = (sizeof(struct tlv_32_hdr) +
+			sizeof(struct reo_get_queue_stats)) >> 2,
+		.lmac_ring = FALSE,
+		.ring_dir = HAL_SRNG_SRC_RING,
+		.reg_start = {
+			HWIO_REO_R0_REO_CMD_RING_BASE_LSB_ADDR(
+				SEQ_WCSS_UMAC_REO_REG_OFFSET),
+			HWIO_REO_R2_REO_CMD_RING_HP_ADDR(
+				SEQ_WCSS_UMAC_REO_REG_OFFSET),
+		},
+		/* Single ring - provide ring size if multiple rings of this
+		 * type are supported */
+		.reg_size = {},
+	},
+	{ /* REO_STATUS */
+		.start_ring_id = HAL_SRNG_REO_STATUS,
+		.max_rings = 1,
+		.entry_size = (sizeof(struct tlv_32_hdr) +
+			sizeof(struct reo_get_queue_stats_status)) >> 2,
+		.lmac_ring = FALSE,
+		.ring_dir = HAL_SRNG_DST_RING,
+		.reg_start = {
+			HWIO_REO_R0_REO_STATUS_RING_BASE_LSB_ADDR(
+				SEQ_WCSS_UMAC_REO_REG_OFFSET),
+			HWIO_REO_R2_REO_STATUS_RING_HP_ADDR(
+				SEQ_WCSS_UMAC_REO_REG_OFFSET),
+		},
+		/* Single ring - provide ring size if multiple rings of this
+		 * type are supported */
+		.reg_size = {},
+	},
+	{ /* TCL_DATA */
+		.start_ring_id = HAL_SRNG_SW2TCL1,
+		.max_rings = 3,
+		.entry_size = (sizeof(struct tlv_32_hdr) +
+			sizeof(struct tcl_data_cmd)) >> 2,
+		.lmac_ring = FALSE,
+		.ring_dir = HAL_SRNG_SRC_RING,
+		.reg_start = {
+			HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(
+				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
+			HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(
+				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
+		},
+		.reg_size = {
+			HWIO_TCL_R0_SW2TCL2_RING_BASE_LSB_ADDR(0) -
+				HWIO_TCL_R0_SW2TCL1_RING_BASE_LSB_ADDR(0),
+			HWIO_TCL_R2_SW2TCL2_RING_HP_ADDR(0) -
+				HWIO_TCL_R2_SW2TCL1_RING_HP_ADDR(0),
+		},
+	},
+	{ /* TCL_CMD */
+		.start_ring_id = HAL_SRNG_SW2TCL_CMD,
+		.max_rings = 1,
+		.entry_size = (sizeof(struct tlv_32_hdr) +
+			sizeof(struct tcl_gse_cmd)) >> 2,
+		.lmac_ring =  FALSE,
+		.ring_dir = HAL_SRNG_SRC_RING,
+		.reg_start = {
+			HWIO_TCL_R0_SW2TCL_CMD_RING_BASE_LSB_ADDR(
+				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
+			HWIO_TCL_R2_SW2TCL_CMD_RING_HP_ADDR(
+				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
+		},
+		/* Single ring - provide ring size if multiple rings of this
+		 * type are supported */
+		.reg_size = {},
+	},
+	{ /* TCL_STATUS */
+		.start_ring_id = HAL_SRNG_TCL_STATUS,
+		.max_rings = 1,
+		.entry_size = (sizeof(struct tlv_32_hdr) +
+			sizeof(struct tcl_status_ring)) >> 2,
+		.lmac_ring = FALSE,
+		.ring_dir = HAL_SRNG_DST_RING,
+		.reg_start = {
+			HWIO_TCL_R0_TCL_STATUS1_RING_BASE_LSB_ADDR(
+				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
+			HWIO_TCL_R2_TCL_STATUS1_RING_HP_ADDR(
+				SEQ_WCSS_UMAC_MAC_TCL_REG_OFFSET),
+		},
+		/* Single ring - provide ring size if multiple rings of this
+		 * type are supported */
+		.reg_size = {},
+	},
+	{ /* CE_SRC */
+		.start_ring_id = HAL_SRNG_CE_0_SRC,
+		.max_rings = 12,
+		.entry_size = sizeof(struct ce_src_desc) >> 2,
+		.lmac_ring = FALSE,
+		.ring_dir = HAL_SRNG_SRC_RING,
+		.reg_start = {
+			HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR(
+				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET),
+			HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR(
+				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET),
+		},
+		.reg_size = {
+			SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET -
+				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET,
+			SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_SRC_REG_OFFSET -
+				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_SRC_REG_OFFSET,
+		},
+	},
+	{ /* CE_DST */
+		.start_ring_id = HAL_SRNG_CE_0_DST,
+		.max_rings = 12,
+		.entry_size = 8 >> 2,
+		/*TODO: entry_size above should actually be
+		 * sizeof(struct ce_dst_desc) >> 2, but couldn't find definition
+		 * of struct ce_dst_desc in HW header files
+		 */
+		.lmac_ring = FALSE,
+		.ring_dir = HAL_SRNG_SRC_RING,
+		.reg_start = {
+			HWIO_WFSS_CE_CHANNEL_DST_R0_DEST_RING_BASE_LSB_ADDR(
+				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
+			HWIO_WFSS_CE_CHANNEL_DST_R2_DEST_RING_HP_ADDR(
+				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
+		},
+		.reg_size = {
+			SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
+				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
+			SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
+				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
+		},
+	},
+	{ /* CE_DST_STATUS */
+		.start_ring_id = HAL_SRNG_CE_0_DST_STATUS,
+		.max_rings = 12,
+		.entry_size = sizeof(struct ce_stat_desc) >> 2,
+		.lmac_ring = FALSE,
+		.ring_dir = HAL_SRNG_DST_RING,
+		.reg_start = {
+			HWIO_WFSS_CE_CHANNEL_DST_R0_STATUS_RING_BASE_LSB_ADDR(
+				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
+			HWIO_WFSS_CE_CHANNEL_DST_R2_STATUS_RING_HP_ADDR(
+				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET),
+		},
+			/* TODO: check destination status ring registers */
+		.reg_size = {
+			SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
+				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
+			SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_1_CHANNEL_DST_REG_OFFSET -
+				SEQ_WCSS_UMAC_WFSS_CE_0_REG_WFSS_CE_0_CHANNEL_DST_REG_OFFSET,
+		},
+	},
+	{ /* WBM_IDLE_LINK */
+		.start_ring_id = HAL_SRNG_WBM_IDLE_LINK,
+		.max_rings = 1,
+		.entry_size = sizeof(struct wbm_link_descriptor_ring) >> 2,
+		.lmac_ring = FALSE,
+		.ring_dir = HAL_SRNG_SRC_RING,
+		.reg_start = {
+			HWIO_WBM_R0_WBM_IDLE_LINK_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+			HWIO_WBM_R2_WBM_IDLE_LINK_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		},
+		/* Single ring - provide ring size if multiple rings of this
+		 * type are supported */
+		.reg_size = {},
+	},
+	{ /* SW2WBM_RELEASE */
+		.start_ring_id = HAL_SRNG_WBM_SW_RELEASE,
+		.max_rings = 1,
+		.entry_size = sizeof(struct wbm_release_ring) >> 2,
+		.lmac_ring = FALSE,
+		.ring_dir = HAL_SRNG_SRC_RING,
+		.reg_start = {
+			HWIO_WBM_R0_SW_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+			HWIO_WBM_R2_SW_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		},
+		/* Single ring - provide ring size if multiple rings of this
+		 * type are supported */
+		.reg_size = {},
+	},
+	{ /* WBM2SW_RELEASE */
+		.start_ring_id = HAL_SRNG_WBM2SW0_RELEASE,
+		.max_rings = 4,
+		.entry_size = sizeof(struct wbm_release_ring) >> 2,
+		.lmac_ring = FALSE,
+		.ring_dir = HAL_SRNG_DST_RING,
+		.reg_start = {
+			HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+			HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		},
+		.reg_size = {
+			HWIO_WBM_R0_WBM2SW1_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) -
+				HWIO_WBM_R0_WBM2SW0_RELEASE_RING_BASE_LSB_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+			HWIO_WBM_R2_WBM2SW1_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET) -
+				HWIO_WBM_R2_WBM2SW0_RELEASE_RING_HP_ADDR(SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		},
+	},
+	{ /* RXDMA_BUF */
+		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA0_BUF,
+		.max_rings = 1,
+		/* TODO: Check if the additional IPA buffer ring needs to be
+		 * setup here (in which case max_rings should be set to 2),
+		 * or it will be setup by IPA host driver
+		 */
+		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
+		.lmac_ring = TRUE,
+		.ring_dir = HAL_SRNG_SRC_RING,
+		/* reg_start is not set because LMAC rings are not accessed
+		 * from host
+		 */
+		.reg_start = {},
+		.reg_size = {},
+	},
+	{ /* RXDMA_DST */
+		.start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW0,
+		.max_rings = 1,
+		.entry_size = sizeof(struct reo_entrance_ring) >> 2,
+		.lmac_ring =  TRUE,
+		.ring_dir = HAL_SRNG_DST_RING,
+		/* reg_start is not set because LMAC rings are not accessed
+		 * from host
+		 */
+		.reg_start = {},
+		.reg_size = {},
+	},
+	{ /* RXDMA_MONITOR_BUF */
+		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_BUF,
+		.max_rings = 1,
+		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
+		.lmac_ring = TRUE,
+		.ring_dir = HAL_SRNG_SRC_RING,
+		/* reg_start is not set because LMAC rings are not accessed
+		 * from host
+		 */
+		.reg_start = {},
+		.reg_size = {},
+	},
+	{ /* RXDMA_MONITOR_STATUS */
+		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF,
+		.max_rings = 1,
+		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
+		.lmac_ring = TRUE,
+		.ring_dir = HAL_SRNG_SRC_RING,
+		/* reg_start is not set because LMAC rings are not accessed
+		 * from host
+		 */
+		.reg_start = {},
+		.reg_size = {},
+	},
+	{ /* RXDMA_MONITOR_DST */
+		.start_ring_id = HAL_SRNG_WMAC1_RXDMA2SW1,
+		.max_rings = 1,
+		.entry_size = sizeof(struct reo_entrance_ring) >> 2,
+		.lmac_ring = TRUE,
+		.ring_dir = HAL_SRNG_DST_RING,
+		/* reg_start is not set because LMAC rings are not accessed
+		 * from host
+		 */
+		.reg_start = {},
+		.reg_size = {},
+	},
+};
+
+/* TODO: Need this interface from HIF layer */
+void *hif_get_dev_ba(void *hif_handle);
+
+/**
+ * hal_attach - Initialize HAL layer
+ * @hif_handle: Opaque HIF handle
+ * @qdf_dev: QDF device
+ *
+ * Return: Opaque HAL SOC handle
+ *		 NULL on failure
+ *
+ * This function should be called as part of HIF initialization (for accessing
+ * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
+ *
+ */
+void *hal_attach(void *hif_handle, qdf_device_t qdf_dev)
+{
+	struct hal_soc *hal;
+	int i;
+
+	hal = qdf_mem_malloc(sizeof(*hal));
+
+	if (!hal) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: hal_soc allocation failed\n", __func__);
+		goto fail0;
+	}
+	hal->hif_handle = hif_handle;
+	hal->dev_base_addr = hif_get_dev_ba(hif_handle);
+	hal->qdf_dev = qdf_dev;
+	hal->shadow_rdptr_mem_vaddr = (uint32_t *)qdf_mem_alloc_consistent(
+		qdf_dev, NULL, sizeof(*(hal->shadow_rdptr_mem_vaddr)) *
+		HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
+	if (!hal->shadow_rdptr_mem_paddr) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: hal->shadow_rdptr_mem_paddr allocation failed\n",
+			__func__);
+		goto fail1;
+	}
+
+	hal->shadow_wrptr_mem_vaddr =
+		(uint32_t *)qdf_mem_alloc_consistent(qdf_dev, NULL,
+		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
+		&(hal->shadow_wrptr_mem_paddr));
+	if (!hal->shadow_wrptr_mem_vaddr) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: hal->shadow_wrptr_mem_vaddr allocation failed\n",
+			__func__);
+		goto fail2;
+	}
+
+	for (i = 0; i < HAL_SRNG_ID_MAX; i++) {
+		hal->srng_list[i].initialized = 0;
+		hal->srng_list[i].ring_id = i;
+	}
+
+	return (void *)hal;
+
+fail2:
+	qdf_mem_free_consistent(hal->qdf_dev, NULL,
+		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
+		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
+fail1:
+	qdf_mem_free(hal);
+fail0:
+	return NULL;
+}
+
+/**
+ * hal_detach - Detach HAL layer
+ * @hal_soc: HAL SOC handle
+ *
+ * This function should be called as part of HIF detach
+ *
+ */
+extern void hal_detach(void *hal_soc)
+{
+	struct hal_soc *hal = (struct hal_soc *)hal_soc;
+
+	qdf_mem_free_consistent(hal->qdf_dev, NULL,
+		sizeof(*(hal->shadow_rdptr_mem_vaddr)) * HAL_SRNG_ID_MAX,
+		hal->shadow_rdptr_mem_vaddr, hal->shadow_rdptr_mem_paddr, 0);
+	qdf_mem_free_consistent(hal->qdf_dev, NULL,
+		sizeof(*(hal->shadow_wrptr_mem_vaddr)) * HAL_MAX_LMAC_RINGS,
+		hal->shadow_wrptr_mem_vaddr, hal->shadow_wrptr_mem_paddr, 0);
+	qdf_mem_free(hal);
+
+	return;
+}
+
+
+
+/**
+ * hal_srng_src_hw_init - Private function to initialize SRNG
+ * source ring HW
+ * @hal_soc: HAL SOC handle
+ * @srng: SRNG ring pointer
+ */
+static inline void hal_srng_src_hw_init(struct hal_soc *hal,
+	struct hal_srng *srng)
+{
+	uint32_t reg_val = 0;
+	uint64_t tp_addr = 0;
+
+	SRNG_SRC_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff);
+	reg_val = SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_BASE_ADDR_MSB),
+		((uint64_t)(srng->ring_base_paddr) >> 32)) |
+		SRNG_SM(SRNG_SRC_FLD(BASE_MSB, RING_SIZE),
+		srng->entry_size * srng->num_entries);
+	SRNG_SRC_REG_WRITE(srng, BASE_MSB, reg_val);
+
+	reg_val = SRNG_SM(SRNG_SRC_FLD(ID, RING_ID), srng->ring_id) |
+		SRNG_SM(SRNG_SRC_FLD(ID, ENTRY_SIZE), srng->entry_size);
+	SRNG_SRC_REG_WRITE(srng, ID, reg_val);
+
+	reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ?
+			SRNG_SM(SRNG_SRC_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) |
+			((srng->flags & HAL_SRNG_RING_PTR_SWAP) ?
+			SRNG_SM(SRNG_SRC_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) |
+			((srng->flags & HAL_SRNG_MSI_SWAP) ?
+			SRNG_SM(SRNG_SRC_FLD(MISC, MSI_SWAP_BIT), 1) : 0);
+
+	/* Loop count is not used for SRC rings */
+	reg_val |= SRNG_SM(SRNG_SRC_FLD(MISC, LOOPCNT_DISABLE), 1);
+
+	SRNG_SRC_REG_WRITE(srng, MISC, reg_val);
+
+	/**
+	 * Interrupt setup:
+	 * Default interrupt mode is 'pulse'. Need to setup SW_INTERRUPT_MODE
+	 * if level mode is required
+	 */
+	reg_val = 0;
+	if (srng->intr_timer_thres_us) {
+		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0,
+			INTERRUPT_TIMER_THRESHOLD),
+			srng->intr_timer_thres_us >> 3);
+	}
+
+	if (srng->intr_batch_cntr_thres_entries) {
+		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX0,
+			BATCH_COUNTER_THRESHOLD),
+			srng->intr_batch_cntr_thres_entries *
+			srng->entry_size);
+	}
+	SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX0, reg_val);
+
+	reg_val = 0;
+	if (srng->flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
+		reg_val |= SRNG_SM(SRNG_SRC_FLD(CONSUMER_INT_SETUP_IX1,
+			LOW_THRESHOLD), srng->u.src_ring.low_threshold);
+	}
+
+	SRNG_SRC_REG_WRITE(srng, CONSUMER_INT_SETUP_IX1, reg_val);
+
+	if (srng->flags & HAL_SRNG_MSI_INTR) {
+		SRNG_SRC_REG_WRITE(srng, MSI1_BASE_LSB,
+			srng->msi_addr & 0xffffffff);
+		reg_val = SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB, ADDR),
+			(uint64_t)(srng->msi_addr) >> 32) |
+			SRNG_SM(SRNG_SRC_FLD(MSI1_BASE_MSB,
+			MSI1_ENABLE), 1);
+		SRNG_SRC_REG_WRITE(srng, MSI1_BASE_MSB, reg_val);
+		SRNG_SRC_REG_WRITE(srng, MSI1_DATA, srng->msi_data);
+	}
+
+	tp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr +
+		((unsigned long)(srng->u.src_ring.tp_addr) -
+		(unsigned long)(hal->shadow_rdptr_mem_vaddr)));
+	SRNG_SRC_REG_WRITE(srng, TP_ADDR_LSB, tp_addr & 0xffffffff);
+	SRNG_SRC_REG_WRITE(srng, TP_ADDR_MSB, tp_addr >> 32);
+
+	/* Initialize head and tail pointers to indicate ring is empty */
+	SRNG_SRC_REG_WRITE(srng, HP, 0);
+	SRNG_SRC_REG_WRITE(srng, TP, 0);
+	*(srng->u.src_ring.tp_addr) = 0;
+}
+
+/**
+ * hal_srng_dst_hw_init - Private function to initialize SRNG
+ * destination ring HW
+ * @hal_soc: HAL SOC handle
+ * @srng: SRNG ring pointer
+ */
+static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
+	struct hal_srng *srng)
+{
+	uint32_t reg_val = 0;
+	uint64_t hp_addr = 0;
+
+	SRNG_DST_REG_WRITE(srng, BASE_LSB, srng->ring_base_paddr & 0xffffffff);
+	reg_val = SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_BASE_ADDR_MSB),
+		((uint64_t)(srng->ring_base_paddr) >> 32)) |
+		SRNG_SM(SRNG_DST_FLD(BASE_MSB, RING_SIZE),
+		srng->entry_size * srng->num_entries);
+	SRNG_DST_REG_WRITE(srng, BASE_MSB, reg_val);
+
+	reg_val = SRNG_SM(SRNG_DST_FLD(ID, RING_ID), srng->ring_id) |
+		SRNG_SM(SRNG_DST_FLD(ID, ENTRY_SIZE), srng->entry_size);
+	SRNG_DST_REG_WRITE(srng, ID, reg_val);
+
+	reg_val = ((srng->flags & HAL_SRNG_DATA_TLV_SWAP) ?
+			SRNG_SM(SRNG_DST_FLD(MISC, DATA_TLV_SWAP_BIT), 1) : 0) |
+			((srng->flags & HAL_SRNG_RING_PTR_SWAP) ?
+			SRNG_SM(SRNG_DST_FLD(MISC, HOST_FW_SWAP_BIT), 1) : 0) |
+			((srng->flags & HAL_SRNG_MSI_SWAP) ?
+			SRNG_SM(SRNG_DST_FLD(MISC, MSI_SWAP_BIT), 1) : 0);
+
+	SRNG_DST_REG_WRITE(srng, MISC, reg_val);
+
+	/*
+	 * Interrupt setup:
+	 * The default interrupt mode is 'pulse'. SW_INTERRUPT_MODE needs to
+	 * be set up explicitly if level mode is required.
+	 */
+	reg_val = 0;
+	if (srng->intr_timer_thres_us) {
+		reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP,
+			INTERRUPT_TIMER_THRESHOLD),
+			srng->intr_timer_thres_us >> 3);
+	}
+
+	if (srng->intr_batch_cntr_thres_entries) {
+		reg_val |= SRNG_SM(SRNG_DST_FLD(PRODUCER_INT_SETUP,
+			BATCH_COUNTER_THRESHOLD),
+			srng->intr_batch_cntr_thres_entries *
+			srng->entry_size);
+	}
+	SRNG_DST_REG_WRITE(srng, PRODUCER_INT_SETUP, reg_val);
+
+	if (srng->flags & HAL_SRNG_MSI_INTR) {
+		SRNG_DST_REG_WRITE(srng, MSI1_BASE_LSB,
+			srng->msi_addr & 0xffffffff);
+		reg_val = SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB, ADDR),
+			(uint64_t)(srng->msi_addr) >> 32) |
+			SRNG_SM(SRNG_DST_FLD(MSI1_BASE_MSB,
+			MSI1_ENABLE), 1);
+		SRNG_DST_REG_WRITE(srng, MSI1_BASE_MSB, reg_val);
+		SRNG_DST_REG_WRITE(srng, MSI1_DATA, srng->msi_data);
+	}
+
+	hp_addr = (uint64_t)(hal->shadow_rdptr_mem_paddr +
+		((unsigned long)(srng->u.dst_ring.hp_addr) -
+		(unsigned long)(hal->shadow_rdptr_mem_vaddr)));
+	SRNG_DST_REG_WRITE(srng, HP_ADDR_LSB, hp_addr & 0xffffffff);
+	SRNG_DST_REG_WRITE(srng, HP_ADDR_MSB, hp_addr >> 32);
+
+	/* Initialize head and tail pointers to indicate ring is empty */
+	SRNG_DST_REG_WRITE(srng, HP, 0);
+	SRNG_DST_REG_WRITE(srng, TP, 0);
+	*(srng->u.dst_ring.hp_addr) = 0;
+}
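
In both init paths the hardware is programmed with the DMA address of the ring's slot inside the shared shadow pointer block: the slot's offset within the virtual block is applied to the block's physical base. A standalone sketch of that translation follows; the struct and names are illustrative only, not part of the HAL API:

#include <stdint.h>

/* Illustrative only: a DMA-coherent block of per-ring shadow pointers
 * with known virtual and physical base addresses.
 */
struct shadow_block {
	uint32_t *vaddr_base;	/* virtual base of the coherent block */
	uint64_t paddr_base;	/* physical (DMA) base of the same block */
};

/* Translate one slot's virtual address into the physical address that
 * the hardware should be programmed to update.
 */
static uint64_t shadow_slot_paddr(const struct shadow_block *blk,
				  const uint32_t *slot_vaddr)
{
	return blk->paddr_base +
		((uintptr_t)slot_vaddr - (uintptr_t)blk->vaddr_base);
}

int main(void)
{
	uint32_t shadow[4];
	struct shadow_block blk = { shadow, 0x80000000ULL };

	/* Slot 2 sits 8 bytes into the block, so its DMA address is
	 * 0x80000008 in this example.
	 */
	return shadow_slot_paddr(&blk, &shadow[2]) == 0x80000008ULL ? 0 : 1;
}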
+
+/**
+ * hal_srng_hw_init - Private function to initialize SRNG HW
+ * @hal: HAL SOC handle
+ * @srng: SRNG ring pointer
+ */
+static inline void hal_srng_hw_init(struct hal_soc *hal,
+	struct hal_srng *srng)
+{
+	if (srng->ring_dir == HAL_SRNG_SRC_RING)
+		hal_srng_src_hw_init(hal, srng);
+	else
+		hal_srng_dst_hw_init(hal, srng);
+}
+
+/**
+ * hal_srng_setup - Initialize HW SRNG ring.
+ * @hal_soc: Opaque HAL SOC handle
+ * @ring_type: one of the types from hal_ring_type
+ * @ring_num: Ring number if there are multiple rings of the same type (starting
+ * from 0)
+ * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
+ * @ring_params: SRNG ring params in hal_srng_params structure.
+ *
+ * Callers are expected to allocate contiguous ring memory of size
+ * 'num_entries * entry_size' bytes and pass the physical and virtual base
+ * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in
+ * hal_srng_params structure. Ring base address should be 8 byte aligned
+ * and size of each ring entry should be queried using the API
+ * hal_srng_get_entrysize
+ *
+ * Return: Opaque pointer to ring on success
+ *		 NULL on failure (if given ring is not available)
+ */
+void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
+	int mac_id, struct hal_srng_params *ring_params)
+{
+	int ring_id;
+	struct hal_soc *hal = (struct hal_soc *)hal_soc;
+	struct hal_srng *srng;
+	struct hal_hw_srng_config *ring_config =
+		HAL_SRNG_CONFIG(hal, ring_type);
+	void *dev_base_addr;
+	int i;
+
+	if (ring_num >= ring_config->max_rings) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: ring_num %d exceeds the maximum number of supported rings\n",
+			__func__, ring_num);
+		return NULL;
+	}
+
+	if (ring_config->lmac_ring) {
+		ring_id = ring_config->start_ring_id + ring_num +
+			(mac_id * HAL_MAX_RINGS_PER_LMAC);
+	} else {
+		ring_id = ring_config->start_ring_id + ring_num;
+	}
+
+	/* TODO: Should we allocate srng structures dynamically? */
+	srng = &(hal->srng_list[ring_id]);
+
+	if (srng->initialized) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: Ring (ring_type %d, ring_num %d) is already initialized\n",
+			__func__, ring_type, ring_num);
+		return NULL;
+	}
+
+	dev_base_addr = hal->dev_base_addr;
+	srng->ring_id = ring_id;
+	srng->ring_dir = ring_config->ring_dir;
+	srng->ring_base_paddr = ring_params->ring_base_paddr;
+	srng->ring_base_vaddr = ring_params->ring_base_vaddr;
+	srng->entry_size = ring_config->entry_size;
+	srng->num_entries = ring_params->num_entries;
+	srng->ring_size = srng->num_entries * srng->entry_size;
+	srng->ring_size_mask = srng->ring_size - 1;
+	srng->msi_addr = ring_params->msi_addr;
+	srng->msi_data = ring_params->msi_data;
+	srng->intr_timer_thres_us = ring_params->intr_timer_thres_us;
+	srng->intr_batch_cntr_thres_entries =
+		ring_params->intr_batch_cntr_thres_entries;
+
+	for (i = 0; i < MAX_SRNG_REG_GROUPS; i++) {
+		srng->hwreg_base[i] = dev_base_addr + ring_config->reg_start[i]
+			+ (ring_num * ring_config->reg_size[i]);
+	}
+
+	/* Zero out the entire ring memory; entry_size is in 4-byte words */
+	qdf_mem_zero(srng->ring_base_vaddr, (srng->entry_size *
+		srng->num_entries) << 2);
+
+	srng->flags = ring_params->flags;
+#ifdef BIG_ENDIAN_HOST
+	/* TODO: See if we should get these flags from the caller */
+	srng->flags |= HAL_SRNG_DATA_TLV_SWAP;
+	srng->flags |= HAL_SRNG_MSI_SWAP;
+	srng->flags |= HAL_SRNG_RING_PTR_SWAP;
+#endif
+	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
+		srng->u.src_ring.hp = 0;
+		srng->u.src_ring.reap_hp = srng->ring_size -
+			srng->entry_size;
+		srng->u.src_ring.tp_addr =
+			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
+		srng->u.src_ring.low_threshold = ring_params->low_threshold;
+		if (ring_config->lmac_ring) {
+			/* For LMAC rings, head pointer updates will be done
+			 * through FW by writing to a shared memory location
+			 */
+			srng->u.src_ring.hp_addr =
+				&(hal->shadow_wrptr_mem_vaddr[ring_id -
+					HAL_SRNG_LMAC1_ID_START]);
+			srng->flags |= HAL_SRNG_LMAC_RING;
+		} else {
+			srng->u.src_ring.hp_addr = SRNG_SRC_ADDR(srng, HP);
+		}
+	} else {
+		/* During initialization loop count in all the descriptors
+		 * will be set to zero, and HW will set it to 1 on completing
+		 * descriptor update in first loop, and increments it by 1 on
+		 * subsequent loops (loop count wraps around after reaching
+		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
+		 * loop count in descriptors updated by HW (to be processed
+		 * by SW).
+		 */
+		srng->u.dst_ring.loop_cnt = 1;
+		srng->u.dst_ring.tp = 0;
+		srng->u.dst_ring.hp_addr =
+			&(hal->shadow_rdptr_mem_vaddr[ring_id]);
+		if (ring_config->lmac_ring) {
+			/* For LMAC rings, tail pointer updates will be done
+			 * through FW by writing to a shared memory location
+			 */
+			srng->u.dst_ring.tp_addr =
+				&(hal->shadow_wrptr_mem_vaddr[ring_id -
+				HAL_SRNG_LMAC1_ID_START]);
+			srng->flags |= HAL_SRNG_LMAC_RING;
+		} else {
+			srng->u.dst_ring.tp_addr = SRNG_DST_ADDR(srng, TP);
+		}
+	}
+
+	if (!(ring_config->lmac_ring))
+		hal_srng_hw_init(hal, srng);
+
+	SRNG_LOCK_INIT(&srng->lock);
+
+	return (void *)srng;
+}
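
The kernel-doc above places the ring memory contract on the caller: query the entry size, allocate coherent memory of num_entries * entry_size bytes (8-byte aligned, which coherent allocations normally satisfy), and pass both addresses through hal_srng_params. A hedged sketch of such a caller follows; the surrounding function name, the ring size, and the coalescing thresholds are assumptions for illustration, not something this patch defines. A real caller would also free the ring memory if hal_srng_setup() returns NULL; that path is omitted for brevity.

/* Illustrative caller, not part of this patch: sets up REO destination
 * ring 0. The size and coalescing thresholds are example values only.
 */
static void *dp_reo_dst_ring_setup_example(void *hal_soc, qdf_device_t osdev)
{
	struct hal_srng_params ring_params = {0};
	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
	uint32_t num_entries = 1024;

	ring_params.num_entries = num_entries;
	ring_params.ring_base_vaddr = qdf_mem_alloc_consistent(
		osdev, NULL, num_entries * entry_size,
		&ring_params.ring_base_paddr);
	if (!ring_params.ring_base_vaddr)
		return NULL;

	/* Coalesce interrupts: fire after ~8us or after one entry */
	ring_params.intr_timer_thres_us = 8;
	ring_params.intr_batch_cntr_thres_entries = 1;

	return hal_srng_setup(hal_soc, REO_DST, 0, 0, &ring_params);
}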
+
+/**
+ * hal_srng_cleanup - Deinitialize HW SRNG ring.
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_srng: Opaque HAL SRNG pointer
+ */
+void hal_srng_cleanup(void *hal_soc, void *hal_srng)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_srng;
+
+	SRNG_LOCK_DESTROY(&srng->lock);
+	srng->initialized = 0;
+}
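
hal_srng_cleanup() only tears down HAL's own state (the lock and the initialized flag); the ring memory remains owned by the caller. A short sketch of a matching teardown for the setup example above, assuming the same qdf_mem_free_consistent() call this file already uses in hal_detach():

	/* Illustrative teardown, mirroring the setup sketch above: HAL
	 * releases its state, the caller frees the ring memory it allocated.
	 */
	hal_srng_cleanup(hal_soc, hal_ring);
	qdf_mem_free_consistent(osdev, NULL, num_entries * entry_size,
				ring_base_vaddr, ring_base_paddr, 0);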
+
+/**
+ * hal_srng_get_entrysize - Returns size of ring entry in bytes
+ * @hal_soc: Opaque HAL SOC handle
+ * @ring_type: one of the types from hal_ring_type
+ *
+ */
+uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type)
+{
+	struct hal_soc *hal = (struct hal_soc *)hal_soc;
+	struct hal_hw_srng_config *ring_config =
+		HAL_SRNG_CONFIG(hal, ring_type);
+
+	/* entry_size is stored in 4-byte words; convert to bytes */
+	return ring_config->entry_size << 2;
+}
+
+/**
+ * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @hal_ring: Ring pointer (Source or Destination ring)
+ * @ring_params: SRNG parameters will be returned through this structure
+ */
+void hal_get_srng_params(void *hal_soc, void *hal_ring,
+	struct hal_srng_params *ring_params)
+{
+	struct hal_srng *srng = (struct hal_srng *)hal_ring;
+
+	ring_params->ring_base_paddr = srng->ring_base_paddr;
+	ring_params->ring_base_vaddr = srng->ring_base_vaddr;
+	ring_params->num_entries = srng->num_entries;
+	ring_params->msi_addr = srng->msi_addr;
+	ring_params->msi_data = srng->msi_data;
+	ring_params->intr_timer_thres_us = srng->intr_timer_thres_us;
+	ring_params->intr_batch_cntr_thres_entries =
+		srng->intr_batch_cntr_thres_entries;
+	ring_params->low_threshold = srng->u.src_ring.low_threshold;
+	ring_params->flags = srng->flags;
+}
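
A typical consumer of hal_get_srng_params() reads back what was programmed at setup time, for example to inspect a ring's MSI configuration. A brief hypothetical sketch; the helper name and the qdf_print() output format are chosen arbitrarily for illustration:

/* Hypothetical helper, not part of this patch: dump the MSI programming
 * of an already-configured ring.
 */
static void hal_ring_dump_msi_example(void *hal_soc, void *hal_ring)
{
	struct hal_srng_params params;

	hal_get_srng_params(hal_soc, hal_ring, &params);
	qdf_print("msi_addr 0x%llx msi_data 0x%x\n",
		  (unsigned long long)params.msi_addr, params.msi_data);
}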