
qcacmn: Add QDF OS abstraction convergence

Converge the ADF and CDF APIs and move them to the
QDF folder. The MCL/WIN drivers use this converged QDF
module for OS abstraction.

Change-Id: I1d0cdfd8730a5c021aaa50b7dc8549d491d760b3
CRs-Fixed: 981187
Author: Chouhan, Anurag (9 years ago)
Commit: 5776318d19
61 changed files with 12242 additions and 8985 deletions
  1. qdf/inc/osdep.h (+56 -170)
  2. qdf/inc/qdf_atomic.h (+46 -44)
  3. qdf/inc/qdf_defer.h (+171 -54)
  4. qdf/inc/qdf_event.h (+18 -108)
  5. qdf/inc/qdf_list.h (+55 -51)
  6. qdf/inc/qdf_lock.h (+170 -192)
  7. qdf/inc/qdf_mc_timer.h (+112 -113)
  8. qdf/inc/qdf_mem.h (+299 -0)
  9. qdf/inc/qdf_memory.h (+0 -262)
  10. qdf/inc/qdf_module.h (+68 -0)
  11. qdf/inc/qdf_nbuf.h (+386 -306)
  12. qdf/inc/qdf_net_types.h (+433 -58)
  13. qdf/inc/qdf_perf.h (+119 -0)
  14. qdf/inc/qdf_status.h (+94 -77)
  15. qdf/inc/qdf_threads.h (+10 -47)
  16. qdf/inc/qdf_time.h (+84 -84)
  17. qdf/inc/qdf_timer.h (+44 -32)
  18. qdf/inc/qdf_trace.h (+217 -139)
  19. qdf/inc/qdf_types.h (+285 -254)
  20. qdf/inc/qdf_util.h (+193 -167)
  21. qdf/linux/src/i_osdep.h (+200 -0)
  22. qdf/linux/src/i_qdf_atomic.h (+146 -0)
  23. qdf/linux/src/i_qdf_defer.h (+323 -0)
  24. qdf/linux/src/i_qdf_event.h (+17 -24)
  25. qdf/linux/src/i_qdf_list.h (+62 -0)
  26. qdf/linux/src/i_qdf_lock.h (+336 -0)
  27. qdf/linux/src/i_qdf_mc_timer.h (+13 -14)
  28. qdf/linux/src/i_qdf_mem.h (+213 -0)
  29. qdf/linux/src/i_qdf_module.h (+63 -0)
  30. qdf/linux/src/i_qdf_nbuf.h (+1569 -0)
  31. qdf/linux/src/i_qdf_net_types.h (+52 -0)
  32. qdf/linux/src/i_qdf_perf.h (+88 -0)
  33. qdf/linux/src/i_qdf_time.h (+68 -48)
  34. qdf/linux/src/i_qdf_timer.h (+43 -41)
  35. qdf/linux/src/i_qdf_trace.h (+94 -0)
  36. qdf/linux/src/i_qdf_types.h (+292 -0)
  37. qdf/linux/src/i_qdf_util.h (+239 -0)
  38. qdf/linux/src/qdf_defer.c (+84 -0)
  39. qdf/linux/src/qdf_event.c (+82 -92)
  40. qdf/linux/src/qdf_list.c (+240 -0)
  41. qdf/linux/src/qdf_lock.c (+660 -0)
  42. qdf/linux/src/qdf_mc_timer.c (+702 -0)
  43. qdf/linux/src/qdf_mem.c (+951 -0)
  44. qdf/linux/src/qdf_module.c (+34 -16)
  45. qdf/linux/src/qdf_nbuf.c (+1536 -0)
  46. qdf/linux/src/qdf_perf.c (+195 -0)
  47. qdf/linux/src/qdf_threads.c (+26 -28)
  48. qdf/linux/src/qdf_trace.c (+1054 -0)
  49. qdf/src/i_qdf_atomic.h (+0 -90)
  50. qdf/src/i_qdf_defer.h (+0 -99)
  51. qdf/src/i_qdf_lock.h (+0 -255)
  52. qdf/src/i_qdf_nbuf.h (+0 -1064)
  53. qdf/src/i_qdf_trace.h (+0 -145)
  54. qdf/src/i_qdf_types.h (+0 -234)
  55. qdf/src/i_qdf_util.h (+0 -107)
  56. qdf/src/qdf_list.c (+0 -236)
  57. qdf/src/qdf_lock.c (+0 -647)
  58. qdf/src/qdf_mc_timer.c (+0 -797)
  59. qdf/src/qdf_memory.c (+0 -861)
  60. qdf/src/qdf_nbuf.c (+0 -1017)
  61. qdf/src/qdf_trace.c (+0 -1012)
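
The overall pattern of the convergence: each public header under qdf/inc exposes qdf_* types and inline wrappers, and each wrapper forwards to a __qdf_* primitive supplied by the OS-specific shim header (i_qdf_*.h; the Linux versions live under qdf/linux/src). A minimal sketch of that layering, using a hypothetical qdf_foo module rather than any file in this change:

/* qdf/inc/qdf_foo.h -- OS-agnostic API (illustrative only) */
#include <i_qdf_foo.h>                /* resolved to the per-OS implementation */

typedef __qdf_foo_t qdf_foo_t;        /* opaque type provided by the OS shim */

static inline void qdf_foo_init(qdf_foo_t *foo)
{
	__qdf_foo_init(foo);          /* forward to the OS-specific primitive */
}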

+ 56 - 170
qdf/inc/osdep.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -25,49 +25,25 @@
  * to the Linux Foundation.
  */
 
-#ifndef _OSDEP_H
-#define _OSDEP_H
-
-#include <cdf_types.h>
-#include <cdf_memory.h>
-#include <cdf_lock.h>
-#include <cdf_time.h>
-#include <cdf_softirq_timer.h>
-#include <cdf_defer.h>
-#include <cdf_nbuf.h>
-#include <cds_if_upperproto.h>
-
-#include <cds_queue.h>
-
 /**
- * enum ath_hal_bus_type - Supported Bus types
- * @HAL_BUS_TYPE_PCI: PCI Bus
- * @HAL_BUS_TYPE_AHB: AHB Bus
- * @HAL_BUS_TYPE_SNOC: SNOC Bus
- * @HAL_BUS_TYPE_SIM: Simulator
+ * DOC: osdep
+ * This file provides OS abstraction for osdependent APIs.
  */
-enum ath_hal_bus_type {
-	HAL_BUS_TYPE_PCI,
-	HAL_BUS_TYPE_AHB,
-	HAL_BUS_TYPE_SNOC,
-	HAL_BUS_TYPE_SIM
-};
 
-/**
- * sturct hal_bus_context - Bus to hal context handoff
- * @bc_tag:     bus context tag
- * @bc_handle:  bus context handle
- * @bc_bustype: bus type
- */
-typedef struct hal_bus_context {
-        int bc_tag;
-        char *bc_handle;
-        enum ath_hal_bus_type bc_bustype;
-} HAL_BUS_CONTEXT;
+#ifndef _OSDEP_H
+#define _OSDEP_H
 
-#define INLINE   inline
+#include <qdf_types.h>
+#include <qdf_mem.h>
+#include <qdf_lock.h>
+#include <qdf_time.h>
+#include <qdf_timer.h>
+#include <qdf_defer.h>
+#include <qdf_nbuf.h>
+#include <i_osdep.h>
 
-/* ATH_DEBUG -
+/*
+ * ATH_DEBUG -
  * Control whether debug features (printouts, assertions) are compiled
  * into the driver.
  */
@@ -77,7 +53,7 @@ typedef struct hal_bus_context {
 
 #if ATH_DEBUG
 #ifndef ASSERT
-#define ASSERT(expr)  cdf_assert(expr)
+#define ASSERT(expr)  qdf_assert(expr)
 #endif
 #else
 #define ASSERT(expr)
@@ -102,25 +78,15 @@ typedef struct hal_bus_context {
  * Deduce if tasklets are available.  If not then
  * fall back to using the immediate work queue.
  */
-#define ath_sysctl_decl(f, ctl, write, filp, buffer, lenp, ppos) \
-	f(struct ctl_table *ctl, int write, void *buffer,		     \
-	  size_t *lenp, loff_t *ppos)
-#define ATH_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \
-	proc_dointvec(ctl, write, buffer, lenp, ppos)
-#define ATH_SYSCTL_PROC_DOSTRING(ctl, write, filp, buffer, lenp, ppos) \
-	proc_dostring(ctl, write, filp, buffer, lenp, ppos)
+#define qdf_sysctl_decl(f, ctl, write, filp, buffer, lenp, ppos) \
+	f(struct ctl_table *ctl, int write, void *buffer, \
+	size_t *lenp, loff_t *ppos)
 
-/*
- * Byte Order stuff
- */
-#define    le16toh(_x)    le16_to_cpu(_x)
-#define    htole16(_x)    cpu_to_le16(_x)
-#define    htobe16(_x)    cpu_to_be16(_x)
-#define    le32toh(_x)    le32_to_cpu(_x)
-#define    htole32(_x)    cpu_to_le32(_x)
-#define    be16toh(_x)    be16_to_cpu(_x)
-#define    be32toh(_x)    be32_to_cpu(_x)
-#define    htobe32(_x)    cpu_to_be32(_x)
+#define QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \
+	__QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos)
+
+#define QDF_SYSCTL_PROC_DOSTRING(ctl, write, filp, buffer, lenp, ppos) \
+	__QDF_SYSCTL_PROC_DOSTRING(ctl, write, filp, buffer, lenp, ppos)
 
 #define EOK    (0)
 
@@ -138,157 +104,77 @@ typedef struct hal_bus_context {
 /*
  * Normal Delay functions. Time specified in microseconds.
  */
-#define OS_DELAY(_us)                     cdf_udelay(_us)
+#define OS_DELAY(_us)                     qdf_udelay(_us)
 
 /*
  * memory data manipulation functions.
  */
-#define OS_MEMCPY(_dst, _src, _len)       cdf_mem_copy(_dst, _src, _len)
-#define OS_MEMMOVE(_dst, _src, _len)      cdf_mem_move(_dst, _src, _len)
-#define OS_MEMZERO(_buf, _len)            cdf_mem_zero(_buf, _len)
-#define OS_MEMSET(_buf, _ch, _len)        cdf_mem_set(_buf, _len, _ch)
-#define OS_MEMCMP(_mem1, _mem2, _len)     cdf_mem_compare(_mem1, _mem2, _len)
-
-#ifdef CONFIG_SMP
-/* Undo the one provided by the kernel to debug spin locks */
-#undef spin_lock
-#undef spin_unlock
-#undef spin_trylock
-
-#define spin_lock(x) \
-	do { \
-		spin_lock_bh(x); \
-	} while (0)
-
-#define spin_unlock(x) \
-	do { \
-		if (!spin_is_locked(x)) { \
-			WARN_ON(1); \
-			printk(KERN_EMERG " %s:%d unlock addr=%p, %s \n", __func__, __LINE__, x, \
-			       !spin_is_locked(x) ? "Not locked" : "");	\
-		} \
-		spin_unlock_bh(x); \
-	} while (0)
-
-#define spin_trylock(x) spin_trylock_bh(x)
-
-#define OS_SUPPORT_ASYNC_Q 1    /* support for handling asyn function calls */
-
-#else
-#define OS_SUPPORT_ASYNC_Q 0
-#endif /* ifdef CONFIG_SMP */
+#define OS_MEMCPY(_dst, _src, _len)       qdf_mem_copy(_dst, _src, _len)
+#define OS_MEMMOVE(_dst, _src, _len)      qdf_mem_move(_dst, _src, _len)
+#define OS_MEMZERO(_buf, _len)            qdf_mem_zero(_buf, _len)
+#define OS_MEMSET(_buf, _ch, _len)        qdf_mem_set(_buf, _len, _ch)
+#define OS_MEMCMP(_mem1, _mem2, _len)     qdf_mem_cmp(_mem1, _mem2, _len)
 
 
 /*
  * System time interface
  */
-typedef cdf_time_t systime_t;
-typedef cdf_time_t systick_t;
+typedef qdf_time_t systime_t;
+typedef qdf_time_t systick_t;
 
-static INLINE cdf_time_t os_get_timestamp(void)
+/**
+ * os_get_timestamp() - gives the timestamp in ticks
+ * Return: unsigned long
+ */
+static inline qdf_time_t os_get_timestamp(void)
 {
-	return cdf_system_ticks();      /* Fix double conversion from jiffies to ms */
+	/* Fix double conversion from jiffies to ms */
+	return qdf_system_ticks();
 }
 
 struct _NIC_DEV;
 
-typedef struct _NIC_DEV *osdev_t;
-
-typedef struct timer_list os_timer_t;
-
-typedef struct _os_mesg_t {
-	STAILQ_ENTRY(_os_mesg_t) mesg_next;
-	uint16_t mesg_type;
-	uint16_t mesg_len;
-	/* followed by mesg_len bytes */
-} os_mesg_t;
-
-typedef void (*os_mesg_handler_t)(void *ctx,
-				  uint16_t mesg_type,
-				  uint16_t mesg_len, void *mesg);
-
-typedef struct {
-	osdev_t dev_handle;
-	int32_t num_queued;
-	int32_t mesg_len;
-	uint8_t *mesg_queue_buf;
-	STAILQ_HEAD(, _os_mesg_t) mesg_head;    /* queued mesg buffers */
-	STAILQ_HEAD(, _os_mesg_t) mesg_free_head;       /* free mesg buffers  */
-	spinlock_t lock;
-	spinlock_t ev_handler_lock;
-#ifdef USE_SOFTINTR
-	void *_task;
-#else
-	os_timer_t _timer;
-#endif
-	os_mesg_handler_t handler;
-	void *ctx;
-	uint8_t is_synchronous : 1;
-} os_mesg_queue_t;
-
-/*
- * Definition of OS-dependent device structure.
- * It'll be opaque to the actual ATH layer.
- */
-struct _NIC_DEV {
-	void *bdev;             /* bus device handle */
-	struct net_device *netdev;      /* net device handle (wifi%d) */
-	cdf_bh_t intr_tq;       /* tasklet */
-	struct net_device_stats devstats;       /* net device statisitics */
-	HAL_BUS_CONTEXT bc;
-#ifdef ATH_PERF_PWR_OFFLOAD
-	struct device *device;  /* generic device */
-	wait_queue_head_t event_queue;
-#endif /* PERF_PWR_OFFLOAD */
-#if OS_SUPPORT_ASYNC_Q
-	os_mesg_queue_t async_q;        /* mesgq to handle async calls */
-#endif
-#ifdef ATH_BUS_PM
-	uint8_t isDeviceAsleep;
-#endif /* ATH_BUS_PM */
-};
-
-static INLINE unsigned char *os_malloc(osdev_t pNicDev,
-				       unsigned long ulSizeInBytes, int gfp)
+static inline unsigned char *os_malloc(osdev_t pNicDev,
+					unsigned long ulSizeInBytes, int gfp)
 {
-	return cdf_mem_malloc(ulSizeInBytes);
+		return qdf_mem_malloc(ulSizeInBytes);
 }
 
-#define OS_FREE(_p)                     cdf_mem_free(_p)
+#define OS_FREE(_p)                     qdf_mem_free(_p)
 
 #define OS_DMA_MEM_CONTEXT(context)	    \
-	dma_addr_t context;
+		dma_addr_t context;
 
 #define OS_GET_DMA_MEM_CONTEXT(var, field)  \
-	&(var->field)
+		&(var->field)
 
 #define OS_COPY_DMA_MEM_CONTEXT(dst, src)   \
-	*dst = *src
+		*dst = *src
 
 #define OS_ZERO_DMA_MEM_CONTEXT(context)   \
-	*context = 0
+		*context = 0
 
 /*
  * Timer Interfaces. Use these macros to declare timer
  * and retrieve timer argument. This is mainly for resolving
  * different argument types for timer function in different OS.
  */
-#define OS_DECLARE_TIMER(_fn)                  void _fn(void *)
-
-#define os_timer_func(_fn)		       \
+#define os_timer_func(_fn) \
 	void _fn(void *timer_arg)
 
-#define OS_GET_TIMER_ARG(_arg, _type)	       \
-	(_arg) = (_type)(timer_arg)
+#define OS_GET_TIMER_ARG(_arg, _type) \
+	((_arg) = (_type)(timer_arg))
+
+#define OS_DECLARE_TIMER(_fn)                  void _fn(void *)
 
 #define OS_INIT_TIMER(_osdev, _timer, _fn, _ctx, type)  \
-		cdf_softirq_timer_init(_osdev, _timer, _fn, _ctx, type)
+			qdf_timer_init(_osdev, _timer, _fn, _ctx, type)
 
-#define OS_SET_TIMER(_timer, _ms)      cdf_softirq_timer_mod(_timer, _ms)
+#define OS_SET_TIMER(_timer, _ms)      qdf_timer_mod(_timer, _ms)
 
-#define OS_CANCEL_TIMER(_timer)        cdf_softirq_timer_cancel(_timer)
+#define OS_CANCEL_TIMER(_timer)        qdf_timer_stop(_timer)
 
-#define OS_FREE_TIMER(_timer)          cdf_softirq_timer_cancel(_timer)
+#define OS_FREE_TIMER(_timer)          qdf_timer_stop(_timer)
 
 /*
  * These are required for network manager support

+ 46 - 44
qdf/inc/qdf_atomic.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -26,127 +26,129 @@
  */
 
 /**
- * DOC: cdf_atomic.h
- * This file abstracts an atomic counter.
+ * DOC: qdf_atomic.h
+ * This file provides OS abstraction for atomic APIs.
  */
 
-#ifndef _CDF_ATOMIC_H
-#define _CDF_ATOMIC_H
+#ifndef _QDF_ATOMIC_H
+#define _QDF_ATOMIC_H
 
-#include <i_cdf_atomic.h>
+#include <i_qdf_atomic.h>
 
 /**
- * cdf_atomic_t - atomic type of variable
+ * qdf_atomic_t - atomic type of variable
  *
  * Use this when you want a simple resource counter etc. which is atomic
  * across multiple CPU's. These maybe slower than usual counters on some
  * platforms/OS'es, so use them with caution.
  */
 
-typedef __cdf_atomic_t cdf_atomic_t;
+typedef __qdf_atomic_t qdf_atomic_t;
 
 /**
- * cdf_atomic_init() - initialize an atomic type variable
- * @v:	A pointer to an opaque atomic variable
+ * qdf_atomic_init() - initialize an atomic type variable
+ * @v: A pointer to an opaque atomic variable
  *
  * Return: None
  */
-static inline void cdf_atomic_init(cdf_atomic_t *v)
+static inline QDF_STATUS qdf_atomic_init(qdf_atomic_t *v)
 {
-	__cdf_atomic_init(v);
+	return __qdf_atomic_init(v);
 }
 
 /**
- * cdf_atomic_read() - read the value of an atomic variable
- * @v:	A pointer to an opaque atomic variable
+ * qdf_atomic_read() - read the value of an atomic variable
+ * @v: A pointer to an opaque atomic variable
  *
  * Return: The current value of the variable
  */
-static inline int32_t cdf_atomic_read(cdf_atomic_t *v)
+static inline int32_t qdf_atomic_read(qdf_atomic_t *v)
 {
-	return __cdf_atomic_read(v);
+	return __qdf_atomic_read(v);
 }
 
 /**
- * cdf_atomic_inc() - increment the value of an atomic variable
- * @v:	A pointer to an opaque atomic variable
+ * qdf_atomic_inc() - increment the value of an atomic variable
+ * @v: A pointer to an opaque atomic variable
  *
  * Return: None
  */
-static inline void cdf_atomic_inc(cdf_atomic_t *v)
+static inline void qdf_atomic_inc(qdf_atomic_t *v)
 {
-	__cdf_atomic_inc(v);
+	__qdf_atomic_inc(v);
 }
 
 /**
- * cdf_atomic_dec() - decrement the value of an atomic variable
- * @v:	A pointer to an opaque atomic variable
+ * qdf_atomic_dec() - decrement the value of an atomic variable
+ * @v: A pointer to an opaque atomic variable
  *
  * Return: None
  */
-static inline void cdf_atomic_dec(cdf_atomic_t *v)
+static inline void qdf_atomic_dec(qdf_atomic_t *v)
 {
-	__cdf_atomic_dec(v);
+	__qdf_atomic_dec(v);
 }
 
 /**
- * cdf_atomic_add() - add a value to the value of an atomic variable
- * @v:	A pointer to an opaque atomic variable
- * @i:	The amount by which to increase the atomic counter
+ * qdf_atomic_add() - add a value to the value of an atomic variable
+ * @i: The amount by which to increase the atomic counter
+ * @v: A pointer to an opaque atomic variable
  *
  * Return: None
  */
-static inline void cdf_atomic_add(int i, cdf_atomic_t *v)
+static inline void qdf_atomic_add(int i, qdf_atomic_t *v)
 {
-	__cdf_atomic_add(i, v);
+	__qdf_atomic_add(i, v);
 }
 
 /**
- * cdf_atomic_sub() - Subtract a value from an atomic variable.
+ * qdf_atomic_sub() - Subtract a value from an atomic variable
  * @i: the amount by which to decrease the atomic counter
  * @v: a pointer to an opaque atomic variable
  *
  * Return: none
  */
-static inline void cdf_atomic_sub(int i, cdf_atomic_t *v)
+static inline void qdf_atomic_sub(int i, qdf_atomic_t *v)
 {
-	__cdf_atomic_sub(i, v);
+	__qdf_atomic_sub(i, v);
 }
 
 /**
- * cdf_atomic_dec_and_test() - decrement an atomic variable and check if the
- *				new value is zero
+ * qdf_atomic_dec_and_test() - decrement an atomic variable and check if the
+ * new value is zero
  * @v: A pointer to an opaque atomic variable
  *
  * Return:
- *    true (non-zero) if the new value is zero,
- *    or false (0) if the new value is non-zero
+ * true (non-zero) if the new value is zero,
+ * false (0) if the new value is non-zero
  */
-static inline int32_t cdf_atomic_dec_and_test(cdf_atomic_t *v)
+static inline int32_t qdf_atomic_dec_and_test(qdf_atomic_t *v)
 {
-	return __cdf_atomic_dec_and_test(v);
+	return __qdf_atomic_dec_and_test(v);
 }
 
 /**
- * cdf_atomic_set() - set a value to the value of an atomic variable
+ * qdf_atomic_set() - set a value to the value of an atomic variable
  * @v: A pointer to an opaque atomic variable
+ * @i: required value to set
  *
+ * Atomically sets the value of v to i
  * Return: None
  */
-static inline void cdf_atomic_set(cdf_atomic_t *v, int i)
+static inline void qdf_atomic_set(qdf_atomic_t *v, int i)
 {
-	__cdf_atomic_set(v, i);
+	__qdf_atomic_set(v, i);
 }
 
 /**
- * cdf_atomic_inc_return() - return the incremented value of an atomic variable
+ * qdf_atomic_inc_return() - return the incremented value of an atomic variable
  * @v: A pointer to an opaque atomic variable
  *
  * Return: The current value of the variable
  */
-static inline int32_t cdf_atomic_inc_return(cdf_atomic_t *v)
+static inline int32_t qdf_atomic_inc_return(qdf_atomic_t *v)
 {
-	return __cdf_atomic_inc_return(v);
+	return __qdf_atomic_inc_return(v);
 }
 
 #endif
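
A hedged sketch of the reference-counter use case the header comment above describes; struct obj_ref and its callers are illustrative, and only qdf_atomic_* calls from this header are used:

struct obj_ref {
	qdf_atomic_t refcnt;
};

static void obj_ref_init(struct obj_ref *o)
{
	qdf_atomic_init(&o->refcnt);    /* now returns QDF_STATUS; ignored here */
	qdf_atomic_set(&o->refcnt, 1);  /* creator holds the first reference */
}

static void obj_get(struct obj_ref *o)
{
	qdf_atomic_inc(&o->refcnt);
}

static int obj_put(struct obj_ref *o)
{
	/* non-zero when the count just dropped to zero, i.e. last reference gone */
	return qdf_atomic_dec_and_test(&o->refcnt);
}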

+ 171 - 54
qdf/inc/qdf_defer.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -26,98 +26,215 @@
  */
 
 /**
- * DOC: cdf_defer.h
- * This file abstracts deferred execution contexts.
+ * DOC: qdf_defer.h
+ * This file abstracts deferred execution API's.
  */
 
-#ifndef __CDF_DEFER_H
-#define __CDF_DEFER_H
+#ifndef __QDF_DEFER_H
+#define __QDF_DEFER_H
 
-#include <cdf_types.h>
-#include <i_cdf_defer.h>
+#include <qdf_types.h>
+#include <i_qdf_defer.h>
 
 /**
- * This implements work queues (worker threads, kernel threads etc.).
+ * TODO This implements work queues (worker threads, kernel threads etc.).
  * Note that there is no cancel on a scheduled work. You cannot free a work
  * item if its queued. You cannot know if a work item is queued or not unless
- * its running, whence you know its not queued.
+ * its running, hence you know its not queued.
  *
  * so if, say, a module is asked to unload itself, how exactly will it make
  * sure that the work's not queued, for OS'es that dont provide such a
  * mechanism??
  */
 
-/* cdf_work_t - representation of a work queue */
-typedef __cdf_work_t cdf_work_t;
+/*
+ * Representation of a work queue.
+ */
+typedef __qdf_work_t     qdf_work_t;
+typedef __qdf_delayed_work_t qdf_delayed_work_t;
+typedef __qdf_workqueue_t     qdf_workqueue_t;
 
-/* cdf_work_t - representation of a bottom half */
-typedef __cdf_bh_t cdf_bh_t;
+/*
+ * Representation of a bottom half.
+ */
+typedef __qdf_bh_t       qdf_bh_t;
 
 /**
- * cdf_create_bh() - this creates the Bottom half deferred handler
- * @hdl:   OS handle
- * @bh:    Bottom instance
- * @func:  Func deferred function to run at bottom half interrupt
- *         context
- * Return: None
+ * qdf_create_bh - creates the bottom half deferred handler
+ * @hdl: os handle
+ * @bh: pointer to bottom
+ * @func: deferred function to run at bottom half interrupt context.
+ * @arg: argument for the deferred function
+ * Return: none
  */
-static inline void
-cdf_create_bh(cdf_handle_t hdl, cdf_bh_t *bh, cdf_defer_fn_t func, void *arg)
+static inline void qdf_create_bh(qdf_handle_t  hdl, qdf_bh_t  *bh,
+				 qdf_defer_fn_t  func, void  *arg)
 {
-	__cdf_init_bh(hdl, bh, func, arg);
+	__qdf_init_bh(hdl, bh, func, arg);
 }
 
 /**
- * cdf_sched_bh() - schedule a bottom half (DPC)
- * @hdl:	OS handle
- * @bh:		Bottom instance
- *
- * Return: None
+ * qdf_sched - schedule a bottom half (DPC)
+ * @hdl: OS handle
+ * @bh: pointer to bottom
+ * Return: none
  */
-static inline void cdf_sched_bh(cdf_handle_t hdl, cdf_bh_t *bh)
+static inline void qdf_sched_bh(qdf_handle_t hdl, qdf_bh_t *bh)
 {
-	__cdf_sched_bh(hdl, bh);
+	__qdf_sched_bh(hdl, bh);
 }
 
 /**
- * cdf_destroy_bh() - destroy a bottom half (DPC)
- * @hdl:	OS handle
- * @bh:		Bottom instance
- *
- * Return: None
+ * qdf_destroy_bh - destroy the bh (synchronous)
+ * @hdl: OS handle
+ * @bh: pointer to bottom
+ * Return: none
  */
-static inline void cdf_destroy_bh(cdf_handle_t hdl, cdf_bh_t *bh)
+static inline void qdf_destroy_bh(qdf_handle_t hdl, qdf_bh_t *bh)
 {
-	__cdf_disable_bh(hdl, bh);
+	__qdf_disable_bh(hdl, bh);
 }
 
 /*********************Non-Interrupt Context deferred Execution***************/
 
 /**
- * cdf_create_work() - create a work/task queue, This runs in non-interrupt
- *		       context, so can be preempted by H/W & S/W intr
- * @work:	Work instance
- * @func:	Deferred function to run at bottom half non-interrupt
- *		context
- * @arg:	Argument for the deferred function
- *
- * Return: None
+ * qdf_create_work - create a work/task queue, This runs in non-interrupt
+ * context, so can be preempted by H/W & S/W intr
+ * @hdl: OS handle
+ * @work: pointer to work
+ * @func: deferred function to run at bottom half non-interrupt context.
+ * @arg: argument for the deferred function
+ * Return: none
+ */
+static inline void qdf_create_work(qdf_handle_t hdl, qdf_work_t  *work,
+				   qdf_defer_fn_t  func, void  *arg)
+{
+	__qdf_init_work(hdl, work, func, arg);
+}
+
+/**
+ * qdf_create_delayed_work - create a delayed work/task, This runs in
+ * non-interrupt context, so can be preempted by H/W & S/W intr
+ * @hdl: OS handle
+ * @work: pointer to work
+ * @func: deferred function to run at bottom half non-interrupt context.
+ * @arg: argument for the deferred function
+ * Return: none
+ */
+static inline void qdf_create_delayed_work(qdf_handle_t hdl,
+					   qdf_delayed_work_t  *work,
+					   qdf_defer_fn_t  func, void  *arg)
+{
+	__qdf_init_delayed_work(hdl, work, func, arg);
+}
+
+/**
+ * qdf_create_workqueue - create a workqueue, This runs in non-interrupt
+ * context, so can be preempted by H/W & S/W intr
+ * @name: string
+ * Return: pointer of type qdf_workqueue_t
+ */
+static inline qdf_workqueue_t *qdf_create_workqueue(char *name)
+{
+	return  __qdf_create_workqueue(name);
+}
+
+/**
+ * qdf_queue_work - Queue the work/task
+ * @hdl: OS handle
+ * @wqueue: pointer to workqueue
+ * @work: pointer to work
+ * Return: none
  */
 static inline void
-cdf_create_work(cdf_work_t *work,
-		cdf_defer_fn_t func, void *arg)
+qdf_queue_work(qdf_handle_t hdl, qdf_workqueue_t *wqueue, qdf_work_t *work)
 {
-	__cdf_init_work(work, func, arg);
+	return  __qdf_queue_work(hdl, wqueue, work);
 }
 
 /**
- * cdf_sched_work() - schedule a deferred task on non-interrupt context
- * @work:	Work instance
- *
- * Return: None
+ * qdf_queue_delayed_work - Queue the delayed work/task
+ * @hdl: OS handle
+ * @wqueue: pointer to workqueue
+ * @work: pointer to work
+ * @delay: delay interval
+ * Return: none
+ */
+static inline void qdf_queue_delayed_work(qdf_handle_t hdl,
+					  qdf_workqueue_t *wqueue,
+					  qdf_delayed_work_t *work,
+					  uint32_t delay)
+{
+	return  __qdf_queue_delayed_work(hdl, wqueue, work, delay);
+}
+
+/**
+ * qdf_flush_workqueue - flush the workqueue
+ * @hdl: OS handle
+ * @wqueue: pointer to workqueue
+ * Return: none
+ */
+static inline void qdf_flush_workqueue(qdf_handle_t hdl,
+				       qdf_workqueue_t *wqueue)
+{
+	return  __qdf_flush_workqueue(hdl, wqueue);
+}
+
+/**
+ * qdf_destroy_workqueue - Destroy the workqueue
+ * @hdl: OS handle
+ * @wqueue: pointer to workqueue
+ * Return: none
+ */
+static inline void qdf_destroy_workqueue(qdf_handle_t hdl,
+					 qdf_workqueue_t *wqueue)
+{
+	return  __qdf_destroy_workqueue(hdl, wqueue);
+}
+
+/**
+ * qdf_sched_work - Schedule a deferred task on non-interrupt context
+ * @hdl: OS handle
+ * @work: pointer to work
+ * Return: none
+ */
+static inline void qdf_sched_work(qdf_handle_t hdl, qdf_work_t *work)
+{
+	__qdf_sched_work(hdl, work);
+}
+
+/**
+ * qdf_flush_work - Flush a deferred task on non-interrupt context
+ * @hdl: OS handle
+ * @work: pointer to work
+ * Return: none
+ */
+static inline void qdf_flush_work(qdf_handle_t hdl, qdf_work_t *work)
+{
+	__qdf_flush_work(hdl, work);
+}
+
+/**
+ * qdf_disable_work - disable the deferred task (synchronous)
+ * @hdl: OS handle
+ * @work: pointer to work
+ * Return: unsigned int
  */
-static inline void cdf_schedule_work(cdf_work_t *work)
+static inline uint32_t qdf_disable_work(qdf_handle_t hdl, qdf_work_t *work)
 {
-	__cdf_schedule_work(work);
+	return __qdf_disable_work(hdl, work);
 }
-#endif /*__CDF_DEFER_H*/
+
+
+/**
+ * qdf_destroy_work - destroy the deferred task (synchronous)
+ * @hdl: OS handle
+ * @work: pointer to work
+ * Return: none
+ */
+static inline void qdf_destroy_work(qdf_handle_t hdl, qdf_work_t *work)
+{
+	__qdf_disable_work(hdl, work);
+}
+
+#endif /*_QDF_DEFER_H*/
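
To show how the new workqueue wrappers fit together, a minimal sketch; tx_cleanup_fn, the queue name, and the assumption that qdf_defer_fn_t takes a single void * argument (as the arg parameter suggests) are illustrative, not part of this change:

static void tx_cleanup_fn(void *ctx)
{
	/* runs later in non-interrupt context with the arg passed at create time */
}

static void defer_example(qdf_handle_t hdl, void *ctx)
{
	qdf_workqueue_t *wq = qdf_create_workqueue("tx_cleanup_wq");
	qdf_work_t cleanup_work;

	if (!wq)
		return;

	qdf_create_work(hdl, &cleanup_work, tx_cleanup_fn, ctx);
	qdf_queue_work(hdl, wq, &cleanup_work);  /* schedule on the private queue */

	qdf_flush_workqueue(hdl, wq);            /* wait for queued work to finish */
	qdf_destroy_workqueue(hdl, wq);
}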

+ 18 - 108
qdf/inc/qdf_event.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -25,130 +25,40 @@
  * to the Linux Foundation.
  */
 
-#if !defined(__CDF_EVENT_H)
-#define __CDF_EVENT_H
-
 /**
- * DOC: cdf_event.h
- *
- * Connectivity driver framework (CDF) events API
- *
- **/
+ * DOC: qdf_event.h
+ * This file provides OS abstraction for event APIs.
+ */
+
+#if !defined(__QDF_EVENT_H)
+#define __QDF_EVENT_H
 
 /* Include Files */
-#include "cdf_status.h"
-#include "cdf_types.h"
-#include "i_cdf_event.h"
+#include "qdf_status.h"
+#include <qdf_types.h>
+#include <i_qdf_event.h>
+#include <qdf_trace.h>
 
 /* Preprocessor definitions and constants */
 #ifdef __cplusplus
 extern "C" {
 #endif /* __cplusplus */
 
-/* Type declarations */
-
+typedef __qdf_event_t qdf_event_t;
 /* Function declarations and documenation */
 
-/**
- * cdf_event_init() - initializes the specified event
- *
- * @event:	Pointer to CDF event object to initialize
- *
- * Initializes the specified event. Upon successful initialization the state
- * of the event becomes initialized and not signaled.
- *
- * Return:
- *    CDF_STATUS_SUCCESS - Event was successfully initialized and is ready to
- *                         be used
- *    Otherwise failure CDF reason code
- */
+QDF_STATUS qdf_event_create(qdf_event_t *event);
 
-CDF_STATUS cdf_event_init(cdf_event_t *event);
+QDF_STATUS qdf_event_set(qdf_event_t *event);
 
-/**
- * cdf_event_set() -  set a CDF event
- *
- * @event:	Pointer of CDF event to set to the signalled state
- *
- * The state of the specified event is set to 'signalled by calling
- * cdf_event_set().  The state of the event remains signalled until an
- * explicit call to cdf_event_reset().
- *
- * Any threads waiting on the event as a result of a cdf_event_wait() will
- * be unblocked and available to be scheduled for execution when the event
- * is signaled by a call to cdf_event_set().
- *
- * Return:
- *      CDF_STATUS_SUCCESS - Event was successfully set
- *      Otherwise failure CDF reason code
- */
-CDF_STATUS cdf_event_set(cdf_event_t *event);
-
-/**
- * cdf_event_reset() -  reset a CDF event
- *
- * @event:	Pointer of CDF event to reset
- *
- * The state of the specified event is set to 'NOT signalled' by calling
- * cdf_event_reset().  The state of the event remains NOT signalled until an
- * explicit call to cdf_event_set().
- *
- * This function sets the event to a NOT signalled state even if the event was
- * signalled multiple times before being signaled.
- *
- * Return:
- *      CDF_STATUS_SUCCESS - Event was successfully reset
- *      Otherwise failure CDF reason code
- */
-CDF_STATUS cdf_event_reset(cdf_event_t *event);
+QDF_STATUS qdf_event_reset(qdf_event_t *event);
 
-/**
- * cdf_event_destroy() -  destroy a CDF event
- *
- * @event:	Pointer of CDF event to destroy
- *
- * The function destroys the event object referenced by event.
- * After a successful return from cdf_event_destroy() the event object becomes,
- * in effect, uninitialized.
- *
- * A destroyed event object can be reinitialized using cdf_event_init();
- * the results of otherwise referencing the object after it has been destroyed
- * are undefined.  Calls to CDF event functions to manipulate the lock such
- * as cdf_event_set() will fail if the event is destroyed.  Therefore,
- * don't use the event after it has been destroyed until it has
- * been re-initialized.
- *
- * Return:
- *      CDF_STATUS_SUCCESS - Event was successfully destroyed
- *      Otherwise failure CDF reason code
- */
-CDF_STATUS cdf_event_destroy(cdf_event_t *event);
+QDF_STATUS qdf_event_destroy(qdf_event_t *event);
 
-/**
- * cdf_wait_single_event() -  wait for a single input CDF event to be set
- *
- * @event:	Pointer of CDF event to wait on
- * @timeout:	Timeout value in milli seconds
- *
- * This API waits for the event to be set. This function returns
- * if this interval elapses, regardless if any of the events have
- * been set.  An input value of 0 for this timeout parameter means
- * to wait infinitely, meaning a timeout will never occur.
- *
- *
- * Return:
- *    CDF_STATUS_SUCCESS - the wait was satisifed by the event being
- *                         set.
- *
- *    CDF_STATUS_E_TIMEOUT - the timeout interval elapsed before the
- *    event was set.
- *
- *    CDF_STATUS_E_INVAL - The value specified by event is invalid.
- */
-CDF_STATUS cdf_wait_single_event(cdf_event_t *pEvent,
+QDF_STATUS qdf_wait_single_event(qdf_event_t *event,
 				 uint32_t timeout);
 
 #ifdef __cplusplus
 }
 #endif /* __cplusplus */
-#endif /* __CDF_EVENT_H */
+#endif /* __QDF_EVENT_H */
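
A brief sketch of the set/wait pairing these declarations support; fw_ready_event and the 1000 ms timeout are illustrative:

static qdf_event_t fw_ready_event;

/* e.g. called from a response handler once firmware signals readiness */
static void fw_ready_indication(void)
{
	qdf_event_set(&fw_ready_event);
}

static QDF_STATUS wait_for_fw_ready(void)
{
	QDF_STATUS status;

	qdf_event_create(&fw_ready_event);
	/* ... issue the request that will eventually set the event ... */
	status = qdf_wait_single_event(&fw_ready_event, 1000);
	qdf_event_destroy(&fw_ready_event);

	return status;  /* success once set; a distinct failure status on timeout */
}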

+ 55 - 51
qdf/inc/qdf_list.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -25,88 +25,92 @@
  * to the Linux Foundation.
  */
 
-#if !defined(__CDF_LIST_H)
-#define __CDF_LIST_H
 
 /**
- *  DOC: cdf_list.h
- *
- *  Connectivity driver framework (CDF) list APIs
- *
- *  Definitions for CDF Linked Lists API
+ *  DOC: qdf_list.h
+ *  QCA driver framework (QDF) list APIs
+ *  Definitions for QDF Linked Lists API
  *
  *  Lists are implemented as a doubly linked list. An item in a list can
  *  be of any type as long as the datatype contains a field of type
- *  cdf_link_t.
+ *  qdf_link_t.
  *
  *  In general, a list is a doubly linked list of items with a pointer
  *  to the front of the list and a pointer to the end of the list.  The
  *  list items contain a forward and back link.
  *
- *  CDF linked list APIs are NOT thread safe so make sure to use appropriate
+ *  QDF linked list APIs are NOT thread safe so make sure to use appropriate
  *  locking mechanisms to assure operations on the list are thread safe.
  */
 
+#if !defined(__QDF_LIST_H)
+#define __QDF_LIST_H
+
 /* Include Files */
-#include <cdf_types.h>
-#include <cdf_status.h>
-#include <cdf_trace.h>
-#include <linux/list.h>
+#include <qdf_types.h>
+#include <qdf_status.h>
+#include <i_qdf_list.h>
+#include <qdf_trace.h>
 
-/* Preprocessor definitions and constants */
+typedef __qdf_list_node_t qdf_list_node_t;
+typedef __qdf_list_t qdf_list_t;
+/* Function declarations */
+QDF_STATUS qdf_list_insert_front(qdf_list_t *list, qdf_list_node_t *node);
 
-/* Type declarations */
+QDF_STATUS qdf_list_insert_back_size(qdf_list_t *list, qdf_list_node_t *node,
+				     uint32_t *size);
 
-typedef struct list_head cdf_list_node_t;
+QDF_STATUS qdf_list_remove_front(qdf_list_t *list, qdf_list_node_t **node1);
 
-typedef struct cdf_list_s {
-	cdf_list_node_t anchor;
-	uint32_t count;
-	uint32_t max_size;
-} cdf_list_t;
+QDF_STATUS qdf_list_peek_next(qdf_list_t *list,	qdf_list_node_t *node,
+			      qdf_list_node_t **node1);
 
-/* Function declarations */
-
-CDF_INLINE_FN void cdf_list_init(cdf_list_t *p_list, uint32_t max_size)
+/**
+ * qdf_list_create() - Initialize list head
+ * @list: object of list
+ * @max_size: max size of the list
+ * Return: none
+ */
+static inline void qdf_list_create(__qdf_list_t *list, uint32_t max_size)
 {
-	INIT_LIST_HEAD(&p_list->anchor);
-	p_list->count = 0;
-	p_list->max_size = max_size;
+	__qdf_list_create(list, max_size);
 }
 
-CDF_INLINE_FN void cdf_list_destroy(cdf_list_t *p_list)
+
+/**
+ * qdf_list_destroy() - Destroy the list
+ * @list: object of list
+ * Return: none
+ */
+static inline void qdf_list_destroy(qdf_list_t *list)
 {
-	if (p_list->count != 0) {
-		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+	if (list->count != 0) {
+		QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_ERROR,
 			  "%s: list length not equal to zero", __func__);
-		CDF_ASSERT(0);
+		QDF_ASSERT(0);
 	}
 }
 
-CDF_INLINE_FN void cdf_list_size(cdf_list_t *p_list, uint32_t *p_size)
+/**
+ * qdf_list_size() - gives the size of the list
+ * @list: object of list
+ * @size: size of the list
+ * Return: uint32_t
+ */
+static inline uint32_t qdf_list_size(qdf_list_t *list)
 {
-	*p_size = p_list->count;
+	return list->count;
 }
 
-CDF_STATUS cdf_list_insert_front(cdf_list_t *p_list, cdf_list_node_t *p_node);
-
-CDF_STATUS cdf_list_insert_back(cdf_list_t *p_list, cdf_list_node_t *p_node);
-
-CDF_STATUS cdf_list_insert_back_size(cdf_list_t *p_list,
-				     cdf_list_node_t *p_node, uint32_t *p_size);
-
-CDF_STATUS cdf_list_remove_front(cdf_list_t *p_list, cdf_list_node_t **pp_node);
-
-CDF_STATUS cdf_list_remove_back(cdf_list_t *p_list, cdf_list_node_t **pp_node);
+QDF_STATUS qdf_list_insert_back(qdf_list_t *list, qdf_list_node_t *node);
 
-CDF_STATUS cdf_list_peek_front(cdf_list_t *p_list, cdf_list_node_t **pp_node);
+QDF_STATUS qdf_list_remove_back(qdf_list_t *list, qdf_list_node_t **node1);
 
-CDF_STATUS cdf_list_peek_next(cdf_list_t *p_list, cdf_list_node_t *p_node,
-			      cdf_list_node_t **pp_node);
+QDF_STATUS qdf_list_peek_front(qdf_list_t *list, qdf_list_node_t **node1);
 
-CDF_STATUS cdf_list_remove_node(cdf_list_t *p_list,
-				cdf_list_node_t *p_node_to_remove);
+QDF_STATUS qdf_list_remove_node(qdf_list_t *list,
+				qdf_list_node_t *node_to_remove);
 
-bool cdf_list_empty(cdf_list_t *list);
+bool qdf_list_empty(qdf_list_t *list);
 
-#endif /* __CDF_LIST_H */
+#endif /* __QDF_LIST_H */
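
Since the list APIs require the caller to embed qdf_list_node_t in the item and to provide any locking, a hedged sketch of that pattern follows; struct pending_req, the max size, and the cast convention (node as first member) are illustrative:

struct pending_req {
	qdf_list_node_t node;   /* first member, so the cast below is trivial */
	uint32_t id;
};

static qdf_list_t req_list;

static void req_list_example(struct pending_req *req)
{
	qdf_list_node_t *removed;

	qdf_list_create(&req_list, 32);
	qdf_list_insert_back(&req_list, &req->node);

	if (qdf_list_remove_front(&req_list, &removed) == QDF_STATUS_SUCCESS) {
		struct pending_req *first = (struct pending_req *)removed;
		/* process the dequeued request; caller owns serialization */
		(void)first;
	}

	qdf_list_destroy(&req_list);   /* asserts if the list is not empty */
}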

+ 170 - 192
qdf/inc/qdf_lock.h

@@ -25,282 +25,260 @@
  * to the Linux Foundation.
  */
 
-#if !defined(__CDF_LOCK_H)
-#define __CDF_LOCK_H
-
 /**
- *
- * @file  cdf_lock.h
- *
- * @brief Connectivity driver framework (CDF) lock APIs
- *
- * Definitions for CDF locks
- *
+ * @file qdf_lock.h
+ * This file abstracts locking operations.
  */
 
-/* Include Files */
-#include "cdf_status.h"
-#include "i_cdf_lock.h"
+#ifndef _QDF_LOCK_H
+#define _QDF_LOCK_H
 
-/* Preprocessor definitions and constants */
+#include <qdf_types.h>
+#include <i_qdf_lock.h>
+
+#define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0
+#define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0
+#define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1
+
+/**
+ * qdf_semaphore_acquire_timeout() - Take the semaphore before timeout
+ * @m: semaphore to take
+ * @timeout: maximum time to try to take the semaphore
+ * Return: int
+ */
+static inline int qdf_semaphore_acquire_timeout(struct semaphore *m,
+						unsigned long timeout)
+{
+	return __qdf_semaphore_acquire_timeout(m, timeout);
+}
 
-/* Type declarations */
 /**
  * @brief Platform spinlock object
  */
-typedef __cdf_spinlock_t cdf_spinlock_t;
+typedef __qdf_spinlock_t qdf_spinlock_t;
+
 /**
  * @brief Platform mutex object
  */
-typedef __cdf_semaphore_t cdf_semaphore_t;
+typedef __qdf_semaphore_t qdf_semaphore_t;
+typedef __qdf_mutex_t qdf_mutex_t;
+
+/* function Declaration */
+QDF_STATUS qdf_mutex_create(qdf_mutex_t *m);
+
+QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *m);
+
+QDF_STATUS qdf_mutex_release(qdf_mutex_t *m);
 
-/* Function declarations and documenation */
+QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock);
 
 /**
- * cdf_semaphore_init() - initialize a semaphore
- * @m:  Semaphore to initialize
- *
- * Return: None
+ * qdf_spinlock_create - Initialize a spinlock
+ * @lock: spinlock object pointer
+ * Return: none
  */
-
-static inline void cdf_semaphore_init(cdf_semaphore_t *m)
+static inline void qdf_spinlock_create(qdf_spinlock_t *lock)
 {
-	__cdf_semaphore_init(m);
+	__qdf_spinlock_create(lock);
 }
 
 /**
- * cdf_semaphore_acquire() - take the semaphore
- * @m:  Semaphore to take
- *
- * Return: None
+ * qdf_spinlock_destroy - Delete a spinlock
+ * @lock: spinlock object pointer
+ * Return: none
  */
-static inline int cdf_semaphore_acquire(cdf_device_t osdev, cdf_semaphore_t *m)
+static inline void qdf_spinlock_destroy(qdf_spinlock_t *lock)
 {
-	return __cdf_semaphore_acquire(osdev, m);
+	__qdf_spinlock_destroy(lock);
 }
 
 /**
- * cdf_semaphore_release () - give the semaphore
- * @m:  Semaphore to give
- *
- * Return: None
+ * qdf_spin_trylock_bh() - spin trylock bottomhalf
+ * @lock: spinlock object
+ * Return: int
  */
-static inline void
-cdf_semaphore_release(cdf_device_t osdev, cdf_semaphore_t *m)
+static inline int qdf_spin_trylock_bh(qdf_spinlock_t *lock)
 {
-	__cdf_semaphore_release(osdev, m);
+	return __qdf_spin_trylock_bh(lock);
 }
 
+int qdf_spin_trylock_bh_outline(qdf_spinlock_t *lock);
+
 /**
- * cdf_mutex_init() - initialize a CDF lock
- * @lock:	 Pointer to the opaque lock object to initialize
- *
- * cdf_mutex_init() function initializes the specified lock. Upon
- * successful initialization, the state of the lock becomes initialized
- * and unlocked.
- *
- * A lock must be initialized by calling cdf_mutex_init() before it
- * may be used in any other lock functions.
- *
- * Attempting to initialize an already initialized lock results in
- * a failure.
- *
- * Return:
- *	CDF_STATUS_SUCCESS:	lock was successfully initialized
- *	CDF failure reason codes: lock is not initialized and can't be used
+ * qdf_spin_lock_bh() - locks the spinlock mutex in soft irq context
+ * @lock: spinlock object pointer
+ * Return: none
  */
-CDF_STATUS cdf_mutex_init(cdf_mutex_t *lock);
+static inline void qdf_spin_lock_bh(qdf_spinlock_t *lock)
+{
+	__qdf_spin_lock_bh(lock);
+}
+
+void qdf_spin_lock_bh_outline(qdf_spinlock_t *lock);
 
 /**
- * cdf_mutex_acquire () - acquire a CDF lock
- * @lock:	 Pointer to the opaque lock object to acquire
- *
- * A lock object is acquired by calling cdf_mutex_acquire().  If the lock
- * is already locked, the calling thread shall block until the lock becomes
- * available. This operation shall return with the lock object referenced by
- * lock in the locked state with the calling thread as its owner.
- *
- * Return:
- *	CDF_STATUS_SUCCESS:	lock was successfully initialized
- *	CDF failure reason codes: lock is not initialized and can't be used
+ * qdf_spin_unlock_bh() - unlocks the spinlock mutex in soft irq context
+ * @lock: spinlock object pointer
+ * Return: none
  */
-CDF_STATUS cdf_mutex_acquire(cdf_mutex_t *lock);
+static inline void qdf_spin_unlock_bh(qdf_spinlock_t *lock)
+{
+	__qdf_spin_unlock_bh(lock);
+}
+
+void qdf_spin_unlock_bh_outline(qdf_spinlock_t *lock);
 
 /**
- * cdf_mutex_release() - release a CDF lock
- * @lock:	 Pointer to the opaque lock object to be released
- *
- * cdf_mutex_release() function shall release the lock object
- * referenced by 'lock'.
- *
- * If a thread attempts to release a lock that it unlocked or is not
- * initialized, an error is returned.
- *
- * Return:
- *	CDF_STATUS_SUCCESS:	lock was successfully initialized
- *	CDF failure reason codes: lock is not initialized and can't be used
+ * qdf_spinlock_irq_exec - Execute the input function with spinlock held
+ * and interrupt disabled.
+ * @hdl: OS handle
+ * @lock: spinlock to be held for the critical region
+ * @func: critical region function that to be executed
+ * @context: context of the critical region function
+ * Return: Boolean status returned by the critical region function
  */
-CDF_STATUS cdf_mutex_release(cdf_mutex_t *lock);
+static inline bool qdf_spinlock_irq_exec(qdf_handle_t hdl,
+					 qdf_spinlock_t *lock,
+					 qdf_irqlocked_func_t func, void *arg)
+{
+	return __qdf_spinlock_irq_exec(hdl, lock, func, arg);
+}
 
 /**
- * cdf_mutex_destroy() - destroy a CDF lock
- * @lock:	 Pointer to the opaque lock object to be destroyed
- *
- * cdf_mutex_destroy() function shall destroy the lock object
- * referenced by lock.  After a successful return from \a cdf_mutex_destroy()
- * the lock object becomes, in effect, uninitialized.
- *
- * A destroyed lock object can be reinitialized using cdf_mutex_init();
- * the results of otherwise referencing the object after it has been destroyed
- * are undefined.  Calls to CDF lock functions to manipulate the lock such
- * as cdf_mutex_acquire() will fail if the lock is destroyed.  Therefore,
- * don't use the lock after it has been destroyed until it has
- * been re-initialized.
+ * qdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive)
+ * @lock: Lock object
  *
- * Return:
- *	CDF_STATUS_SUCCESS:	lock was successfully initialized
- *	CDF failure reason codes: lock is not initialized and can't be used
+ * Return: none
  */
-CDF_STATUS cdf_mutex_destroy(cdf_mutex_t *lock);
+static inline void qdf_spin_lock(qdf_spinlock_t *lock)
+{
+	__qdf_spin_lock(lock);
+}
 
 /**
- * cdf_spinlock_init() - initialize a spinlock
- * @lock: Spinlock object pointer
+ * qdf_spin_unlock() - Unlock the spinlock and enables the Preemption
+ * @lock: Lock object
  *
- * Return: None
+ * Return: none
  */
-static inline void cdf_spinlock_init(cdf_spinlock_t *lock)
+static inline void qdf_spin_unlock(qdf_spinlock_t *lock)
 {
-	__cdf_spinlock_init(lock);
+	__qdf_spin_unlock(lock);
 }
 
 /**
- * cdf_spinlock_destroy() - delete a spinlock
- * @lock: Spinlock object pointer
+ * qdf_spin_lock_irq() - Acquire a Spinlock(SMP) & save the irq state
+ * @lock: Lock object
+ * @flags: flags
  *
- * Return: None
+ * Return: none
  */
-static inline void cdf_spinlock_destroy(cdf_spinlock_t *lock)
+static inline void qdf_spin_lock_irq(qdf_spinlock_t *lock, unsigned long flags)
 {
-	__cdf_spinlock_destroy(lock);
+	__qdf_spin_lock_irq(&lock->spinlock, flags);
 }
 
 /**
- * cdf_spin_lock_bh() - locks the spinlock semaphore in soft irq context
- * @lock: Spinlock object pointer
+ * qdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption
+ * (Preemptive) and disable IRQs
+ * @lock: Lock object
  *
- * Return: None
+ * Return: none
  */
-static inline void cdf_spin_lock_bh(cdf_spinlock_t *lock)
+static inline void qdf_spin_lock_irqsave(qdf_spinlock_t *lock)
 {
-	__cdf_spin_lock_bh(lock);
+	__qdf_spin_lock_irqsave(lock);
 }
 
 /**
- * cdf_spin_lock_bh() - unlocks the spinlock semaphore in soft irq context
- * @lock: Spinlock object pointer
+ * qdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the
+ * Preemption and enable IRQ
+ * @lock: Lock object
  *
- * Return: None
+ * Return: none
  */
-static inline void cdf_spin_unlock_bh(cdf_spinlock_t *lock)
+static inline void qdf_spin_unlock_irqrestore(qdf_spinlock_t *lock)
 {
-	__cdf_spin_unlock_bh(lock);
+	__qdf_spin_unlock_irqrestore(lock);
 }
 
 /**
- * cdf_wake_lock_init() - initializes a CDF wake lock
- * @lock: The wake lock to initialize
- * @name: Name of wake lock
+ * qdf_spin_unlock_irq() - Unlock a Spinlock(SMP) & save the restore state
+ * @lock: Lock object
+ * @flags: flags
  *
- * Return:
- *    CDF status success : if wake lock is initialized
- *    CDF status fialure : if wake lock was not initialized
+ * Return: none
  */
-CDF_STATUS cdf_wake_lock_init(cdf_wake_lock_t *lock, const char *name);
+static inline void qdf_spin_unlock_irq(qdf_spinlock_t *lock,
+				       unsigned long flags)
+{
+	__qdf_spin_unlock_irq(&lock->spinlock, flags);
+}
 
 /**
- * cdf_wake_lock_acquire() - acquires a wake lock
- * @lock:	The wake lock to acquire
- * @reason:	Reason for taking wakelock
- *
- * Return:
- *    CDF status success : if wake lock is acquired
- *    CDF status fialure : if wake lock was not acquired
+ * qdf_semaphore_init() - initialize a semaphore
+ * @m: Semaphore to initialize
+ * Return: None
  */
-CDF_STATUS cdf_wake_lock_acquire(cdf_wake_lock_t *pLock, uint32_t reason);
+static inline void qdf_semaphore_init(qdf_semaphore_t *m)
+{
+	__qdf_semaphore_init(m);
+}
 
 /**
- * cdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout
- * @lock:	The wake lock to acquire
- * @reason:	Reason for taking wakelock
- *
- * Return:
- *   CDF status success : if wake lock is acquired
- *   CDF status fialure : if wake lock was not acquired
+ * qdf_semaphore_acquire() - take the semaphore
+ * @m: Semaphore to take
+ * Return: int
  */
-CDF_STATUS cdf_wake_lock_timeout_acquire(cdf_wake_lock_t *pLock,
-					 uint32_t msec, uint32_t reason);
+static inline int qdf_semaphore_acquire(qdf_semaphore_t *m)
+{
+	return __qdf_semaphore_acquire(m);
+}
 
 /**
- * cdf_wake_lock_release() - releases a wake lock
- * @lock:	the wake lock to release
- * @@reason:	Reason for taking wakelock
- *
- * Return:
- *    CDF status success : if wake lock is acquired
- *    CDF status fialure : if wake lock was not acquired
+ * qdf_semaphore_release() - give the semaphore
+ * @m: Semaphore to give
+ * Return: None
  */
-CDF_STATUS cdf_wake_lock_release(cdf_wake_lock_t *pLock, uint32_t reason);
+static inline void qdf_semaphore_release(qdf_semaphore_t *m)
+{
+	__qdf_semaphore_release(m);
+}
 
 /**
- * cdf_wake_lock_destroy() - destroys a wake lock
- * @lock:	The wake lock to destroy
- *
- * Return:
- * CDF status success :	if wake lock is acquired
- * CDF status fialure :	if wake lock was not acquired
+ * qdf_semaphore_acquire_intr - Take the semaphore, interruptible version
+ * @osdev: OS Device
+ * @m: mutex to take
+ * Return: int
  */
-CDF_STATUS cdf_wake_lock_destroy(cdf_wake_lock_t *pLock);
+static inline int qdf_semaphore_acquire_intr(qdf_semaphore_t *m)
+{
+	return __qdf_semaphore_acquire_intr(m);
+}
 
-struct hif_pm_runtime_lock;
-typedef struct hif_pm_runtime_lock *cdf_runtime_lock_t;
+QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name);
 
-CDF_STATUS cdf_runtime_pm_get(void);
-CDF_STATUS cdf_runtime_pm_put(void);
-CDF_STATUS cdf_runtime_pm_prevent_suspend(cdf_runtime_lock_t lock);
-CDF_STATUS cdf_runtime_pm_allow_suspend(cdf_runtime_lock_t lock);
-cdf_runtime_lock_t cdf_runtime_lock_init(const char *name);
-void cdf_runtime_lock_deinit(cdf_runtime_lock_t lock);
+QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason);
 
-/**
- * cdf_spinlock_acquire() - acquires a spin lock
- * @lock:	Spin lock to acquire
- *
- * Return:
- *    CDF status success : if wake lock is acquired
- *    CDF status fialure : if wake lock was not acquired
- */
-CDF_STATUS cdf_spinlock_acquire(cdf_spinlock_t *pLock);
+QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock,
+					 uint32_t msec, uint32_t reason);
 
-/**
- * cdf_spinlock_release() - release a spin lock
- * @lock:	Spin lock to release
- *
- * Return:
- * CDF status success :	if wake lock is acquired
- * CDF status fialure :	if wake lock was not acquired
- */
-CDF_STATUS cdf_spinlock_release(cdf_spinlock_t *pLock);
+QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason);
+
+QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock);
+
+struct hif_pm_runtime_lock;
+typedef struct hif_pm_runtime_lock *qdf_runtime_lock_t;
+
+QDF_STATUS qdf_runtime_pm_get(void);
+QDF_STATUS qdf_runtime_pm_put(void);
+QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t lock);
+QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t lock);
+qdf_runtime_lock_t qdf_runtime_lock_init(const char *name);
+void qdf_runtime_lock_deinit(qdf_runtime_lock_t lock);
 
-#define cdf_spin_lock(_lock) __cdf_spin_lock(_lock)
-#define cdf_spin_unlock(_lock) __cdf_spin_unlock(_lock)
-#define cdf_spin_lock_irqsave(_lock) __cdf_spin_lock_irqsave(_lock)
-#define cdf_spin_unlock_irqrestore(_lock) \
-	__cdf_spin_unlock_irqrestore(_lock)
-#define cdf_spin_lock_irq(_pLock, _flags)   __cdf_spin_lock_irq(_pLock, _flags)
-#define cdf_spin_unlock_irq(_pLock, _flags) \
-	__cdf_spin_unlock_irq(_pLock, _flags)
+QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock);
 
-#define cdf_in_softirq() __cdf_in_softirq()
+QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock);
 
-#endif /* __CDF_LOCK_H */
+#endif /* _QDF_LOCK_H */
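
A short sketch of the soft-irq-safe spinlock usage exposed above; stats_lock and tx_count are illustrative names:

static qdf_spinlock_t stats_lock;
static uint32_t tx_count;

static void stats_init(void)
{
	qdf_spinlock_create(&stats_lock);
}

/* safe to call from both process and soft-irq context */
static void stats_inc(void)
{
	qdf_spin_lock_bh(&stats_lock);
	tx_count++;
	qdf_spin_unlock_bh(&stats_lock);
}

static void stats_deinit(void)
{
	qdf_spinlock_destroy(&stats_lock);
}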

+ 112 - 113
qdf/inc/qdf_mc_timer.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -25,122 +25,121 @@
  * to the Linux Foundation.
  */
 
-#if !defined(__CDF_MC_TIMER_H)
-#define __CDF_MC_TIMER_H
-
 /**
- * DOC: cdf_mc_timer
- *
- * Connectivity driver framework timer APIs serialized to MC thread
+ * DOC: qdf_mc_timer
+ * QCA driver framework timer APIs serialized to MC thread
  */
 
+#if !defined(__QDF_MC_TIMER_H)
+#define __QDF_MC_TIMER_H
+
 /* Include Files */
-#include <cdf_types.h>
-#include <cdf_status.h>
-#include <cdf_lock.h>
-#include <i_cdf_mc_timer.h>
+#include <qdf_types.h>
+#include <qdf_status.h>
+#include <qdf_lock.h>
+#include <i_qdf_mc_timer.h>
 
 #ifdef TIMER_MANAGER
-#include <cdf_list.h>
+#include <qdf_list.h>
 #endif
 
 /* Preprocessor definitions and constants */
-#define CDF_TIMER_STATE_COOKIE (0x12)
-#define CDF_MC_TIMER_TO_MS_UNIT   (1000)
-#define CDF_MC_TIMER_TO_SEC_UNIT  (1000000)
+#define QDF_TIMER_STATE_COOKIE (0x12)
+#define QDF_MC_TIMER_TO_MS_UNIT (1000)
+#define QDF_MC_TIMER_TO_SEC_UNIT (1000000)
 
 /* Type declarations */
-/* cdf Timer callback function prototype (well, actually a prototype for
+/* qdf Timer callback function prototype (well, actually a prototype for
    a pointer to this callback function) */
-typedef void (*cdf_mc_timer_callback_t)(void *userData);
+typedef void (*qdf_mc_timer_callback_t)(void *user_data);
 
 typedef enum {
-	CDF_TIMER_STATE_UNUSED = CDF_TIMER_STATE_COOKIE,
-	CDF_TIMER_STATE_STOPPED,
-	CDF_TIMER_STATE_STARTING,
-	CDF_TIMER_STATE_RUNNING,
-} CDF_TIMER_STATE;
+	QDF_TIMER_STATE_UNUSED = QDF_TIMER_STATE_COOKIE,
+	QDF_TIMER_STATE_STOPPED,
+	QDF_TIMER_STATE_STARTING,
+	QDF_TIMER_STATE_RUNNING,
+} QDF_TIMER_STATE;
 
 #ifdef TIMER_MANAGER
-struct cdf_mc_timer_s;
-typedef struct cdf_mc_timer_node_s {
-	cdf_list_node_t pNode;
-	char *fileName;
-	unsigned int lineNum;
-	struct cdf_mc_timer_s *cdf_timer;
-} cdf_mc_timer_node_t;
+struct qdf_mc_timer_s;
+typedef struct qdf_mc_timer_node_s {
+	qdf_list_node_t node;
+	char *file_name;
+	unsigned int line_num;
+	struct qdf_mc_timer_s *qdf_timer;
+} qdf_mc_timer_node_t;
 #endif
 
-typedef struct cdf_mc_timer_s {
+typedef struct qdf_mc_timer_s {
 #ifdef TIMER_MANAGER
-	cdf_mc_timer_node_t *ptimerNode;
+	qdf_mc_timer_node_t *timer_node;
 #endif
+	qdf_mc_timer_platform_t platform_info;
+	qdf_mc_timer_callback_t callback;
+	void *user_data;
+	qdf_mutex_t lock;
+	QDF_TIMER_TYPE type;
+	QDF_TIMER_STATE state;
+} qdf_mc_timer_t;
+
 
-	cdf_mc_timer_platform_t platformInfo;
-	cdf_mc_timer_callback_t callback;
-	void *userData;
-	cdf_mutex_t lock;
-	CDF_TIMER_TYPE type;
-	CDF_TIMER_STATE state;
-} cdf_mc_timer_t;
+void qdf_try_allowing_sleep(QDF_TIMER_TYPE type);
 
 /* Function declarations and documenation */
 #ifdef TIMER_MANAGER
-void cdf_mc_timer_manager_init(void);
-void cdf_mc_timer_exit(void);
+void qdf_mc_timer_manager_init(void);
+void qdf_mc_timer_manager_exit(void);
 #else
 /**
- * cdf_mc_timer_manager_init() - initialize CDF debug timer manager
- *
- * This API initializes CDF timer debug functionality.
+ * qdf_mc_timer_manager_init() - initialize QDF debug timer manager
+ * This API initializes QDF timer debug functionality.
  *
  * Return: none
  */
-static inline void cdf_mc_timer_manager_init(void)
+static inline void qdf_mc_timer_manager_init(void)
 {
 }
 
 /**
- * cdf_mc_timer_exit() - exit CDF timer debug functionality
- *
- * This API exists CDF timer debug functionality
+ * qdf_mc_timer_manager_exit() - exit QDF timer debug functionality
+ * This API exits QDF timer debug functionality
  *
  * Return: none
  */
-static inline void cdf_mc_timer_exit(void)
+static inline void qdf_mc_timer_manager_exit(void)
 {
 }
 #endif
 /**
- * cdf_mc_timer_get_current_state() - get the current state of the timer
- * @pTimer:  Pointer to timer object
+ * qdf_mc_timer_get_current_state() - get the current state of the timer
+ * @timer:  Pointer to timer object
  *
  * Return:
- *	CDF_TIMER_STATE - cdf timer state
+ * QDF_TIMER_STATE - qdf timer state
  */
 
-CDF_TIMER_STATE cdf_mc_timer_get_current_state(cdf_mc_timer_t *pTimer);
+QDF_TIMER_STATE qdf_mc_timer_get_current_state(qdf_mc_timer_t *timer);
 
 /**
- * cdf_mc_timer_init() - initialize a CDF timer
- * @pTimer:	Pointer to timer object
- * @timerType:	Type of timer
- * @callback:	Callback to be called after timer expiry
- * @serData:	User data which will be passed to callback function
+ * qdf_mc_timer_init() - initialize a QDF timer
+ * @timer: Pointer to timer object
+ * @timer_type: Type of timer
+ * @callback: Callback to be called after timer expiry
+ * @ser_data: User data which will be passed to callback function
  *
- * This API initializes a CDF Timer object.
+ * This API initializes a QDF Timer object.
  *
- * cdf_mc_timer_init() initializes a CDF Timer object.  A timer must be
- * initialized by calling cdf_mc_timer_initialize() before it may be used in
+ * qdf_mc_timer_init() initializes a QDF Timer object.  A timer must be
+ * initialized by calling qdf_mc_timer_initialize() before it may be used in
  * any other timer functions.
  *
  * Attempting to initialize timer that is already initialized results in
  * a failure. A destroyed timer object can be re-initialized with a call to
- * cdf_mc_timer_init().  The results of otherwise referencing the object
+ * qdf_mc_timer_init().  The results of otherwise referencing the object
  * after it has been destroyed are undefined.
  *
- *  Calls to CDF timer functions to manipulate the timer such
- *  as cdf_mc_timer_set() will fail if the timer is not initialized or has
+ *  Calls to QDF timer functions to manipulate the timer such
+ *  as qdf_mc_timer_set() will fail if the timer is not initialized or has
  *  been destroyed.  Therefore, don't use the timer after it has been
  *  destroyed until it has been re-initialized.
  *
@@ -149,105 +148,105 @@ CDF_TIMER_STATE cdf_mc_timer_get_current_state(cdf_mc_timer_t *pTimer);
  *  within the tx thread flow.
  *
  * Return:
- *	CDF_STATUS_SUCCESS - Timer is initialized successfully
- *	CDF failure status - Timer initialization failed
+ * QDF_STATUS_SUCCESS - Timer is initialized successfully
+ * QDF failure status - Timer initialization failed
  */
 #ifdef TIMER_MANAGER
-#define cdf_mc_timer_init(timer, timerType, callback, userdata)	\
-	cdf_mc_timer_init_debug(timer, timerType, callback, userdata, \
-		__FILE__, __LINE__)
-
-CDF_STATUS cdf_mc_timer_init_debug(cdf_mc_timer_t *timer,
-				   CDF_TIMER_TYPE timerType,
-				   cdf_mc_timer_callback_t callback,
-				   void *userData, char *fileName,
-				   uint32_t lineNum);
+#define qdf_mc_timer_init(timer, timer_type, callback, userdata) \
+	qdf_mc_timer_init_debug(timer, timer_type, callback, userdata, \
+				__FILE__, __LINE__)
+
+QDF_STATUS qdf_mc_timer_init_debug(qdf_mc_timer_t *timer,
+				   QDF_TIMER_TYPE timer_type,
+				   qdf_mc_timer_callback_t callback,
+				   void *user_data, char *file_name,
+				   uint32_t line_num);
 #else
-CDF_STATUS cdf_mc_timer_init(cdf_mc_timer_t *timer, CDF_TIMER_TYPE timerType,
-			     cdf_mc_timer_callback_t callback,
-			     void *userData);
+QDF_STATUS qdf_mc_timer_init(qdf_mc_timer_t *timer, QDF_TIMER_TYPE timer_type,
+			     qdf_mc_timer_callback_t callback,
+			     void *user_data);
 #endif
 
 /**
- * cdf_mc_timer_destroy() - destroy CDF timer
- * @timer:	Pointer to timer object
+ * qdf_mc_timer_destroy() - destroy QDF timer
+ * @timer: Pointer to timer object
  *
- * cdf_mc_timer_destroy() function shall destroy the timer object.
- * After a successful return from \a cdf_mc_timer_destroy() the timer
+ * qdf_mc_timer_destroy() function shall destroy the timer object.
+ * After a successful return from \a qdf_mc_timer_destroy() the timer
  * object becomes, in effect, uninitialized.
  *
  * A destroyed timer object can be re-initialized by calling
- * cdf_mc_timer_init().  The results of otherwise referencing the object
+ * qdf_mc_timer_init().  The results of otherwise referencing the object
  * after it has been destroyed are undefined.
  *
- * Calls to CDF timer functions to manipulate the timer, such
- * as cdf_mc_timer_set() will fail if the lock is destroyed.  Therefore,
+ * Calls to QDF timer functions to manipulate the timer, such
+ * as qdf_mc_timer_set() will fail if the lock is destroyed.  Therefore,
  * don't use the timer after it has been destroyed until it has
  * been re-initialized.
  *
  * Return:
- *	CDF_STATUS_SUCCESS - Timer is initialized successfully
- *	CDF failure status - Timer initialization failed
+ * QDF_STATUS_SUCCESS - Timer is destroyed successfully
+ * QDF failure status - Timer destroy failed
  */
-CDF_STATUS cdf_mc_timer_destroy(cdf_mc_timer_t *timer);
+QDF_STATUS qdf_mc_timer_destroy(qdf_mc_timer_t *timer);
 
 /**
- * cdf_mc_timer_start() - start a CDF Timer object
- * @timer:	Pointer to timer object
- * @expirationTime:	Time to expire
+ * qdf_mc_timer_start() - start a QDF Timer object
+ * @timer: Pointer to timer object
+ * @expiration_time: Time to expire
  *
- * cdf_mc_timer_start() function starts a timer to expire after the
+ * qdf_mc_timer_start() function starts a timer to expire after the
  * specified interval, thus running the timer callback function when
  * the interval expires.
  *
  * A timer only runs once (a one-shot timer).  To re-start the
- * timer, cdf_mc_timer_start() has to be called after the timer runs
+ * timer, qdf_mc_timer_start() has to be called after the timer runs
  * or has been cancelled.
  *
  * Return:
- *	CDF_STATUS_SUCCESS - Timer is initialized successfully
- *	CDF failure status - Timer initialization failed
+ * QDF_STATUS_SUCCESS - Timer is started successfully
+ * QDF failure status - Timer start failed
  */
-CDF_STATUS cdf_mc_timer_start(cdf_mc_timer_t *timer, uint32_t expirationTime);
+QDF_STATUS qdf_mc_timer_start(qdf_mc_timer_t *timer, uint32_t expiration_time);
 
 /**
- * cdf_mc_timer_stop() - stop a CDF Timer
- * @timer:	Pointer to timer object
- * cdf_mc_timer_stop() function stops a timer that has been started but
+ * qdf_mc_timer_stop() - stop a QDF Timer
+ * @timer: Pointer to timer object
+ * qdf_mc_timer_stop() function stops a timer that has been started but
  * has not expired, essentially cancelling the 'start' request.
  *
  * After a timer is stopped, it goes back to the state it was in after it
- * was created and can be started again via a call to cdf_mc_timer_start().
+ * was created and can be started again via a call to qdf_mc_timer_start().
  *
  * Return:
- *	CDF_STATUS_SUCCESS - Timer is initialized successfully
- *	CDF failure status - Timer initialization failed
+ * QDF_STATUS_SUCCESS - Timer is stopped successfully
+ * QDF failure status - Timer stop failed
  */
-CDF_STATUS cdf_mc_timer_stop(cdf_mc_timer_t *timer);
+QDF_STATUS qdf_mc_timer_stop(qdf_mc_timer_t *timer);
 
 /**
- * cdf_mc_timer_get_system_ticks() - get the system time in 10ms ticks
-
- * cdf_mc_timer_get_system_ticks() function returns the current number
+ * qdf_mc_timer_get_system_ticks() - get the system time in 10ms ticks
+ *
+ * qdf_mc_timer_get_system_ticks() function returns the current number
  * of timer ticks in 10msec intervals.  This function is suitable for timestamping
  * and calculating time intervals by calculating the difference between two
  * timestamps.
  *
  * Return:
- *	The current system tick count (in 10msec intervals).  This
- *	function cannot fail.
+ * The current system tick count (in 10msec intervals).  This
+ * function cannot fail.
  */
-v_TIME_t cdf_mc_timer_get_system_ticks(void);
+unsigned long qdf_mc_timer_get_system_ticks(void);
 
 /**
- * cdf_mc_timer_get_system_time() - Get the system time in milliseconds
+ * qdf_mc_timer_get_system_time() - Get the system time in milliseconds
  *
- * cdf_mc_timer_get_system_time() function returns the number of milliseconds
+ * qdf_mc_timer_get_system_time() function returns the number of milliseconds
  * that have elapsed since the system was started
  *
  * Return:
- *	The current system time in milliseconds
+ * The current system time in milliseconds
  */
-v_TIME_t cdf_mc_timer_get_system_time(void);
+unsigned long qdf_mc_timer_get_system_time(void);
 
-#endif /* #if !defined __CDF_MC_TIMER_H */
+#endif /* __QDF_MC_TIMER_H */
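
Example (illustrative, not part of this change): a minimal sketch of the
converged MC timer API declared above. The callback prototype
(void (*cb)(void *user_data)) and the 100 ms period are assumptions here.

static void example_timer_cb(void *user_data)
{
	/* invoked in the MC thread context when the timer expires */
}

static QDF_STATUS example_timer_usage(void)
{
	qdf_mc_timer_t timer;
	QDF_STATUS status;

	status = qdf_mc_timer_init(&timer, QDF_TIMER_TYPE_SW,
				   example_timer_cb, NULL);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* one-shot: expires once, ~100 ms from now */
	status = qdf_mc_timer_start(&timer, 100);
	if (QDF_IS_STATUS_ERROR(status)) {
		qdf_mc_timer_destroy(&timer);
		return status;
	}

	/* ... later, cancel if still pending and release the object ... */
	qdf_mc_timer_stop(&timer);
	return qdf_mc_timer_destroy(&timer);
}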

+ 299 - 0
qdf/inc/qdf_mem.h

@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_mem
+ * QCA driver framework (QDF) memory management APIs
+ */
+
+#if !defined(__QDF_MEMORY_H)
+#define __QDF_MEMORY_H
+
+/* Include Files */
+#include <qdf_types.h>
+#include <i_qdf_mem.h>
+
+/**
+ * struct qdf_mem_dma_page_t - Allocated dmaable page
+ * @page_v_addr_start: Page start virtual address
+ * @page_v_addr_end: Page end virtual address
+ * @page_p_addr: Page start physical address
+ */
+struct qdf_mem_dma_page_t {
+	char *page_v_addr_start;
+	char *page_v_addr_end;
+	qdf_dma_addr_t page_p_addr;
+};
+
+/**
+ * struct qdf_mem_multi_page_t - multiple page allocation information storage
+ * @num_element_per_page: Number of element in single page
+ * @num_pages: Number of allocation needed pages
+ * @dma_pages: page information storage in case of coherent memory
+ * @cacheable_pages: page information storage in case of cacheable memory
+ */
+struct qdf_mem_multi_page_t {
+	uint16_t num_element_per_page;
+	uint16_t num_pages;
+	struct qdf_mem_dma_page_t *dma_pages;
+	void **cacheable_pages;
+};
+
+
+/* Preprocessor definitions and constants */
+
+typedef __qdf_mempool_t qdf_mempool_t;
+#ifdef MEMORY_DEBUG
+void qdf_mem_clean(void);
+
+void qdf_mem_init(void);
+
+void qdf_mem_exit(void);
+
+#else
+/**
+ * qdf_mem_init() - initialize qdf memory debug functionality
+ *
+ * Return: none
+ */
+static inline void qdf_mem_init(void)
+{
+}
+
+/**
+ * qdf_mem_exit() - exit qdf memory debug functionality
+ *
+ * Return: none
+ */
+static inline void qdf_mem_exit(void)
+{
+}
+#endif
+
+#ifdef MEMORY_DEBUG
+#define qdf_mem_malloc(size) \
+	qdf_mem_malloc_debug(size, __FILE__, __LINE__)
+void *qdf_mem_malloc_debug(size_t size, char *file_name, uint32_t line_num);
+#else
+void *
+qdf_mem_malloc(qdf_size_t size);
+#endif
+
+void *qdf_mem_alloc_outline(qdf_device_t osdev, qdf_size_t size);
+
+/**
+ * qdf_mem_free() - free QDF memory
+ * @ptr: Pointer to the starting address of the memory to be freed
+ *
+ * This function will free the memory pointed to by 'ptr'.
+ *
+ * Return: None
+ */
+void qdf_mem_free(void *ptr);
+
+void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value);
+
+void qdf_mem_zero(void *ptr, uint32_t num_bytes);
+
+void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes);
+
+void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes);
+
+void qdf_mem_free_outline(void *buf);
+
+void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev, qdf_size_t size,
+			       qdf_dma_addr_t *paddr);
+
+void qdf_mem_free_consistent(qdf_device_t osdev, void *dev, qdf_size_t size,
+			     void *vaddr, qdf_dma_addr_t paddr,
+			     qdf_dma_context_t memctx);
+
+void qdf_mem_zero_outline(void *buf, qdf_size_t size);
+
+/**
+ * qdf_mem_cmp() - memory compare
+ * @memory1: pointer to one location in memory to compare.
+ * @memory2: pointer to second location in memory to compare.
+ * @num_bytes: the number of bytes to compare.
+ *
+ * Function to compare two pieces of memory, similar to memcmp function
+ * in standard C.
+ * Return:
+ * int32_t - indicates how the two memory regions compare:
+ * 0 -- equal
+ * < 0 -- *memory1 is less than *memory2
+ * > 0 -- *memory1 is greater than *memory2
+ */
+static inline int32_t qdf_mem_cmp(const void *memory1, const void *memory2,
+				  uint32_t num_bytes)
+{
+	return __qdf_mem_cmp(memory1, memory2, num_bytes);
+}
+
+/**
+ * qdf_str_cmp - Compare two strings
+ * @str1: First string
+ * @str2: Second string
+ * Return: = 0 equal
+ * > 0 not equal, str1 sorts lexicographically after str2
+ * < 0 not equal, str1 sorts lexicographically before str2
+ */
+static inline int32_t qdf_str_cmp(const char *str1, const char *str2)
+{
+	return __qdf_str_cmp(str1, str2);
+}
+
+/**
+ * qdf_str_lcopy - Copy from one string to another
+ * @dest: destination string
+ * @src: source string
+ * @bytes: limit of num bytes to copy
+ * Return: =0 returns the initial value of dest
+ */
+static inline uint32_t qdf_str_lcopy(char *dest, const char *src, uint32_t bytes)
+{
+	return __qdf_str_lcopy(dest, src, bytes);
+}
+
+/**
+ * qdf_mem_map_nbytes_single - Map memory for DMA
+ * @osdev: pointer to OS device context
+ * @buf: pointer to memory to be dma mapped
+ * @dir: DMA map direction
+ * @nbytes: number of bytes to be mapped.
+ * @phy_addr: pointer to receive the physical address.
+ *
+ * Return: success/failure
+ */
+static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf,
+						 qdf_dma_dir_t dir, int nbytes,
+						 uint32_t *phy_addr)
+{
+#if defined(HIF_PCI)
+	return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr);
+#else
+	return 0;
+#endif
+}
+
+/**
+ * qdf_mem_unmap_nbytes_single() - un_map memory for DMA
+ * @osdev: pomter OS device context
+ * @phy_addr: physical address of memory to be dma unmapped
+ * @dir: DMA unmap direction
+ * @nbytes: number of bytes to be unmapped.
+ *
+ * Return: none
+ */
+static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
+					       uint32_t phy_addr,
+					       qdf_dma_dir_t dir,
+					       int nbytes)
+{
+#if defined(HIF_PCI)
+	__qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
+#endif
+}
+
+/**
+ * qdf_mempool_init - Create and initialize memory pool
+ * @osdev: platform device object
+ * @pool_addr: address of the pool created
+ * @elem_cnt: no. of elements in pool
+ * @elem_size: size of each pool element in bytes
+ * @flags: flags
+ * Return: status of the memory pool creation; the pool handle is returned
+ * through @pool_addr
+ */
+static inline int qdf_mempool_init(qdf_device_t osdev,
+				   qdf_mempool_t *pool_addr, int elem_cnt,
+				   size_t elem_size, uint32_t flags)
+{
+	return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size,
+				  flags);
+}
+
+/**
+ * qdf_mempool_destroy - Destroy memory pool
+ * @osdev: platform device object
+ * @pool: Handle to memory pool
+ * Return: none
+ */
+static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool)
+{
+	__qdf_mempool_destroy(osdev, pool);
+}
+
+/**
+ * qdf_mempool_alloc - Allocate an element from the memory pool
+ * @osdev: platform device object
+ * @pool: Handle to memory pool
+ * Return: Pointer to the allocated element or NULL if the pool is empty
+ */
+static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool)
+{
+	return (void *)__qdf_mempool_alloc(osdev, pool);
+}
+
+/**
+ * qdf_mempool_free - Free a memory pool element
+ * @osdev: Platform device object
+ * @pool: Handle to memory pool
+ * @buf: Element to be freed
+ * Return: none
+ */
+static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool,
+				    void *buf)
+{
+	__qdf_mempool_free(osdev, pool, buf);
+}
+
+void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
+					qdf_dma_addr_t bus_addr,
+					qdf_size_t size,
+					__dma_data_direction direction);
+
+/**
+ * qdf_str_len() - returns the length of a string
+ * @str: input string
+ * Return:
+ * length of string
+ */
+static inline int32_t qdf_str_len(const char *str)
+{
+	return __qdf_str_len(str);
+}
+
+void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
+			       struct qdf_mem_multi_page_t *pages,
+			       size_t element_size, uint16_t element_num,
+			       qdf_dma_context_t memctxt, bool cacheable);
+void qdf_mem_multi_pages_free(qdf_device_t osdev,
+			      struct qdf_mem_multi_page_t *pages,
+			      qdf_dma_context_t memctxt, bool cacheable);
+
+
+#endif /* __QDF_MEMORY_H */
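
Example (illustrative, not part of this change): a minimal sketch of the heap
and pool allocators declared above, assuming a non-zero return from
qdf_mempool_init() indicates failure and that QDF_STATUS is visible via
qdf_types.h.

static QDF_STATUS example_mem_usage(qdf_device_t osdev)
{
	qdf_mempool_t pool;
	char *buf;
	void *elem;

	buf = qdf_mem_malloc(64);
	if (!buf)
		return QDF_STATUS_E_NOMEM;
	qdf_mem_set(buf, 64, 0);
	qdf_mem_copy(buf, "qdf", 4);
	qdf_mem_free(buf);

	/* pool of 16 elements, 128 bytes each, no special flags */
	if (qdf_mempool_init(osdev, &pool, 16, 128, 0))
		return QDF_STATUS_E_NOMEM;

	elem = qdf_mempool_alloc(osdev, pool);
	if (elem)
		qdf_mempool_free(osdev, pool, elem);
	qdf_mempool_destroy(osdev, pool);

	return QDF_STATUS_SUCCESS;
}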

+ 0 - 262
qdf/inc/qdf_memory.h

@@ -1,262 +0,0 @@
-/*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-#if !defined(__CDF_MEMORY_H)
-#define __CDF_MEMORY_H
-
-/**
- * DOC: cdf_memory
- *
- * Connectivity driver framework (CDF) memory management APIs
- */
-
-/* Include Files */
-#include <cdf_types.h>
-
-/**
- * struct cdf_mem_dma_page_t - Allocated dmaable page
- * @page_v_addr_start: Page start virtual address
- * @page_v_addr_end: Page end virtual address
- * @page_p_addr: Page start physical address
- */
-struct cdf_mem_dma_page_t {
-	char *page_v_addr_start;
-	char *page_v_addr_end;
-	cdf_dma_addr_t page_p_addr;
-};
-
-/**
- * struct cdf_mem_multi_page_t - multiple page allocation information storage
- * @num_element_per_page: Number of element in single page
- * @num_pages: Number of allocation needed pages
- * @dma_pages: page information storage in case of coherent memory
- * @cacheable_pages: page information storage in case of cacheable memory
- */
-struct cdf_mem_multi_page_t {
-	uint16_t num_element_per_page;
-	uint16_t num_pages;
-	struct cdf_mem_dma_page_t *dma_pages;
-	void **cacheable_pages;
-};
-
-/* Preprocessor definitions and constants */
-
-#ifdef MEMORY_DEBUG
-void cdf_mem_clean(void);
-void cdf_mem_init(void);
-void cdf_mem_exit(void);
-#else
-/**
- * cdf_mem_init() - initialize cdf memory debug functionality
- *
- * Return: none
- */
-static inline void cdf_mem_init(void)
-{
-}
-
-/**
- * cdf_mem_exit() - exit cdf memory debug functionality
- *
- * Return: none
- */
-static inline void cdf_mem_exit(void)
-{
-}
-#endif
-/* Type declarations */
-
-/* Function declarations and documenation */
-
-/**
- * cdf_mem_malloc() - allocation CDF memory
- * @size:	Number of bytes of memory to allocate.
- *
- * This function will dynamicallly allocate the specified number of bytes of
- * memory.
- *
- *
- * Return:
- *	Upon successful allocate, returns a non-NULL pointer to the allocated
- *	memory.  If this function is unable to allocate the amount of memory
- *	specified (for any reason) it returns %NULL.
- *
- */
-#ifdef MEMORY_DEBUG
-#define cdf_mem_malloc(size) cdf_mem_malloc_debug(size, __FILE__, __LINE__)
-void *cdf_mem_malloc_debug(size_t size, char *fileName, uint32_t lineNum);
-#else
-void *cdf_mem_malloc(size_t size);
-#endif
-
-/**
- *  cdf_mem_free() - free CDF memory
- *  @ptr:	Pointer to the starting address of the memory to be free'd.
- *
- *  This function will free the memory pointed to by 'ptr'.
- *
- *  Return:
- *	 Nothing
- *
- */
-void cdf_mem_free(void *ptr);
-
-/**
- * cdf_mem_set() - set (fill) memory with a specified byte value.
- * @pMemory:	Pointer to memory that will be set
- * @numBytes:	Number of bytes to be set
- * @value:	Byte set in memory
- *
- * Return:
- *    Nothing
- *
- */
-void cdf_mem_set(void *ptr, uint32_t numBytes, uint32_t value);
-
-/**
- * cdf_mem_zero() - zero out memory
- * @pMemory:	pointer to memory that will be set to zero
- * @numBytes:	number of bytes zero
- * @value:	byte set in memory
- *
- *  This function sets the memory location to all zeros, essentially clearing
- *  the memory.
- *
- * Return:
- *	Nothing
- *
- */
-void cdf_mem_zero(void *ptr, uint32_t numBytes);
-
-/**
- * cdf_mem_copy() - copy memory
- * @pDst:	Pointer to destination memory location (to copy to)
- * @pSrc:	Pointer to source memory location (to copy from)
- * @numBytes:	Number of bytes to copy.
- *
- * Copy host memory from one location to another, similar to memcpy in
- * standard C.  Note this function does not specifically handle overlapping
- * source and destination memory locations.  Calling this function with
- * overlapping source and destination memory locations will result in
- * unpredictable results.  Use cdf_mem_move() if the memory locations
- * for the source and destination are overlapping (or could be overlapping!)
- *
- * Return:
- *    Nothing
- *
- */
-void cdf_mem_copy(void *pDst, const void *pSrc, uint32_t numBytes);
-
-/**
- * cdf_mem_move() - move memory
- * @pDst:	pointer to destination memory location (to move to)
- * @pSrc:	pointer to source memory location (to move from)
- * @numBytes:	number of bytes to move.
- *
- * Move host memory from one location to another, similar to memmove in
- * standard C.  Note this function *does* handle overlapping
- * source and destination memory locations.
-
- * Return:
- *	Nothing
- */
-void cdf_mem_move(void *pDst, const void *pSrc, uint32_t numBytes);
-
-/**
- * cdf_mem_compare() - memory compare
- * @pMemory1:	pointer to one location in memory to compare.
- * @pMemory2:	pointer to second location in memory to compare.
- * @numBytes:	the number of bytes to compare.
- *
- * Function to compare two pieces of memory, similar to memcmp function
- * in standard C.
- *
- * Return:
- *	bool - returns a bool value that tells if the memory locations
- *	are equal or not equal.
- *
- */
-bool cdf_mem_compare(const void *pMemory1, const void *pMemory2,
-		     uint32_t numBytes);
-
-/**
- * cdf_mem_compare2() - memory compare
- * @pMemory1: pointer to one location in memory to compare.
- * @pMemory2:	pointer to second location in memory to compare.
- * @numBytes:	the number of bytes to compare.
- *
- * Function to compare two pieces of memory, similar to memcmp function
- * in standard C.
- * Return:
- *	 int32_t - returns a bool value that tells if the memory
- *	 locations are equal or not equal.
- *	 0 -- equal
- *	 < 0 -- *pMemory1 is less than *pMemory2
- *	 > 0 -- *pMemory1 is bigger than *pMemory2
- */
-int32_t cdf_mem_compare2(const void *pMemory1, const void *pMemory2,
-			 uint32_t numBytes);
-
-void *cdf_os_mem_alloc_consistent(cdf_device_t osdev, cdf_size_t size,
-				  cdf_dma_addr_t *paddr,
-				  cdf_dma_context_t mctx);
-void
-cdf_os_mem_free_consistent(cdf_device_t osdev,
-			   cdf_size_t size,
-			   void *vaddr,
-			   cdf_dma_addr_t paddr, cdf_dma_context_t memctx);
-
-void
-cdf_os_mem_dma_sync_single_for_device(cdf_device_t osdev,
-				      cdf_dma_addr_t bus_addr,
-				      cdf_size_t size,
-				      enum dma_data_direction direction);
-
-/**
- * cdf_str_len() - returns the length of a string
- * @str:	input string
- *
- * Return:
- *	length of string
- */
-static inline int32_t cdf_str_len(const char *str)
-{
-	return strlen(str);
-}
-
-void cdf_mem_multi_pages_alloc(cdf_device_t osdev,
-				struct cdf_mem_multi_page_t *pages,
-				size_t element_size,
-				uint16_t element_num,
-				cdf_dma_context_t memctxt,
-				bool cacheable);
-
-void cdf_mem_multi_pages_free(cdf_device_t osdev,
-				struct cdf_mem_multi_page_t *pages,
-				cdf_dma_context_t memctxt,
-				bool cacheable);
-#endif /* __CDF_MEMORY_H */

+ 68 - 0
qdf/inc/qdf_module.h

@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2010-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * @file qdf_module.h
+ * This file abstracts "kernel module" semantics.
+ */
+
+#ifndef _QDF_MODULE_H
+#define _QDF_MODULE_H
+
+#include <i_qdf_module.h>
+
+typedef uint32_t (*module_init_func_t)(void);
+
+/**
+ * qdf_virt_module_init - Specify the module's entry point.
+ */
+#define qdf_virt_module_init(_mod_init_func) \
+	__qdf_virt_module_init(_mod_init_func)
+
+/**
+ * qdf_virt_module_exit - Specify the module's exit point.
+ */
+#define qdf_virt_module_exit(_mod_exit_func) \
+	__qdf_virt_module_exit(_mod_exit_func)
+
+/**
+ * qdf_virt_module_name - Specify the module's name.
+ */
+#define qdf_virt_module_name(_name)      __qdf_virt_module_name(_name)
+
+
+/**
+ * qdf_export_symbol - Export a symbol from a module.
+ */
+#define qdf_export_symbol(_sym)         __qdf_export_symbol(_sym)
+
+/**
+ * qdf_declare_param - Declare a module parameter.
+ */
+#define qdf_declare_param(name, _type) __qdf_declare_param(name, _type)
+
+#endif /*_QDF_MODULE_H*/
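
Example (illustrative, not part of this change): how a component could hook
module entry/exit through these wrappers. The exit-handler prototype and the
exact macro expansion are platform details in i_qdf_module.h, so this is only
a sketch.

static uint32_t example_mod_init(void)
{
	/* perform component initialization; 0 means success here */
	return 0;
}

static void example_mod_exit(void)
{
	/* undo whatever example_mod_init() set up */
}

qdf_virt_module_name("qdf_example");
qdf_virt_module_init(example_mod_init);
qdf_virt_module_exit(example_mod_exit);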

File diff suppressed because it is too large
+ 386 - 306
qdf/inc/qdf_nbuf.h


+ 433 - 58
qdf/inc/qdf_net_types.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -26,92 +26,467 @@
  */
 
 /**
- * DOC: cdf_net_types
+ * DOC: qdf_net_types
  * This file defines types used in the networking stack abstraction.
  */
 
-#ifndef _CDF_NET_TYPES_H
-#define _CDF_NET_TYPES_H
+#ifndef _QDF_NET_TYPES_H
+#define _QDF_NET_TYPES_H
 
-#include <cdf_types.h>          /* uint8_t, etc. */
-
-#define ADF_NET_MAC_ADDR_MAX_LEN 6
-#define ADF_NET_IF_NAME_SIZE    64
-#define ADF_NET_ETH_LEN         ADF_NET_MAC_ADDR_MAX_LEN
-#define ADF_NET_MAX_MCAST_ADDR  64
+#include <qdf_types.h>          /* uint8_t, etc. */
+#include <i_qdf_net_types.h>
 
 /* Extended Traffic ID  passed to target if the TID is unknown */
-#define ADF_NBUF_TX_EXT_TID_INVALID     0x1f
+#define QDF_NBUF_TX_EXT_TID_INVALID     0x1f
 
 /**
- * cdf_nbuf_exemption_type - CDF net buf exemption types for encryption
- * @CDF_NBUF_EXEMPT_NO_EXEMPTION: No exemption
- * @CDF_NBUF_EXEMPT_ALWAYS: Exempt always
- * @CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE: Exempt on key mapping
+ * qdf_nbuf_exemption_type - QDF net buf exemption types for encryption
+ * @QDF_NBUF_EXEMPT_NO_EXEMPTION: No exemption
+ * @QDF_NBUF_EXEMPT_ALWAYS: Exempt always
+ * @QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE: Exempt on key mapping
  */
-enum cdf_nbuf_exemption_type {
-	CDF_NBUF_EXEMPT_NO_EXEMPTION = 0,
-	CDF_NBUF_EXEMPT_ALWAYS,
-	CDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE
+enum qdf_nbuf_exemption_type {
+	QDF_NBUF_EXEMPT_NO_EXEMPTION = 0,
+	QDF_NBUF_EXEMPT_ALWAYS,
+	QDF_NBUF_EXEMPT_ON_KEY_MAPPING_KEY_UNAVAILABLE
 };
 
+
+/**
+ * QDF_NET_CMD - expands to the GET/SET command pair for a device attribute
+ */
+#define QDF_NET_CMD(_x) \
+	QDF_NET_CMD_GET_##_x, \
+	QDF_NET_CMD_SET_##_x
+
+/**
+ * qdf_net_cmd_t - Get/Set commands from anet to qdf_drv
+ */
+typedef enum {
+	QDF_NET_CMD(LINK_INFO),
+	QDF_NET_CMD(POLL_INFO),
+	QDF_NET_CMD(CKSUM_INFO),
+	QDF_NET_CMD(RING_INFO),
+	QDF_NET_CMD(MAC_ADDR),
+	QDF_NET_CMD(MTU),
+	QDF_NET_CMD_GET_DMA_INFO,
+	QDF_NET_CMD_GET_OFFLOAD_CAP,
+	QDF_NET_CMD_GET_STATS,
+	QDF_NET_CMD_ADD_VID,
+	QDF_NET_CMD_DEL_VID,
+	QDF_NET_CMD_SET_MCAST,
+	QDF_NET_CMD_GET_MCAST_CAP
+} qdf_net_cmd_t;
+
+typedef __wsum_t wsum_t;
+typedef __in6_addr_t in6_addr_t;
+
+
+#define QDF_NET_MAC_ADDR_MAX_LEN 6
+#define QDF_NET_IF_NAME_SIZE     64
+#define QDF_NET_ETH_LEN          QDF_NET_MAC_ADDR_MAX_LEN
+#define QDF_NET_MAX_MCAST_ADDR   64
+
+/* Extended Traffic ID  passed to target if the TID is unknown */
+#define QDF_NBUF_TX_EXT_TID_INVALID    0x1f
+
+#define QDF_ETH_TYPE_IPV4              0x0800  /* IPV4 */
+#define QDF_ETH_TYPE_IPV6              0x86dd  /* IPV6 */
+
+typedef struct {
+	uint16_t  source;
+	uint16_t  dest;
+	uint32_t  seq;
+	uint32_t  ack_seq;
+#if defined(QDF_LITTLE_ENDIAN_MACHINE)
+	uint16_t   res1:4,
+		   doff:4,
+		   fin:1,
+		   syn:1,
+		   rst:1,
+		   psh:1,
+		   ack:1,
+		   urg:1,
+		   ece:1,
+		   cwr:1;
+#elif defined(QDF_BIG_ENDIAN_MACHINE)
+	uint16_t   doff:4,
+		   res1:4,
+		   cwr:1,
+		   ece:1,
+		   urg:1,
+		   ack:1,
+		   psh:1,
+		   rst:1,
+		   syn:1,
+		   fin:1;
+#else
+#error  "Adjust your byte order"
+#endif
+	uint16_t  window;
+	uint16_t  check;
+	uint16_t  urg_ptr;
+} qdf_net_tcphdr_t;
+
+typedef struct {
+#if defined(QDF_LITTLE_ENDIAN_MACHINE)
+	uint8_t	ip_hl:4,
+		ip_version:4;
+#elif defined(QDF_BIG_ENDIAN_MACHINE)
+	uint8_t	ip_version:4,
+		ip_hl:4;
+#else
+#error  "Please fix"
+#endif
+	uint8_t       ip_tos;
+	uint16_t      ip_len;
+	uint16_t      ip_id;
+	uint16_t      ip_frag_off;
+	uint8_t       ip_ttl;
+	uint8_t       ip_proto;
+	uint16_t      ip_check;
+	uint32_t      ip_saddr;
+	uint32_t      ip_daddr;
+    /*The options start here. */
+} qdf_net_iphdr_t;
+
+/* V3 group record types [grec_type] */
+#define IGMPV3_MODE_IS_INCLUDE     1
+#define IGMPV3_MODE_IS_EXCLUDE     2
+#define IGMPV3_CHANGE_TO_INCLUDE   3
+#define IGMPV3_CHANGE_TO_EXCLUDE   4
+#define IGMPV3_ALLOW_NEW_SOURCES   5
+#define IGMPV3_BLOCK_OLD_SOURCES   6
+
+/**
+ * qdf_net_cmd_vid_t - Command for set/unset vid
+ */
+typedef uint16_t qdf_net_cmd_vid_t;         /* get/set vlan id */
+
+/**
+ * qdf_net_devaddr_t - List of device (multicast) addresses
+ * @num: No. of mcast addresses
+ * @da_addr: Destination address
+ */
+typedef struct qdf_net_devaddr {
+	uint32_t num;
+	uint8_t  *da_addr[QDF_NET_MAX_MCAST_ADDR];
+} qdf_net_devaddr_t;
+
+typedef qdf_net_devaddr_t qdf_net_cmd_mcaddr_t;
+
 /**
- * typedef cdf_nbuf_tx_cksum_t - transmit checksum offload types
- * @CDF_NBUF_TX_CKSUM_NONE: No checksum offload
- * @CDF_NBUF_TX_CKSUM_IP: IP header checksum offload
- * @CDF_NBUF_TX_CKSUM_TCP_UDP: TCP/UDP checksum offload
- * @CDF_NBUF_TX_CKSUM_TCP_UDP_IP: TCP/UDP and IP header checksum offload
+ * typedef qdf_nbuf_tx_cksum_t - transmit checksum offload types
+ * @QDF_NBUF_TX_CKSUM_NONE: No checksum offload
+ * @QDF_NBUF_TX_CKSUM_IP: IP header checksum offload
+ * @QDF_NBUF_TX_CKSUM_TCP_UDP: TCP/UDP checksum offload
+ * @QDF_NBUF_TX_CKSUM_TCP_UDP_IP: TCP/UDP and IP header checksum offload
  */
 
 typedef enum {
-	CDF_NBUF_TX_CKSUM_NONE,
-	CDF_NBUF_TX_CKSUM_IP,
-	CDF_NBUF_TX_CKSUM_TCP_UDP,
-	CDF_NBUF_TX_CKSUM_TCP_UDP_IP,
+	QDF_NBUF_TX_CKSUM_NONE,
+	QDF_NBUF_TX_CKSUM_IP,
+	QDF_NBUF_TX_CKSUM_TCP_UDP,
+	QDF_NBUF_TX_CKSUM_TCP_UDP_IP,
 
-} cdf_nbuf_tx_cksum_t;
+} qdf_nbuf_tx_cksum_t;
 
 /**
- * typedef cdf_nbuf_l4_rx_cksum_type_t - receive checksum API types
- * @CDF_NBUF_RX_CKSUM_TCP: Rx checksum TCP
- * @CDF_NBUF_RX_CKSUM_UDP: Rx checksum UDP
- * @CDF_NBUF_RX_CKSUM_TCPIPV6: Rx checksum TCP IPV6
- * @CDF_NBUF_RX_CKSUM_UDPIPV6: Rx checksum UDP IPV6
- * @CDF_NBUF_RX_CKSUM_TCP_NOPSEUDOHEADER: Rx checksum TCP no pseudo header
- * @CDF_NBUF_RX_CKSUM_UDP_NOPSEUDOHEADER: Rx checksum UDP no pseudo header
- * @CDF_NBUF_RX_CKSUM_TCPSUM16: Rx checksum TCP SUM16
+ * typedef qdf_nbuf_l4_rx_cksum_type_t - receive checksum API types
+ * @QDF_NBUF_RX_CKSUM_TCP: Rx checksum TCP
+ * @QDF_NBUF_RX_CKSUM_UDP: Rx checksum UDP
+ * @QDF_NBUF_RX_CKSUM_TCPIPV6: Rx checksum TCP IPV6
+ * @QDF_NBUF_RX_CKSUM_UDPIPV6: Rx checksum UDP IPV6
+ * @QDF_NBUF_RX_CKSUM_TCP_NOPSEUDOHEADER: Rx checksum TCP no pseudo header
+ * @QDF_NBUF_RX_CKSUM_UDP_NOPSEUDOHEADER: Rx checksum UDP no pseudo header
+ * @QDF_NBUF_RX_CKSUM_TCPSUM16: Rx checksum TCP SUM16
  */
 typedef enum {
-	CDF_NBUF_RX_CKSUM_TCP = 0x0001,
-	CDF_NBUF_RX_CKSUM_UDP = 0x0002,
-	CDF_NBUF_RX_CKSUM_TCPIPV6 = 0x0010,
-	CDF_NBUF_RX_CKSUM_UDPIPV6 = 0x0020,
-	CDF_NBUF_RX_CKSUM_TCP_NOPSEUDOHEADER = 0x0100,
-	CDF_NBUF_RX_CKSUM_UDP_NOPSEUDOHEADER = 0x0200,
-	CDF_NBUF_RX_CKSUM_TCPSUM16 = 0x1000,
-} cdf_nbuf_l4_rx_cksum_type_t;
+	QDF_NBUF_RX_CKSUM_TCP = 0x0001,
+	QDF_NBUF_RX_CKSUM_UDP = 0x0002,
+	QDF_NBUF_RX_CKSUM_TCPIPV6 = 0x0010,
+	QDF_NBUF_RX_CKSUM_UDPIPV6 = 0x0020,
+	QDF_NBUF_RX_CKSUM_TCP_NOPSEUDOHEADER = 0x0100,
+	QDF_NBUF_RX_CKSUM_UDP_NOPSEUDOHEADER = 0x0200,
+	QDF_NBUF_RX_CKSUM_TCPSUM16 = 0x1000,
+} qdf_nbuf_l4_rx_cksum_type_t;
 
 /**
- * typedef cdf_nbuf_l4_rx_cksum_result_t - receive checksum status types
- * @CDF_NBUF_RX_CKSUM_NONE: Device failed to checksum
- * @CDF_NBUF_RX_CKSUM_TCP_UDP_HW: TCP/UDP cksum successful and value returned
- * @CDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY: TCP/UDP cksum successful, no value
+ * typedef qdf_nbuf_l4_rx_cksum_result_t - receive checksum status types
+ * @QDF_NBUF_RX_CKSUM_NONE: Device failed to checksum
+ * @QDF_NBUF_RX_CKSUM_TCP_UDP_HW: TCP/UDP cksum successful and value returned
+ * @QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY: TCP/UDP cksum successful, no value
  */
 typedef enum {
-	CDF_NBUF_RX_CKSUM_NONE = 0x0000,
-	CDF_NBUF_RX_CKSUM_TCP_UDP_HW = 0x0010,
-	CDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY = 0x0020,
-} cdf_nbuf_l4_rx_cksum_result_t;
+	QDF_NBUF_RX_CKSUM_NONE = 0x0000,
+	QDF_NBUF_RX_CKSUM_TCP_UDP_HW = 0x0010,
+	QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY = 0x0020,
+} qdf_nbuf_l4_rx_cksum_result_t;
 
 /**
- * typedef cdf_nbuf_rx_cksum_t - receive checksum type
+ * typedef qdf_nbuf_rx_cksum_t - receive checksum type
  * @l4_type: L4 type
  * @l4_result: L4 result
  */
 typedef struct {
-	cdf_nbuf_l4_rx_cksum_type_t l4_type;
-	cdf_nbuf_l4_rx_cksum_result_t l4_result;
+	qdf_nbuf_l4_rx_cksum_type_t l4_type;
+	qdf_nbuf_l4_rx_cksum_result_t l4_result;
 	uint32_t val;
-} cdf_nbuf_rx_cksum_t;
+} qdf_nbuf_rx_cksum_t;
+
+#define QDF_ARP_REQ       1 /* ARP request */
+#define QDF_ARP_RSP       2 /* ARP response */
+#define QDF_ARP_RREQ      3 /* RARP request */
+#define QDF_ARP_RRSP      4 /* RARP response */
+
+#define QDF_NEXTHDR_ICMP  58 /* ICMP for IPv6. */
+
+/* Neighbor Discovery */
+#define QDF_ND_RSOL       133 /* Router Solicitation */
+#define QDF_ND_RADVT      134 /* Router Advertisement */
+#define QDF_ND_NSOL       135 /* Neighbor Solicitation */
+#define QDF_ND_NADVT      136 /* Neighbor Advertisement */
+
+/**
+ * typedef qdf_net_udphdr_t - UDP header info
+ * @src_port: source port
+ * @dst_port: destination port
+ * @udp_len: length
+ * @udp_cksum: checksum
+ */
+typedef struct {
+	uint16_t src_port;
+	uint16_t dst_port;
+	uint16_t udp_len;
+	uint16_t udp_cksum;
+} qdf_net_udphdr_t;
+
+/**
+ * typedef qdf_net_dhcphdr_t - DHCP header info
+ * @dhcp_msg_type: message type
+ * @dhcp_hw_type: hardware type
+ * @dhcp_hw_addr_len: hardware address length
+ * @dhcp_num_hops: number of hops
+ * @dhcp_transc_id: transaction id
+ * @dhcp_secs_elapsed: time elapsed
+ * @dhcp_flags: flags
+ * @dhcp_ciaddr: client IP
+ * @dhcp_yiaddr: device IP
+ * @dhcp_siaddr_nip: Server IP
+ * @dhcp_gateway_nip: relay agent IP
+ * @dhcp_chaddr: LLC hardware address
+ * @dhcp_sname: server host name
+ * @dhcp_file: boot file name
+ * @dhcp_cookie: cookie
+ */
+typedef struct {
+	uint8_t  dhcp_msg_type;
+	uint8_t  dhcp_hw_type;
+	uint8_t  dhcp_hw_addr_len;
+	uint8_t  dhcp_num_hops;
+	uint32_t dhcp_transc_id;
+	uint16_t dhcp_secs_elapsed;
+	uint16_t dhcp_flags;
+	uint32_t dhcp_ciaddr;
+	uint32_t dhcp_yiaddr;
+	uint32_t dhcp_siaddr_nip;
+	uint32_t dhcp_gateway_nip;
+	uint8_t  dhcp_chaddr[16];
+	uint8_t  dhcp_sname[64];
+	uint8_t  dhcp_file[128];
+	uint8_t  dhcp_cookie[4];
+} qdf_net_dhcphdr_t;
+
+
+/**
+ * qdf_net_vlanhdr_t - Vlan header
+ */
+typedef struct qdf_net_vlanhdr {
+	uint16_t tpid;
+#if defined(QDF_LITTLE_ENDIAN_MACHINE)
+	uint16_t vid:12; /* Vlan id*/
+	uint8_t  cfi:1; /* reserved for CFI, don't use*/
+	uint8_t  prio:3; /* Priority*/
+#elif defined(QDF_BIG_ENDIAN_MACHINE)
+	uint8_t  prio:3; /* Priority*/
+	uint8_t  cfi:1; /* reserved for CFI, don't use*/
+	uint16_t vid:12; /* Vlan id*/
+#else
+#error  "Please fix"
+#endif
+} qdf_net_vlanhdr_t;
+
+typedef struct qdf_net_vid {
+#if defined(QDF_LITTLE_ENDIAN_MACHINE)
+	uint16_t val:12;
+	uint8_t  res:4;
+#elif defined(QDF_BIG_ENDIAN_MACHINE)
+	uint8_t  res:4;
+	uint16_t val:12;
+#else
+#error  "Please fix"
+#endif
+} qdf_net_vid_t;
+
+typedef enum {
+	QDF_NET_TSO_NONE,
+	QDF_NET_TSO_IPV4,     /**< for TSO over ipv4 only */
+	QDF_NET_TSO_ALL,      /**< ip4 & ipv6*/
+} qdf_net_tso_type_t;
+
+/**
+ * qdf_net_dev_info_t - Basic device info
+ */
+typedef struct {
+	uint8_t  if_name[QDF_NET_IF_NAME_SIZE];
+	uint8_t  dev_addr[QDF_NET_MAC_ADDR_MAX_LEN];
+	uint16_t header_len;
+	uint16_t mtu_size;
+	uint32_t unit;
+} qdf_net_dev_info_t;
+
+/**
+ * qdf_nbuf_tso_t - For TCP large Segment Offload
+ */
+typedef struct {
+	qdf_net_tso_type_t  type;
+	uint16_t mss;
+	uint8_t  hdr_off;
+} qdf_nbuf_tso_t;
+
+/**
+ * qdf_net_wireless_event_t - Wireless events
+ * @QDF_IEEE80211_ASSOC: station associate (bss mode)
+ * @QDF_IEEE80211_REASSOC: station re-associate (bss mode)
+ * @QDF_IEEE80211_DISASSOC: station disassociate (bss mode)
+ * @QDF_IEEE80211_JOIN: station join (ap mode)
+ * @QDF_IEEE80211_LEAVE: station leave (ap mode)
+ * @QDF_IEEE80211_SCAN: scan complete, results available
+ * @QDF_IEEE80211_REPLAY: sequence counter replay detected
+ * @QDF_IEEE80211_MICHAEL: Michael MIC failure detected
+ * @QDF_IEEE80211_REJOIN: station re-associate (ap mode)
+ * @QDF_CUSTOM_PUSH_BUTTON: WPS push button
+ */
+typedef enum qdf_net_wireless_events {
+	QDF_IEEE80211_ASSOC = __QDF_IEEE80211_ASSOC,
+	QDF_IEEE80211_REASSOC = __QDF_IEEE80211_REASSOC,
+	QDF_IEEE80211_DISASSOC = __QDF_IEEE80211_DISASSOC,
+	QDF_IEEE80211_JOIN = __QDF_IEEE80211_JOIN,
+	QDF_IEEE80211_LEAVE = __QDF_IEEE80211_LEAVE,
+	QDF_IEEE80211_SCAN = __QDF_IEEE80211_SCAN,
+	QDF_IEEE80211_REPLAY = __QDF_IEEE80211_REPLAY,
+	QDF_IEEE80211_MICHAEL = __QDF_IEEE80211_MICHAEL,
+	QDF_IEEE80211_REJOIN = __QDF_IEEE80211_REJOIN,
+	QDF_CUSTOM_PUSH_BUTTON = __QDF_CUSTOM_PUSH_BUTTON
+} qdf_net_wireless_event_t;
+
+/**
+ * qdf_net_ipv6_addr_t - IPv6 Address
+ */
+typedef struct {
+	union {
+		uint8_t  u6_addr8[16];
+		uint16_t u6_addr16[8];
+		uint32_t u6_addr32[4];
+	} in6_u;
+#define s6_addr32   in6_u.u6_addr32
+} qdf_net_ipv6_addr_t;
+
+/**
+ * qdf_net_ipv6hdr_t - IPv6 Header
+ */
+typedef struct {
+#if defined(QDF_LITTLE_ENDIAN_MACHINE)
+	uint8_t ipv6_priority:4,
+		ipv6_version:4;
+#elif defined(QDF_BIG_ENDIAN_MACHINE)
+	uint8_t ipv6_version:4,
+		ipv6_priority:4;
+#else
+#error  "Please fix"
+#endif
+	uint8_t ipv6_flow_lbl[3];
+
+	uint16_t ipv6_payload_len;
+	uint8_t  ipv6_nexthdr,
+		 ipv6_hop_limit;
+
+	qdf_net_ipv6_addr_t ipv6_saddr,
+			    ipv6_daddr;
+} qdf_net_ipv6hdr_t;
+
+/**
+ * qdf_net_icmpv6hdr_t - ICMPv6 Header
+ */
+typedef struct {
+	uint8_t	 icmp6_type;
+	uint8_t	 icmp6_code;
+	uint16_t icmp6_cksum;
+
+	union {
+		uint32_t un_data32[1];
+		uint16_t un_data16[2];
+		uint8_t  un_data8[4];
+
+		struct {
+			uint16_t identifier;
+			uint16_t sequence;
+		} u_echo;
+
+		struct {
+#if defined(QDF_LITTLE_ENDIAN_MACHINE)
+			uint32_t reserved:5,
+				 override:1,
+				 solicited:1,
+				 router:1,
+				 reserved2:24;
+#elif defined(QDF_BIG_ENDIAN_MACHINE)
+			uint32_t router:1,
+				 solicited:1,
+				 override:1,
+				 reserved:29;
+#else
+#error  "Please fix"
+#endif
+		} u_nd_advt;
+
+		struct {
+			uint8_t	hop_limit;
+#if defined(QDF_LITTLE_ENDIAN_MACHINE)
+			uint8_t	reserved:6,
+				other:1,
+				managed:1;
+
+#elif defined(QDF_BIG_ENDIAN_MACHINE)
+			uint8_t	managed:1,
+				other:1,
+				reserved:6;
+#else
+#error  "Please fix"
+#endif
+			uint16_t rt_lifetime;
+		} u_nd_ra;
+
+	} icmp6_dataun;
+
+} qdf_net_icmpv6hdr_t;
+
+/**
+ * qdf_net_nd_msg_t - Neighbor Discovery Message
+ */
+typedef struct {
+	qdf_net_icmpv6hdr_t nd_icmph;
+	qdf_net_ipv6_addr_t nd_target;
+	uint8_t	nd_opt[0];
+} qdf_net_nd_msg_t;
+
+
+static inline int32_t qdf_csum_ipv6(const in6_addr_t *saddr,
+				    const in6_addr_t *daddr,
+				    __u32 len, unsigned short proto,
+				    wsum_t sum)
+{
+	return (int32_t)__qdf_csum_ipv6(saddr, daddr, len, proto, sum);
+}
 
-#endif /*_CDF_NET_TYPES_H*/
+#endif /*_QDF_NET_TYPES_H*/
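
Example (illustrative, not part of this change): classifying a frame with the
abstracted header types above. The caller is assumed to have validated header
lengths; TCP's protocol number (6) is spelled as a literal because this header
does not define protocol numbers.

static bool example_is_tcp_ipv4(uint16_t ether_type,
				const qdf_net_iphdr_t *iph)
{
	if (ether_type != QDF_ETH_TYPE_IPV4)
		return false;

	/* ip_proto 6 == TCP */
	return iph->ip_proto == 6;
}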

+ 119 - 0
qdf/inc/qdf_perf.h

@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_perf
+ * This file provides OS abstraction perf API's.
+ */
+
+#ifndef _QDF_PERF_H
+#define _QDF_PERF_H
+
+/* headers */
+#include <i_qdf_perf.h>
+
+#ifdef QCA_PERF_PROFILING
+
+/* Typedefs */
+typedef __qdf_perf_id_t  qdf_perf_id_t;
+
+typedef int (*proc_read_t)(char *page, char **start, off_t off, int count,
+		int *eof, void *data);
+typedef int (*proc_write_t)(struct file *file, const char *buf,
+		unsigned long count, void *data);
+typedef void (*perf_sample_t)(struct qdf_perf_entry  *entry,
+		uint8_t  done);
+
+typedef void (*perf_init_t)(struct qdf_perf_entry *entry, uint32_t def_val);
+
+/**
+ * typedef proc_api_tbl_t - contains functions to read, write to proc FS
+ * @proc_read: function pointer to read function
+ * @proc_write: function pointer to write function
+ * @sample: function pointer to sample function
+ * @init: function pointer to init function
+ * @def_val: int contains default value
+ */
+typedef struct proc_api_tbl {
+	proc_read_t     proc_read;
+	proc_write_t    proc_write;
+	perf_sample_t   sample;
+	perf_init_t     init;
+	uint32_t        def_val;
+} proc_api_tbl_t;
+
+proc_api_tbl_t          api_tbl[];
+
+/* Macros */
+#define INIT_API(name, val)    {   \
+	.proc_read  = read_##name,     \
+	.proc_write = write_##name,    \
+	.sample     = sample_event,    \
+	.init       = init_##name,     \
+	.def_val    = val,             \
+}
+
+#define PERF_ENTRY(hdl) ((qdf_perf_entry_t *)hdl)
+
+#define qdf_perf_init(_parent, _id, _ctr_type)   \
+	__qdf_perf_init((_parent), (_id), (_ctr_type))
+
+#define qdf_perf_destroy(_id) __qdf_perf_destroy((_id))
+
+#define qdf_perf_start(_id) __qdf_perf_start((_id))
+
+#define qdf_perf_end(_id)  __qdf_perf_end((_id))
+
+/* Extern declarations */
+extern __qdf_perf_id_t
+	__qdf_perf_init(__qdf_perf_id_t parent,
+			uint8_t *id_name,
+			uint32_t type);
+
+extern bool __qdf_perf_destroy(__qdf_perf_id_t id);
+
+extern void __qdf_perf_start(__qdf_perf_id_t id);
+extern void __qdf_perf_end(__qdf_perf_id_t id);
+
+extern int
+qdf_perfmod_init(void);
+extern void
+qdf_perfmod_exit(void);
+
+#else /* !QCA_PERF_PROFILING */
+
+#define qdf_perfmod_init()
+#define qdf_perfmod_exit()
+#define DECLARE_N_EXPORT_PERF_CNTR(id)
+#define START_PERF_CNTR(_id, _name)
+#define END_PERF_CNTR(_id)
+
+#endif /* QCA_PERF_PROFILING */
+
+#endif /* end of _QDF_PERF_H */
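
Example (illustrative, not part of this change): the perf module hooks compile
away to nothing unless QCA_PERF_PROFILING is defined, so callers can invoke
them unconditionally.

static void example_perf_usage(void)
{
	qdf_perfmod_init();
	/* ... code paths being profiled ... */
	qdf_perfmod_exit();
}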

+ 94 - 77
qdf/inc/qdf_status.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -25,87 +25,104 @@
  * to the Linux Foundation.
  */
 
-#if !defined(__CDF_STATUS_H)
-#define __CDF_STATUS_H
-
 /**
- * DOC:  cdf_status
- *
- * Connectivity driver framework (CDF) status codes
- *
- * Basic status codes/definitions used by CDF
+ * DOC: qdf_status
+ * QCA driver framework (QDF) status codes
+ * Basic status codes/definitions used by QDF
  */
 
+#if !defined(__QDF_STATUS_H)
+#define __QDF_STATUS_H
+
 /**
- * typedef CDF_STATUS - CDF error codes
- * @CDF_STATUS_SUCCESS: success
- * @CDF_STATUS_E_RESOURCES: system resource(other than memory) not available
- * @CDF_STATUS_E_NOMEM: not enough memory
- * @CDF_STATUS_E_AGAIN: try again
- * @CDF_STATUS_E_INVAL: invalid request
- * @CDF_STATUS_E_FAULT: system fault
- * @CDF_STATUS_E_ALREADY: another request already in progress
- * @CDF_STATUS_E_BADMSG: bad message
- * @CDF_STATUS_E_BUSY: device or resource busy
- * @CDF_STATUS_E_CANCELED: request cancelled
- * @CDF_STATUS_E_ABORTED: request aborted
- * @CDF_STATUS_E_NOSUPPORT: request not supported
- * @CDF_STATUS_E_PERM: operation not permitted
- * @CDF_STATUS_E_EMPTY: empty condition
- * @CDF_STATUS_E_EXISTS: existence failure
- * @CDF_STATUS_E_TIMEOUT: operation timeout
- * @CDF_STATUS_E_FAILURE: unknown reason do not use unless nothign else applies
- * @CDF_STATUS_NOT_INITIALIZED: resource not initialized
- * @CDF_STATUS_E_NULL_VALUE: request is null
- * @CDF_STATUS_PMC_PENDING: request pendign in pmc
- * @CDF_STATUS_PMC_DISABLED: pmc is disabled
- * @CDF_STATUS_PMC_NOT_NOW: pmc not ready now
- * @CDF_STATUS_PMC_AC_POWER: pmc ac power
- * @CDF_STATUS_PMC_SYS_ERROR: pmc system error
- * @CDF_STATUS_HEARTBEAT_TMOUT: hearbeat timeout error
- * @CDF_STATUS_NTH_BEACON_DELIVERY: Nth beacon delivery
- * @CDF_STATUS_CSR_WRONG_STATE: csr in wrong state
- * @CDF_STATUS_FT_PREAUTH_KEY_SUCCESS: ft preauth key success
- * @CDF_STATUS_FT_PREAUTH_KEY_FAILED: ft preauth key failed
- * @CDF_STATUS_CMD_NOT_QUEUED: command not queued
- * @CDF_STATUS_FW_MSG_TIMEDOUT: target message timeout
- * @CDF_STATUS_MAX: not a realy value just a place holder for max
+ * typedef QDF_STATUS - QDF error codes
+ * @QDF_STATUS_SUCCESS: success
+ * @QDF_STATUS_E_RESOURCES: system resource(other than memory) not available
+ * @QDF_STATUS_E_NOMEM: not enough memory
+ * @QDF_STATUS_E_AGAIN: try again
+ * @QDF_STATUS_E_INVAL: invalid request
+ * @QDF_STATUS_E_FAULT: system fault
+ * @QDF_STATUS_E_ALREADY: another request already in progress
+ * @QDF_STATUS_E_BADMSG: bad message
+ * @QDF_STATUS_E_BUSY: device or resource busy
+ * @QDF_STATUS_E_CANCELED: request cancelled
+ * @QDF_STATUS_E_ABORTED: request aborted
+ * @QDF_STATUS_E_NOSUPPORT: request not supported
+ * @QDF_STATUS_E_PERM: operation not permitted
+ * @QDF_STATUS_E_EMPTY: empty condition
+ * @QDF_STATUS_E_EXISTS: existence failure
+ * @QDF_STATUS_E_TIMEOUT: operation timeout
+ * @QDF_STATUS_E_FAILURE: unknown reason, do not use unless nothing else applies
+ * @QDF_STATUS_E_NOENT: No such file or directory
+ * @QDF_STATUS_E_E2BIG: Arg list too long
+ * @QDF_STATUS_E_NOSPC: no space left on device
+ * @QDF_STATUS_E_ADDRNOTAVAIL: Cannot assign requested address
+ * @QDF_STATUS_E_ENXIO: No such device or address
+ * @QDF_STATUS_E_NETDOWN: network is down
+ * @QDF_STATUS_E_IO: I/O Error
+ * @QDF_STATUS_E_NETRESET: Network dropped connection because of reset
+ * @QDF_STATUS_E_SIG: Exit due to received SIGINT
+ * @QDF_STATUS_NOT_INITIALIZED: resource not initialized
+ * @QDF_STATUS_E_NULL_VALUE: request is null
+ * @QDF_STATUS_PMC_PENDING: request pending in pmc
+ * @QDF_STATUS_PMC_DISABLED: pmc is disabled
+ * @QDF_STATUS_PMC_NOT_NOW: pmc not ready now
+ * @QDF_STATUS_PMC_AC_POWER: pmc ac power
+ * @QDF_STATUS_PMC_SYS_ERROR: pmc system error
+ * @QDF_STATUS_HEARTBEAT_TMOUT: heartbeat timeout error
+ * @QDF_STATUS_NTH_BEACON_DELIVERY: Nth beacon delivery
+ * @QDF_STATUS_CSR_WRONG_STATE: csr in wrong state
+ * @QDF_STATUS_FT_PREAUTH_KEY_SUCCESS: ft preauth key success
+ * @QDF_STATUS_FT_PREAUTH_KEY_FAILED: ft preauth key failed
+ * @QDF_STATUS_CMD_NOT_QUEUED: command not queued
+ * @QDF_STATUS_FW_MSG_TIMEDOUT: target message timeout
+ * @QDF_STATUS_MAX: not a real value, just a placeholder for max
  */
 typedef enum {
-	CDF_STATUS_SUCCESS,
-	CDF_STATUS_E_RESOURCES,
-	CDF_STATUS_E_NOMEM,
-	CDF_STATUS_E_AGAIN,
-	CDF_STATUS_E_INVAL,
-	CDF_STATUS_E_FAULT,
-	CDF_STATUS_E_ALREADY,
-	CDF_STATUS_E_BADMSG,
-	CDF_STATUS_E_BUSY,
-	CDF_STATUS_E_CANCELED,
-	CDF_STATUS_E_ABORTED,
-	CDF_STATUS_E_NOSUPPORT,
-	CDF_STATUS_E_PERM,
-	CDF_STATUS_E_EMPTY,
-	CDF_STATUS_E_EXISTS,
-	CDF_STATUS_E_TIMEOUT,
-	CDF_STATUS_E_FAILURE,
-	CDF_STATUS_NOT_INITIALIZED,
-	CDF_STATUS_E_NULL_VALUE,
-	CDF_STATUS_PMC_PENDING,
-	CDF_STATUS_PMC_DISABLED,
-	CDF_STATUS_PMC_NOT_NOW,
-	CDF_STATUS_PMC_AC_POWER,
-	CDF_STATUS_PMC_SYS_ERROR,
-	CDF_STATUS_HEARTBEAT_TMOUT,
-	CDF_STATUS_NTH_BEACON_DELIVERY,
-	CDF_STATUS_CSR_WRONG_STATE,
-	CDF_STATUS_FT_PREAUTH_KEY_SUCCESS,
-	CDF_STATUS_FT_PREAUTH_KEY_FAILED,
-	CDF_STATUS_CMD_NOT_QUEUED,
-	CDF_STATUS_FW_MSG_TIMEDOUT,
-	CDF_STATUS_MAX
-} CDF_STATUS;
+	QDF_STATUS_SUCCESS,
+	QDF_STATUS_E_RESOURCES,
+	QDF_STATUS_E_NOMEM,
+	QDF_STATUS_E_AGAIN,
+	QDF_STATUS_E_INVAL,
+	QDF_STATUS_E_FAULT,
+	QDF_STATUS_E_ALREADY,
+	QDF_STATUS_E_BADMSG,
+	QDF_STATUS_E_BUSY,
+	QDF_STATUS_E_CANCELED,
+	QDF_STATUS_E_ABORTED,
+	QDF_STATUS_E_NOSUPPORT,
+	QDF_STATUS_E_PERM,
+	QDF_STATUS_E_EMPTY,
+	QDF_STATUS_E_EXISTS,
+	QDF_STATUS_E_TIMEOUT,
+	QDF_STATUS_E_FAILURE,
+	QDF_STATUS_E_NOENT,
+	QDF_STATUS_E_E2BIG,
+	QDF_STATUS_E_NOSPC,
+	QDF_STATUS_E_ADDRNOTAVAIL,
+	QDF_STATUS_E_ENXIO,
+	QDF_STATUS_E_NETDOWN,
+	QDF_STATUS_E_IO,
+	QDF_STATUS_E_NETRESET,
+	QDF_STATUS_E_SIG,
+	QDF_STATUS_NOT_INITIALIZED,
+	QDF_STATUS_E_NULL_VALUE,
+	QDF_STATUS_PMC_PENDING,
+	QDF_STATUS_PMC_DISABLED,
+	QDF_STATUS_PMC_NOT_NOW,
+	QDF_STATUS_PMC_AC_POWER,
+	QDF_STATUS_PMC_SYS_ERROR,
+	QDF_STATUS_HEARTBEAT_TMOUT,
+	QDF_STATUS_NTH_BEACON_DELIVERY,
+	QDF_STATUS_CSR_WRONG_STATE,
+	QDF_STATUS_FT_PREAUTH_KEY_SUCCESS,
+	QDF_STATUS_FT_PREAUTH_KEY_FAILED,
+	QDF_STATUS_CMD_NOT_QUEUED,
+	QDF_STATUS_FW_MSG_TIMEDOUT,
+	QDF_STATUS_MAX
+} QDF_STATUS;
 
-#define CDF_IS_STATUS_SUCCESS(status) (CDF_STATUS_SUCCESS == (status))
+#define QDF_IS_STATUS_SUCCESS(status) (QDF_STATUS_SUCCESS == (status))
+#define QDF_IS_STATUS_ERROR(status) (QDF_STATUS_SUCCESS != (status))
 
-#endif /* if !defined __CDF_STATUS_H */
+#endif /* if !defined __QDF_STATUS_H */
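
Example (illustrative, not part of this change): a typical error-handling
pattern with the converged status codes and the new QDF_IS_STATUS_ERROR()
helper.

static QDF_STATUS example_status_usage(void *ctx)
{
	QDF_STATUS status;

	if (!ctx)
		return QDF_STATUS_E_NULL_VALUE;

	status = QDF_STATUS_SUCCESS;  /* result of some operation */
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	return QDF_STATUS_SUCCESS;
}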

+ 10 - 47
qdf/inc/qdf_threads.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -25,59 +25,22 @@
  * to the Linux Foundation.
  */
 
-#if !defined(__CDF_THREADS_H)
-#define __CDF_THREADS_H
-
 /**
- * DOC:  cdf_threads
- *
- * Connectivity driver framework (CDF) thread related APIs
- *
+ * DOC:  qdf_threads
+ * QCA driver framework (QDF) thread related APIs
  */
 
-/* Include Files */
-#include <cdf_types.h>
-
-/* Preprocessor definitions and constants */
+#if !defined(__QDF_THREADS_H)
+#define __QDF_THREADS_H
 
-/* Type declarations */
+#include <qdf_types.h>
 
 /* Function declarations and documentation */
 
-/**
- *  cdf_sleep() - sleep
- *  @msInterval : Number of milliseconds to suspend the current thread.
- *  A value of 0 may or may not cause the current thread to yield.
- *
- *  This function suspends the execution of the current thread
- *  until the specified time out interval elapses.
- *
- *  Return: nothing
- */
-void cdf_sleep(uint32_t msInterval);
+void qdf_sleep(uint32_t ms_interval);
 
-/**
- *  cdf_sleep_us() - sleep
- *  @usInterval : Number of microseconds to suspend the current thread.
- *  A value of 0 may or may not cause the current thread to yield.
- *
- *  This function suspends the execution of the current thread
- *  until the specified time out interval elapses.
- *
- *  Return : nothing
- */
-void cdf_sleep_us(uint32_t usInterval);
+void qdf_sleep_us(uint32_t us_interval);
 
-/**
- *  cdf_busy_wait() - busy wait
- *  @usInterval : Number of microseconds to busy wait.
- *
- *  This function places the current thread in busy wait until the specified
- *  time out interval elapses. If the interval is greater than 50us on WM, the
- *  behaviour is undefined.
- *
- *  Return : nothing
- */
-void cdf_busy_wait(uint32_t usInterval);
+void qdf_busy_wait(uint32_t us_interval);
 
-#endif /* __CDF_THREADS_H */
+#endif /* __QDF_THREADS_H */
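
Example (illustrative, not part of this change): the three delay primitives
kept by this header. The intervals are arbitrary example values.

static void example_delay_usage(void)
{
	qdf_sleep(20);      /* suspend the current thread for ~20 ms */
	qdf_sleep_us(500);  /* suspend the current thread for ~500 us */
	qdf_busy_wait(10);  /* spin for ~10 us without sleeping */
}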

+ 84 - 84
qdf/inc/qdf_time.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -26,177 +26,177 @@
  */
 
 /**
- * DOC: cdf_time
+ * DOC: qdf_time
  * This file abstracts time related functionality.
  */
 
-#ifndef _CDF_OS_TIME_H
-#define _CDF_OS_TIME_H
+#ifndef _QDF_OS_TIME_H
+#define _QDF_OS_TIME_H
 
-#include <i_cdf_time.h>
-#ifdef CONFIG_CNSS
-#include <net/cnss.h>
-#endif
+#include <i_qdf_time.h>
 
-typedef __cdf_time_t cdf_time_t;
+typedef __qdf_time_t qdf_time_t;
 
 /**
- * cdf_system_ticks() - Count the number of ticks elapsed from the time when
- *			the system booted
+ * qdf_system_ticks - Count the number of ticks elapsed from the time when
+ * the system booted
  *
  * Return: ticks
  */
-static inline unsigned long cdf_system_ticks(void)
+static inline unsigned long qdf_system_ticks(void)
 {
-	return __cdf_system_ticks();
+	return __qdf_system_ticks();
 }
 
 /**
- * cdf_system_ticks_to_msecs() - convert ticks to milliseconds
+ * qdf_system_ticks_to_msecs - convert ticks to milliseconds
  * @clock_ticks: Number of ticks
  *
- * Return: Time in milliseconds
+ * Return: unsigned int Time in milliseconds
  */
-static inline uint32_t cdf_system_ticks_to_msecs(unsigned long clock_ticks)
+static inline uint32_t qdf_system_ticks_to_msecs(unsigned long clock_ticks)
 {
-	return __cdf_system_ticks_to_msecs(clock_ticks);
+	return __qdf_system_ticks_to_msecs(clock_ticks);
 }
 
 /**
- * cdf_system_msecs_to_ticks() - convert milliseconds to ticks
+ * qdf_system_msecs_to_ticks - convert milliseconds to ticks
  * @msec: Time in milliseconds
  *
- * Return: number of ticks
+ * Return: unsigned long number of ticks
  */
-static inline unsigned long cdf_system_msecs_to_ticks(uint32_t msecs)
+static inline unsigned long qdf_system_msecs_to_ticks(uint32_t msecs)
 {
-	return __cdf_system_msecs_to_ticks(msecs);
+	return __qdf_system_msecs_to_ticks(msecs);
 }
 
 /**
- * cdf_get_system_uptime() - Return a monotonically increasing time.
+ * qdf_get_system_uptime - Return a monotonically increasing time
  * This increments once per HZ ticks
  *
- * Return: system up time
+ * Return: unsigned long system up time
  */
-static inline unsigned long cdf_get_system_uptime(void)
+static inline unsigned long qdf_get_system_uptime(void)
 {
-	return __cdf_get_system_uptime();
+	return __qdf_get_system_uptime();
 }
 
 /**
- * cdf_get_system_timestamp() - brief Return current timestamp
+ * qdf_get_system_timestamp - Return current timestamp
  *
- * Return: none
+ * Return: unsigned long
  */
-static inline unsigned long cdf_get_system_timestamp(void)
+static inline unsigned long qdf_get_system_timestamp(void)
 {
-	return __cdf_get_system_timestamp();
+	return __qdf_get_system_timestamp();
 }
 
 /**
- * cdf_udelay() - delay in microseconds
+ * qdf_udelay - delay in microseconds
  * @usecs: Number of microseconds to delay
  *
  * Return: none
  */
-static inline void cdf_udelay(int usecs)
+static inline void qdf_udelay(int usecs)
 {
-	__cdf_udelay(usecs);
+	__qdf_udelay(usecs);
 }
 
 /**
- * cdf_mdelay() - Delay in milliseconds.
+ * qdf_mdelay - Delay in milliseconds.
  * @msec: Number of milliseconds to delay
  *
  * Return: none
  */
-static inline void cdf_mdelay(int msecs)
+static inline void qdf_mdelay(int msecs)
 {
-	__cdf_mdelay(msecs);
+	__qdf_mdelay(msecs);
 }
 
-/* Check if _a is later than _b */
-#define cdf_system_time_after(_a, _b)       __cdf_system_time_after(_a, _b)
+/**
+ * qdf_system_time_after() - Check if a is later than b
+ * @a: Time stamp value a
+ * @b: Time stamp value b
+ *
+ * Return:
+ * true if a is later than b (a > b), else false
+ */
+static inline bool qdf_system_time_after(qdf_time_t a, qdf_time_t b)
+{
+	return __qdf_system_time_after(a, b);
+}
 
-/* Check if _a is prior to _b */
-#define cdf_system_time_before(_a, _b)      __cdf_system_time_before(_a, _b)
+/**
+ * qdf_system_time_before() - Check if a is before b
+ * @a: Time stamp value a
+ * @b: Time stamp value b
+ *
+ * Return:
+ * true if a is before b else false
+ */
+static inline bool qdf_system_time_before(qdf_time_t a, qdf_time_t b)
+{
+	return __qdf_system_time_before(a, b);
+}
 
-/* Check if _a atleast as recent as _b, if not later */
-#define cdf_system_time_after_eq(_a, _b)    __cdf_system_time_after_eq(_a, _b)
+/**
+ * qdf_system_time_after_eq() - Check if a is at least as recent as b,
+ * if not later
+ * @a: Time stamp value a
+ * @b: Time stamp value b
+ *
+ * Return:
+ * true if a >= b else false
+ */
+static inline bool qdf_system_time_after_eq(qdf_time_t a, qdf_time_t b)
+{
+	return __qdf_system_time_after_eq(a, b);
+}
 
 /**
- * enum cdf_timestamp_unit - what unit the cdf timestamp is in
+ * enum qdf_timestamp_unit - what unit the qdf timestamp is in
  * @KERNEL_LOG: boottime time in uS (micro seconds)
- * @KERNEL_LOG: QTIME in (1/19200)S
+ * @QTIMER: QTIME in (1/19200)S
  *
  * This enum is used to distinguish which timer source is used.
  */
-enum cdf_timestamp_unit {
+enum qdf_timestamp_unit {
 	KERNEL_LOG,
 	QTIMER,
 };
 
 #ifdef QCA_WIFI_3_0_ADRASTEA
-#define CDF_LOG_TIMESTAMP_UNIT QTIMER
+#define QDF_LOG_TIMESTAMP_UNIT QTIMER
 #else
-#define CDF_LOG_TIMESTAMP_UNIT KERNEL_LOG
+#define QDF_LOG_TIMESTAMP_UNIT KERNEL_LOG
 #endif
 
-#ifdef QCA_WIFI_3_0_ADRASTEA
-/**
- * cdf_get_log_timestamp() - get time stamp for logging
- *
- * For adrastea this API returns QTIMER tick which is needed to synchronize
- * host and fw log timestamps
- *
- * For ROME and other discrete solution this API returns system boot time stamp
- *
- * Return:
- *	QTIMER ticks(19.2MHz) for adrastea
- *	System tick for rome and other future discrete solutions
- */
-static inline uint64_t cdf_get_log_timestamp(void)
-{
-	return __cdf_get_qtimer_ticks();
-}
-#else
+
 /**
- * cdf_get_log_timestamp() - get time stamp for logging
- *
+ * qdf_get_log_timestamp - get time stamp for logging
  * For adrastea this API returns QTIMER tick which is needed to synchronize
  * host and fw log timestamps
- *
  * For ROME and other discrete solution this API returns system boot time stamp
  *
  * Return:
- *	QTIMER ticks(19.2MHz) for adrastea
- *	System tick for rome and other future discrete solutions
+ * QTIMER ticks(19.2MHz) for adrastea
+ * System tick for rome and other future discrete solutions
  */
-static inline uint64_t cdf_get_log_timestamp(void)
+static inline uint64_t qdf_get_log_timestamp(void)
 {
-#ifdef CONFIG_CNSS
-	struct timespec ts;
-
-	cnss_get_boottime(&ts);
-
-	return ((uint64_t) ts.tv_sec * 1000000) + (ts.tv_nsec / 1000);
-#else
-	return cdf_system_ticks_to_msecs(cdf_system_ticks()) * 1000;
-#endif /* CONFIG_CNSS */
+	return __qdf_get_log_timestamp();
 }
-#endif /* QCA_WIFI_3_0_ADRASTEA */
 
 /**
- * cdf_get_monotonic_boottime() - get monotonic kernel boot time
- * This API is similar to cdf_get_system_boottime but it includes
+ * qdf_get_monotonic_boottime - get monotonic kernel boot time
+ * This API is similar to qdf_get_system_boottime but it includes
  * time spent in suspend.
  *
  * Return: Time in microseconds
  */
-static inline uint64_t cdf_get_monotonic_boottime(void)
+static inline uint64_t qdf_get_monotonic_boottime(void)
 {
-	return __cdf_get_monotonic_boottime();
+	return __qdf_get_monotonic_boottime();
 }
 
 #endif
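
For orientation (illustrative, not part of the change itself), a minimal usage sketch of the converged time helpers above; the event structure and field names are invented for the example, and only APIs declared in qdf_time.h here are used:

#include <qdf_types.h>
#include <qdf_time.h>

/* Hypothetical event record: stamp entries with the converged log
 * timestamp and order them with the wrap-safe comparison helpers.
 */
struct fw_event {
	uint64_t log_ts;     /* qdf_get_log_timestamp(): QTIMER or boottime us */
	qdf_time_t sys_time; /* driver-maintained qdf_time_t stamp */
};

static void stamp_event(struct fw_event *ev, qdf_time_t now)
{
	ev->log_ts = qdf_get_log_timestamp();
	ev->sys_time = now;
}

static bool event_is_newer(struct fw_event *a, struct fw_event *b)
{
	/* true when a was stamped later than b */
	return qdf_system_time_after(a->sys_time, b->sys_time);
}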

+ 44 - 32
qdf/inc/qdf_softirq_timer.h → qdf/inc/qdf_timer.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -26,93 +26,105 @@
  */
 
 /**
- * DOC: cdf_softirq_timer
+ * DOC: qdf_timer
  * This file abstracts OS timers running in soft IRQ context.
  */
 
-#ifndef _CDF_SOFTIRQ_TIMER_H
-#define _CDF_SOFTIRQ_TIMER_H
+#ifndef _QDF_TIMER_H
+#define _QDF_TIMER_H
 
-#include <cdf_types.h>
-#include <i_cdf_softirq_timer.h>
+#include <qdf_types.h>
+#include <i_qdf_timer.h>
 
 /* Platform timer object */
-typedef __cdf_softirq_timer_t cdf_softirq_timer_t;
+typedef __qdf_timer_t qdf_timer_t;
 
 /**
- * cdf_softirq_timer_init() - initialize a softirq timer
+ * qdf_timer_init() - initialize a timer
  * @hdl: OS handle
  * @timer: Timer object pointer
  * @func: Timer function
  * @arg: Argument of timer function
  * @type: deferrable or non deferrable timer type
  *
- * Timer type CDF_TIMER_TYPE_SW means its a deferrable sw timer which will
+ * Timer type QDF_TIMER_TYPE_SW means it is a deferrable SW timer which will
  * not cause CPU wake upon expiry
- * Timer type CDF_TIMER_TYPE_WAKE_APPS means its a non-deferrable timer which
+ * Timer type QDF_TIMER_TYPE_WAKE_APPS means it is a non-deferrable timer which
  * will cause CPU wake up on expiry
  *
  * Return: none
  */
-static inline void
-cdf_softirq_timer_init(cdf_handle_t hdl,
-			cdf_softirq_timer_t *timer,
-			cdf_softirq_timer_func_t func, void *arg,
-			CDF_TIMER_TYPE type)
+static inline void qdf_timer_init(qdf_handle_t hdl, qdf_timer_t *timer,
+				  qdf_timer_func_t func, void *arg,
+				  QDF_TIMER_TYPE type)
 {
-	__cdf_softirq_timer_init(hdl, timer, func, arg, type);
+	__qdf_timer_init(hdl, timer, func, arg, type);
 }
 
 /**
- * cdf_softirq_timer_start() - start a one-shot softirq timer
+ * qdf_timer_start() - start a one-shot timer
  * @timer: Timer object pointer
  * @msec: Expiration period in milliseconds
  *
  * Return: none
  */
 static inline void
-cdf_softirq_timer_start(cdf_softirq_timer_t *timer, int msec)
+qdf_timer_start(qdf_timer_t *timer, int msec)
 {
-	__cdf_softirq_timer_start(timer, msec);
+	__qdf_timer_start(timer, msec);
 }
 
 /**
- * cdf_softirq_timer_mod() - modify existing timer to new timeout value
+ * qdf_timer_mod() - modify existing timer to new timeout value
  * @timer: Timer object pointer
  * @msec: Expiration period in milliseconds
  *
  * Return: none
  */
-static inline void cdf_softirq_timer_mod(cdf_softirq_timer_t *timer, int msec)
+static inline void qdf_timer_mod(qdf_timer_t *timer, int msec)
 {
-	__cdf_softirq_timer_mod(timer, msec);
+	__qdf_timer_mod(timer, msec);
 }
 
 /**
- * cdf_softirq_timer_cancel() - cancel cdf softirq timer
+ * qdf_timer_stop() - cancel qdf timer
  * @timer: Timer object pointer
- * @retval: Timer was cancelled and deactived
- * @retval: Timer was cancelled but already got fired.
+ *
+ * Return: true if the timer was cancelled and deactivated,
+ * false if the timer was cancelled but had already fired.
  *
  * The function will return after any running timer completes.
+ */
+static inline bool qdf_timer_stop(qdf_timer_t *timer)
+{
+	return __qdf_timer_stop(timer);
+}
+
+
+/**
+ * qdf_timer_sync_cancel() - cancel a timer synchronously
+ * @timer: timer object pointer
  *
- * Return: none
+ * The function will return after any running timer completes.
+ *
+ * Return: true if the timer was cancelled and deactivated,
+ * false if the timer was not cancelled
  */
-static inline bool cdf_softirq_timer_cancel(cdf_softirq_timer_t *timer)
+static inline bool qdf_timer_sync_cancel(qdf_timer_t *timer)
 {
-	return __cdf_softirq_timer_cancel(timer);
+	return __qdf_timer_sync_cancel(timer);
 }
 
+
 /**
- * cdf_softirq_timer_free() - free cdf softirq timer
+ * qdf_timer_free() - free qdf timer
  * @timer: Timer object pointer
  *
  * The function will return after any running timer completes.
  * Return: none
  */
-static inline void cdf_softirq_timer_free(cdf_softirq_timer_t *timer)
+static inline void qdf_timer_free(qdf_timer_t *timer)
 {
-	__cdf_softirq_timer_free(timer);
+	__qdf_timer_free(timer);
 }
 
-#endif
+#endif /* _QDF_TIMER_H */
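
As a quick orientation for the renamed API (not part of the diff), a sketch of a one-shot inactivity timer; the context structure, callback body and 2000 ms period are illustrative assumptions:

#include <qdf_types.h>
#include <qdf_timer.h>

#define MY_INACTIVITY_MS 2000	/* illustrative timeout */

struct my_ctx {
	qdf_timer_t inactivity_timer;
};

static void my_inactivity_cb(void *arg)
{
	struct my_ctx *ctx = arg;

	/* runs in soft IRQ context when the timer expires */
	(void)ctx;
}

static void my_ctx_start(qdf_handle_t hdl, struct my_ctx *ctx)
{
	/* QDF_TIMER_TYPE_SW: deferrable, does not wake the CPU on expiry */
	qdf_timer_init(hdl, &ctx->inactivity_timer, my_inactivity_cb,
		       ctx, QDF_TIMER_TYPE_SW);
	qdf_timer_start(&ctx->inactivity_timer, MY_INACTIVITY_MS);
}

static void my_ctx_stop(struct my_ctx *ctx)
{
	/* qdf_timer_stop() returns false if the timer had already fired */
	qdf_timer_stop(&ctx->inactivity_timer);
	qdf_timer_free(&ctx->inactivity_timer);
}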

+ 217 - 139
qdf/inc/qdf_trace.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -25,79 +25,71 @@
  * to the Linux Foundation.
  */
 
-#if !defined(__CDF_TRACE_H)
-#define __CDF_TRACE_H
+#if !defined(__QDF_TRACE_H)
+#define __QDF_TRACE_H
 
 /**
- *  DOC:  cdf_trace
- *
- *  Connectivity driver framework trace APIs
- *
+ *  DOC: qdf_trace
+ *  QCA driver framework trace APIs
  *  Trace, logging, and debugging definitions and APIs
- *
  */
 
 /* Include Files */
-#include  <cdf_types.h>         /* For CDF_MODULE_ID... */
+#include  <qdf_types.h>         /* For QDF_MODULE_ID... */
 #include  <stdarg.h>            /* For va_list... */
-#include  <cdf_status.h>
-#include  <cdf_nbuf.h>
-#include  <cds_packet.h>
-#include  <i_cdf_types.h>
+#include  <qdf_status.h>
+#include  <qdf_nbuf.h>
+#include  <i_qdf_types.h>
 
 /* Type declarations */
 
+/**
+ * typedef enum QDF_TRACE_LEVEL - Debug Trace level
+ * @QDF_TRACE_LEVEL_NONE: no trace will be logged. This value is in place
+ * for the qdf_trace_setlevel() to allow the user to turn off all traces
+ * @QDF_TRACE_LEVEL_FATAL: enable trace for fatal Error
+ * @QDF_TRACE_LEVEL_ERROR: enable trace for errors
+ * @QDF_TRACE_LEVEL_WARN: enable trace for warnings
+ * @QDF_TRACE_LEVEL_INFO: enable trace for information
+ * @QDF_TRACE_LEVEL_INFO_HIGH: enable high level trace information
+ * @QDF_TRACE_LEVEL_INFO_MED: enable middle level trace information
+ * @QDF_TRACE_LEVEL_INFO_LOW: enable low level trace information
+ * @QDF_TRACE_LEVEL_DEBUG: enable trace for debugging
+ * @QDF_TRACE_LEVEL_ALL: enable all trace
+ * @QDF_TRACE_LEVEL_MAX: not a real level; marks the number of levels defined
+ */
 typedef enum {
-	/* NONE means NO traces will be logged.  This value is in place
-	 * for the cdf_trace_setlevel() to allow the user to turn off
-	 * all traces
-	 */
-	CDF_TRACE_LEVEL_NONE = 0,
-
-	/* Following trace levels are the ones that 'callers' of CDF_TRACE()
-	 * can specify in for the CDF_TRACE_LEVEL parameter.  Traces are
-	 * classified by severity. FATAL being more serious than INFO for
-	 * example
-	 */
-	CDF_TRACE_LEVEL_FATAL,
-	CDF_TRACE_LEVEL_ERROR,
-	CDF_TRACE_LEVEL_WARN,
-	CDF_TRACE_LEVEL_INFO,
-	CDF_TRACE_LEVEL_INFO_HIGH,
-	CDF_TRACE_LEVEL_INFO_MED,
-	CDF_TRACE_LEVEL_INFO_LOW,
-	CDF_TRACE_LEVEL_DEBUG,
-
-	/* All means all trace levels will be active.  This value is in place
-	 * for the cdf_trace_setlevel() to allow the user to turn ON all traces
-	 */
-	CDF_TRACE_LEVEL_ALL,
-
-	/* Not a real level.  Used to identify the maximum number of
-	 * CDF_TRACE_LEVELs defined
-	 */
-	CDF_TRACE_LEVEL_MAX
-} CDF_TRACE_LEVEL;
+	QDF_TRACE_LEVEL_NONE = 0,
+	QDF_TRACE_LEVEL_FATAL,
+	QDF_TRACE_LEVEL_ERROR,
+	QDF_TRACE_LEVEL_WARN,
+	QDF_TRACE_LEVEL_INFO,
+	QDF_TRACE_LEVEL_INFO_HIGH,
+	QDF_TRACE_LEVEL_INFO_MED,
+	QDF_TRACE_LEVEL_INFO_LOW,
+	QDF_TRACE_LEVEL_DEBUG,
+	QDF_TRACE_LEVEL_ALL,
+	QDF_TRACE_LEVEL_MAX
+} QDF_TRACE_LEVEL;
 
 /* By default Data Path module will have all log levels enabled, except debug
  * log level. Debug level will be left up to the framework or user space modules
  * to be enabled when issue is detected
  */
-#define CDF_DATA_PATH_TRACE_LEVEL \
-	((1 << CDF_TRACE_LEVEL_FATAL) | (1 << CDF_TRACE_LEVEL_ERROR) | \
-	(1 << CDF_TRACE_LEVEL_WARN) | (1 << CDF_TRACE_LEVEL_INFO) | \
-	(1 << CDF_TRACE_LEVEL_INFO_HIGH) | (1 << CDF_TRACE_LEVEL_INFO_MED) | \
-	(1 << CDF_TRACE_LEVEL_INFO_LOW))
+#define QDF_DATA_PATH_TRACE_LEVEL \
+	((1 << QDF_TRACE_LEVEL_FATAL) | (1 << QDF_TRACE_LEVEL_ERROR) | \
+	(1 << QDF_TRACE_LEVEL_WARN) | (1 << QDF_TRACE_LEVEL_INFO) | \
+	(1 << QDF_TRACE_LEVEL_INFO_HIGH) | (1 << QDF_TRACE_LEVEL_INFO_MED) | \
+	(1 << QDF_TRACE_LEVEL_INFO_LOW))
 
 /* Preprocessor definitions and constants */
 #define ASSERT_BUFFER_SIZE (512)
 
-#define CDF_ENABLE_TRACING
-#define MAX_CDF_TRACE_RECORDS 4000
-#define INVALID_CDF_TRACE_ADDR 0xffffffff
-#define DEFAULT_CDF_TRACE_DUMP_COUNT 0
+#define MAX_QDF_TRACE_RECORDS 4000
+#define INVALID_QDF_TRACE_ADDR 0xffffffff
+#define DEFAULT_QDF_TRACE_DUMP_COUNT 0
 
-#include  <i_cdf_trace.h>
+#include  <i_qdf_trace.h>
 
 #ifdef TRACE_RECORD
 
@@ -109,102 +101,122 @@ typedef enum {
 
 #endif
 
-/* Structure definition */
-typedef struct cdf_trace_record_s {
+/**
+ * typedef struct qdf_trace_record_s - a single trace record
+ * @time: timestamp of the record
+ * @module: ID of the module that logged the record
+ * @code: event code recorded
+ * @session: session ID associated with the record
+ * @data: event data
+ * @pid: ID of the process that logged the record
+ */
+typedef struct qdf_trace_record_s {
 	uint64_t time;
 	uint8_t module;
 	uint8_t code;
 	uint16_t session;
 	uint32_t data;
 	uint32_t pid;
-} cdf_trace_record_t, *tp_cdf_trace_record;
+} qdf_trace_record_t, *tp_qdf_trace_record;
 
-typedef struct s_cdf_trace_data {
-	/* MTRACE logs are stored in ring buffer where head represents the
-	 * position of first record, tail represents the position of last record
-	 * added till now and num is the count of total record added
-	 */
+/**
+ * typedef struct s_qdf_trace_data - ring buffer state for MTRACE logs
+ * @head: position of the first record
+ * @tail: position of the last record
+ * @num: total number of records added so far
+ * @num_since_last_dump: number of records added since the last dump
+ * @enable: config for controlling the trace
+ * @dump_count: dump once this many records have accumulated
+ */
+typedef struct s_qdf_trace_data {
 	uint32_t head;
 	uint32_t tail;
 	uint32_t num;
-	uint16_t numSinceLastDump;
-
-	/* config for controlling the trace */
+	uint16_t num_since_last_dump;
 	uint8_t enable;
-	/* Dump after number of records reach this number */
-	uint16_t dumpCount;
-} t_cdf_trace_data;
+	uint16_t dump_count;
+} t_qdf_trace_data;
 
 #define CASE_RETURN_STRING(str) case ((str)): return (uint8_t *)(# str);
 
 /* DP Trace Implementation */
 #define DPTRACE(p) p
 
-#define MAX_CDF_DP_TRACE_RECORDS       4000
-#define CDF_DP_TRACE_RECORD_SIZE       16
-#define INVALID_CDF_DP_TRACE_ADDR      0xffffffff
-#define CDF_DP_TRACE_VERBOSITY_HIGH    3
-#define CDF_DP_TRACE_VERBOSITY_MEDIUM  2
-#define CDF_DP_TRACE_VERBOSITY_LOW     1
-#define CDF_DP_TRACE_VERBOSITY_DEFAULT 0
+#define MAX_QDF_DP_TRACE_RECORDS       4000
+#define QDF_DP_TRACE_RECORD_SIZE       16
+#define INVALID_QDF_DP_TRACE_ADDR      0xffffffff
+#define QDF_DP_TRACE_VERBOSITY_HIGH    3
+#define QDF_DP_TRACE_VERBOSITY_MEDIUM  2
+#define QDF_DP_TRACE_VERBOSITY_LOW     1
+#define QDF_DP_TRACE_VERBOSITY_DEFAULT 0
 
 /**
- * enum CDF_DP_TRACE_ID - Generic ID to identify various events in data path
- * @CDF_DP_TRACE_INVALID: Invalid ID
- * @CDF_DP_TRACE_DROP_PACKET_RECORD: Dropped packet stored with this id
- * @CDF_DP_TRACE_HDD_PACKET_PTR_RECORD: nbuf->data ptr of HDD
- * @CDF_DP_TRACE_HDD_PACKET_RECORD: nbuf->data stored with this id
- * @CDF_DP_TRACE_CE_PACKET_PTR_RECORD: nbuf->data ptr of CE
- * @CDF_DP_TRACE_CE_PACKET_RECORD: nbuf->data stored with this id
- * @CDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD: nbuf->data ptr of txrx queue
- * @CDF_DP_TRACE_TXRX_PACKET_PTR_RECORD: nbuf->data ptr of txrx
- * @CDF_DP_TRACE_HTT_PACKET_PTR_RECORD: nbuf->data ptr of htt
- * @CDF_DP_TRACE_HTC_PACKET_PTR_RECORD: nbuf->data ptr of htc
- * @CDF_DP_TRACE_HIF_PACKET_PTR_RECORD: nbuf->data ptr of hif
- * @CDF_DP_TRACE_HDD_TX_TIMEOUT: hdd tx timeout event
- * @CDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT: hdd tx softap timeout event
- * @CDF_DP_TRACE_VDEV_PAUSE: vdev pause event
- * @CDF_DP_TRACE_VDEV_UNPAUSE: vdev unpause event
- *
+ * enum QDF_DP_TRACE_ID - Generic ID to identify various events in data path
+ * @QDF_DP_TRACE_INVALID: Invalid ID
+ * @QDF_DP_TRACE_DROP_PACKET_RECORD: Dropped packet stored with this id
+ * @QDF_DP_TRACE_HDD_PACKET_PTR_RECORD: nbuf->data ptr of HDD
+ * @QDF_DP_TRACE_HDD_PACKET_RECORD: nbuf->data stored with this id
+ * @QDF_DP_TRACE_CE_PACKET_PTR_RECORD: nbuf->data ptr of CE
+ * @QDF_DP_TRACE_CE_PACKET_RECORD: nbuf->data stored with this id
+ * @QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD: nbuf->data ptr of txrx queue
+ * @QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD: nbuf->data ptr of txrx
+ * @QDF_DP_TRACE_HTT_PACKET_PTR_RECORD: nbuf->data ptr of htt
+ * @QDF_DP_TRACE_HTC_PACKET_PTR_RECORD: nbuf->data ptr of htc
+ * @QDF_DP_TRACE_HIF_PACKET_PTR_RECORD: nbuf->data ptr of hif
+ * @QDF_DP_TRACE_HDD_TX_TIMEOUT: hdd tx timeout event
+ * @QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT: hdd tx softap timeout event
+ * @QDF_DP_TRACE_VDEV_PAUSE: vdev pause event
+ * @QDF_DP_TRACE_VDEV_UNPAUSE: vdev unpause event
  */
-enum  CDF_DP_TRACE_ID {
-	CDF_DP_TRACE_INVALID                           = 0,
-	CDF_DP_TRACE_DROP_PACKET_RECORD                = 1,
-	CDF_DP_TRACE_HDD_PACKET_PTR_RECORD             = 2,
-	CDF_DP_TRACE_HDD_PACKET_RECORD                 = 3,
-	CDF_DP_TRACE_CE_PACKET_PTR_RECORD              = 4,
-	CDF_DP_TRACE_CE_PACKET_RECORD                  = 5,
-	CDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD      = 6,
-	CDF_DP_TRACE_TXRX_PACKET_PTR_RECORD            = 7,
-	CDF_DP_TRACE_HTT_PACKET_PTR_RECORD             = 8,
-	CDF_DP_TRACE_HTC_PACKET_PTR_RECORD             = 9,
-	CDF_DP_TRACE_HIF_PACKET_PTR_RECORD             = 10,
-	CDF_DP_TRACE_HDD_TX_TIMEOUT                    = 11,
-	CDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT             = 12,
-	CDF_DP_TRACE_VDEV_PAUSE                        = 13,
-	CDF_DP_TRACE_VDEV_UNPAUSE                      = 14,
-	CDF_DP_TRACE_MAX
+enum  QDF_DP_TRACE_ID {
+	QDF_DP_TRACE_INVALID                           = 0,
+	QDF_DP_TRACE_DROP_PACKET_RECORD                = 1,
+	QDF_DP_TRACE_HDD_PACKET_PTR_RECORD             = 2,
+	QDF_DP_TRACE_HDD_PACKET_RECORD                 = 3,
+	QDF_DP_TRACE_CE_PACKET_PTR_RECORD              = 4,
+	QDF_DP_TRACE_CE_PACKET_RECORD                  = 5,
+	QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD      = 6,
+	QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD            = 7,
+	QDF_DP_TRACE_HTT_PACKET_PTR_RECORD             = 8,
+	QDF_DP_TRACE_HTC_PACKET_PTR_RECORD             = 9,
+	QDF_DP_TRACE_HIF_PACKET_PTR_RECORD             = 10,
+	QDF_DP_TRACE_HDD_TX_TIMEOUT                    = 11,
+	QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT             = 12,
+	QDF_DP_TRACE_VDEV_PAUSE                        = 13,
+	QDF_DP_TRACE_VDEV_UNPAUSE                      = 14,
+	QDF_DP_TRACE_MAX
 
 };
 
+/*
+ * Log levels
+ */
+#define QDF_DEBUG_FUNCTRACE     0x01
+#define QDF_DEBUG_LEVEL0        0x02
+#define QDF_DEBUG_LEVEL1        0x04
+#define QDF_DEBUG_LEVEL2        0x08
+#define QDF_DEBUG_LEVEL3        0x10
+#define QDF_DEBUG_ERROR         0x20
+#define QDF_DEBUG_CFG           0x40
+
 /**
- * struct cdf_dp_trace_record_s - Describes a record in DP trace
+ * struct qdf_dp_trace_record_s - Describes a record in DP trace
  * @time: time when it got stored
  * @code: Describes the particular event
  * @data: buffer to store data
  * @size: Length of the valid data stored in this record
  * @pid : process id which stored the data in this record
  */
-struct cdf_dp_trace_record_s {
+struct qdf_dp_trace_record_s {
 	uint64_t time;
 	uint8_t code;
-	uint8_t data[CDF_DP_TRACE_RECORD_SIZE];
+	uint8_t data[QDF_DP_TRACE_RECORD_SIZE];
 	uint8_t size;
 	uint32_t pid;
 };
 
 /**
- * struct cdf_dp_trace_data - Parameters to configure/control DP trace
+ * struct qdf_dp_trace_data - Parameters to configure/control DP trace
  * @head: Position of first record
  * @tail: Position of last record
  * @num:  Current index
@@ -214,12 +226,10 @@ struct cdf_dp_trace_record_s {
  * @enable: enable/disable DP trace
  * @count: current packet number
  */
-struct s_cdf_dp_trace_data {
+struct s_qdf_dp_trace_data {
 	uint32_t head;
 	uint32_t tail;
 	uint32_t num;
-
-	/* config for controlling the trace */
 	uint8_t proto_bitmap;
 	uint8_t no_of_record;
 	uint8_t verbosity;
@@ -229,10 +239,10 @@ struct s_cdf_dp_trace_data {
 /* Function declarations and documenation */
 
 /**
- * cdf_trace_set_level() - Set the trace level for a particular module
+ * qdf_trace_set_level() - Set the trace level for a particular module
  * @level : trace level
  *
- * Trace level is a member of the CDF_TRACE_LEVEL enumeration indicating
+ * Trace level is a member of the QDF_TRACE_LEVEL enumeration indicating
  * the severity of the condition causing the trace message to be issued.
  * More severe conditions are more likely to be logged.
  *
@@ -240,44 +250,112 @@ struct s_cdf_dp_trace_data {
  *
  * Return:  nothing
  */
-void cdf_trace_set_level(CDF_MODULE_ID module, CDF_TRACE_LEVEL level);
+void qdf_trace_set_level(QDF_MODULE_ID module, QDF_TRACE_LEVEL level);
 
 /**
- * cdf_trace_get_level() - get the trace level
+ * qdf_trace_get_level() - get the trace level
  * @level : trace level
  *
  * This is an external API that returns a bool value to signify if a
  * particular trace level is set for the specified module.
- * A member of the CDF_TRACE_LEVEL enumeration indicating the severity
+ * A member of the QDF_TRACE_LEVEL enumeration indicating the severity
  * of the condition causing the trace message to be issued.
  *
  * Note that individual trace levels are the only valid values
- * for this API.  CDF_TRACE_LEVEL_NONE and CDF_TRACE_LEVEL_ALL
+ * for this API.  QDF_TRACE_LEVEL_NONE and QDF_TRACE_LEVEL_ALL
  * are not valid input and will return false
  *
  * Return:
  *  false - the specified trace level for the specified module is OFF
  *  true - the specified trace level for the specified module is ON
  */
-bool cdf_trace_get_level(CDF_MODULE_ID module, CDF_TRACE_LEVEL level);
-
-typedef void (*tp_cdf_trace_cb)(void *pMac, tp_cdf_trace_record, uint16_t);
-void cdf_trace(uint8_t module, uint8_t code, uint16_t session, uint32_t data);
-void cdf_trace_register(CDF_MODULE_ID, tp_cdf_trace_cb);
-CDF_STATUS cdf_trace_spin_lock_init(void);
-void cdf_trace_init(void);
-void cdf_trace_enable(uint32_t, uint8_t enable);
-void cdf_trace_dump_all(void *, uint8_t, uint8_t, uint32_t, uint32_t);
-
-void cdf_dp_trace_spin_lock_init(void);
-void cdf_dp_trace_init(void);
-void cdf_dp_trace_set_value(uint8_t proto_bitmap, uint8_t no_of_records,
+bool qdf_trace_get_level(QDF_MODULE_ID module, QDF_TRACE_LEVEL level);
+
+typedef void (*tp_qdf_trace_cb)(void *p_mac, tp_qdf_trace_record, uint16_t);
+void qdf_trace(uint8_t module, uint8_t code, uint16_t session, uint32_t data);
+void qdf_trace_register(QDF_MODULE_ID, tp_qdf_trace_cb);
+QDF_STATUS qdf_trace_spin_lock_init(void);
+void qdf_trace_init(void);
+void qdf_trace_enable(uint32_t, uint8_t enable);
+void qdf_trace_dump_all(void *, uint8_t, uint8_t, uint32_t, uint32_t);
+
+void qdf_dp_trace_spin_lock_init(void);
+void qdf_dp_trace_init(void);
+void qdf_dp_trace_set_value(uint8_t proto_bitmap, uint8_t no_of_records,
 			 uint8_t verbosity);
-void cdf_dp_trace_set_track(cdf_nbuf_t nbuf);
-void cdf_dp_trace(cdf_nbuf_t nbuf, enum CDF_DP_TRACE_ID code,
+void qdf_dp_trace_set_track(qdf_nbuf_t nbuf);
+void qdf_dp_trace(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code,
 			uint8_t *data, uint8_t size);
-void cdf_dp_trace_dump_all(uint32_t count);
-typedef void (*tp_cdf_dp_trace_cb)(struct cdf_dp_trace_record_s* , uint16_t);
-void cdf_dp_display_record(struct cdf_dp_trace_record_s *record,
+void qdf_dp_trace_dump_all(uint32_t count);
+typedef void (*tp_qdf_dp_trace_cb)(struct qdf_dp_trace_record_s *, uint16_t);
+void qdf_dp_display_record(struct qdf_dp_trace_record_s *record,
 							uint16_t index);
+
+
+/**
+ * qdf_trace_msg()- logging API
+ * @module: Module identifier. A member of the QDF_MODULE_ID enumeration that
+ *	    identifies the module issuing the trace message.
+ * @level: Trace level. A member of the QDF_TRACE_LEVEL enumeration indicating
+ *	   the severity of the condition causing the trace message to be issued.
+ *	   More severe conditions are more likely to be logged.
+ * @str_format: Format string. The message to be logged. This format string
+ *	       contains printf-like replacement parameters, which follow this
+ *	       parameter in the variable argument list.
+ *
+ * Users wishing to add tracing information to their code should use
+ * QDF_TRACE.  QDF_TRACE() will compile into a call to qdf_trace_msg() when
+ * tracing is enabled.
+ *
+ * Return: nothing
+ *
+ */
+void __printf(3, 4) qdf_trace_msg(QDF_MODULE_ID module, QDF_TRACE_LEVEL level,
+				  char *str_format, ...);
+
+void qdf_trace_hex_dump(QDF_MODULE_ID module, QDF_TRACE_LEVEL level,
+			void *data, int buf_len);
+
+void qdf_trace_display(void);
+
+void qdf_trace_set_value(QDF_MODULE_ID module, QDF_TRACE_LEVEL level,
+			 uint8_t on);
+
+void qdf_trace_set_module_trace_level(QDF_MODULE_ID module, uint32_t level);
+
+/* QDF_TRACE is the macro invoked to add trace messages to code.  See the
+ * documentation for qdf_trace_msg() for the parameters etc. for this function.
+ *
+ * NOTE:  Code QDF_TRACE() macros into the source code.  Do not code directly
+ * to the qdf_trace_msg() function.
+ *
+ * NOTE 2:  qdf tracing is totally turned off if WLAN_DEBUG is *not* defined.
+ * This allows us to build 'performance' builds where we can measure performance
+ * without being bogged down by all the tracing in the code
+ */
+
+#ifdef CONFIG_MCL
+#if defined(WLAN_DEBUG)
+#define QDF_TRACE qdf_trace_msg
+#define QDF_TRACE_HEX_DUMP qdf_trace_hex_dump
+#else
+#define QDF_TRACE(arg ...)
+#define QDF_TRACE_HEX_DUMP(arg ...)
+#endif
+#else
+#define QDF_TRACE qdf_trace
+
+#define qdf_trace(log_level, args...) \
+		do {	\
+			extern int qdf_dbg_mask; \
+			if (qdf_dbg_mask >= log_level) { \
+				printk("qdf: "args); \
+				printk("\n"); \
+			} \
+		} while (0)
 #endif
+void __printf(3, 4) qdf_snprintf(char *str_buffer, unsigned int size,
+				 char *str_format, ...);
+#define QDF_SNPRINTF qdf_snprintf
+
+#endif /* __QDF_TRACE_H */
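
For reference (illustrative, not part of the change), how a caller is expected to emit trace output after the rename, assuming the CONFIG_MCL/WLAN_DEBUG mapping above where QDF_TRACE() expands to qdf_trace_msg(); the receive routine and its length check are invented:

#include <qdf_types.h>
#include <qdf_trace.h>

static void example_rx_frame(uint8_t *data, uint32_t len)
{
	if (len < 14) {
		QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
			  "%s: runt frame, len %u", __func__, len);
		return;
	}

	/* dump the header bytes at a low-priority info level */
	QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_INFO_LOW,
			   data, 14);
}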

+ 285 - 254
qdf/inc/qdf_types.h

@@ -25,147 +25,174 @@
  * to the Linux Foundation.
  */
 
-#if !defined(__CDF_TYPES_H)
-#define __CDF_TYPES_H
 /**
- * DOC: cdf_types.h
- *
- * Connectivity driver framework (CDF) basic type definitions
+ * DOC: qdf_types.h
+ * QCA driver framework (QDF) basic type definitions
  */
 
+#if !defined(__QDF_TYPES_H)
+#define __QDF_TYPES_H
+
 /* Include Files */
-#include "i_cdf_types.h"
-#include <string.h>
+#include <i_qdf_types.h>
 
 /* Preprocessor definitions and constants */
+#define QDF_MAX_SGLIST 4
 
 /**
- * CDF_MAX - get maximum of two values
+ * struct qdf_sglist - scatter-gather list
+ * @nsegs: total number of segments
+ * struct __sg_segs - scatter-gather segment list
+ * @vaddr: Virtual address of the segment
+ * @len: Length of the segment
+ */
+typedef struct qdf_sglist {
+	uint32_t nsegs;
+	struct __sg_segs {
+		uint8_t  *vaddr;
+		uint32_t len;
+	} sg_segs[QDF_MAX_SGLIST];
+} qdf_sglist_t;
+
+#define QDF_MAX_SCATTER __QDF_MAX_SCATTER
+
+/**
+ * QDF_MAX - get maximum of two values
  * @_x: 1st arguement
  * @_y: 2nd arguement
  */
-#define CDF_MAX(_x, _y) (((_x) > (_y)) ? (_x) : (_y))
+#define QDF_MAX(_x, _y) (((_x) > (_y)) ? (_x) : (_y))
 
 /**
- * CDF_MIN - get minimum of two values
+ * QDF_MIN - get minimum of two values
  * @_x: 1st arguement
  * @_y: 2nd arguement
  */
-#define CDF_MIN(_x, _y) (((_x) < (_y)) ? (_x) : (_y))
+#define QDF_MIN(_x, _y) (((_x) < (_y)) ? (_x) : (_y))
 
 /**
- * CDF_SWAP_U16 - swap input u16 value
+ * QDF_SWAP_U16 - swap input u16 value
  * @_x: variable to swap
  */
-#define CDF_SWAP_U16(_x) \
+#define QDF_SWAP_U16(_x) \
 	((((_x) << 8) & 0xFF00) | (((_x) >> 8) & 0x00FF))
 
 /**
- * CDF_SWAP_U32 - swap input u32 value
+ * QDF_SWAP_U32 - swap input u32 value
  * @_x: variable to swap
  */
-#define CDF_SWAP_U32(_x) \
+#define QDF_SWAP_U32(_x) \
 	(((((_x) << 24) & 0xFF000000) | (((_x) >> 24) & 0x000000FF)) | \
 	 ((((_x) << 8) & 0x00FF0000) | (((_x) >> 8) & 0x0000FF00)))
 
-#define CDF_TICKS_PER_SECOND        (1000)
+/* ticks per second */
+#define QDF_TICKS_PER_SECOND (1000)
 
 /**
- * CDF_ARRAY_SIZE - get array size
+ * QDF_ARRAY_SIZE - get array size
  * @_arr: array variable name
  */
-#define CDF_ARRAY_SIZE(_arr) (sizeof(_arr) / sizeof((_arr)[0]))
-
-/* endian operations for Big Endian and Small Endian modes */
-#ifdef ANI_LITTLE_BYTE_ENDIAN
+#define QDF_ARRAY_SIZE(_arr) (sizeof(_arr) / sizeof((_arr)[0]))
 
-#define cdf_be16_to_cpu(_x) CDF_SWAP_U16(_x)
+#define QDF_MAX_SCATTER __QDF_MAX_SCATTER
 
-#endif
-
-#ifdef ANI_BIG_BYTE_ENDIAN
+/**
+ * qdf_packed - denotes structure is packed.
+ */
+#define qdf_packed __qdf_packed
 
-#define cdf_be16_to_cpu(_x) (_x)
+typedef void *qdf_net_handle_t;
 
-#endif
+typedef void *qdf_netlink_handle_t;
+typedef void *qdf_drv_handle_t;
+typedef void *qdf_os_handle_t;
+typedef void *qdf_pm_t;
 
-#ifndef __ahdecl
-#ifdef __i386__
-#define __ahdecl   __attribute__((regparm(0)))
-#else
-#define __ahdecl
-#endif
-#endif
 
-#define CDF_OS_MAX_SCATTER  __CDF_OS_MAX_SCATTER
+/**
+ * typedef qdf_handle_t - handles opaque to each other
+ */
+typedef void *qdf_handle_t;
 
 /**
- * @brief denotes structure is packed.
+ * typedef qdf_device_t - Platform/bus generic handle.
+ * Used for bus specific functions.
  */
-#define cdf_packed __cdf_packed
+typedef __qdf_device_t qdf_device_t;
 
 /**
- * typedef cdf_handle_t - handles opaque to each other
+ * typedef qdf_size_t - size of an object
  */
-typedef void *cdf_handle_t;
+typedef __qdf_size_t qdf_size_t;
 
 /**
- * typedef cdf_device_t - Platform/bus generic handle.
- *			  Used for bus specific functions.
+ * typedef qdf_off_t - offset for APIs that need one.
  */
-typedef __cdf_device_t cdf_device_t;
+typedef __qdf_off_t      qdf_off_t;
 
 /**
- * typedef cdf_size_t - size of an object
+ * typedef qdf_dma_map_t - DMA mapping object.
  */
-typedef __cdf_size_t cdf_size_t;
+typedef __qdf_dma_map_t qdf_dma_map_t;
 
 /**
- * typedef cdf_dma_map_t - DMA mapping object.
+ * typedef qdf_dma_addr_t - DMA address.
  */
-typedef __cdf_dma_map_t cdf_dma_map_t;
+typedef __qdf_dma_addr_t qdf_dma_addr_t;
 
 /**
- * tyepdef cdf_dma_addr_t - DMA address.
+ * typedef qdf_dma_size_t - DMA size.
  */
-typedef __cdf_dma_addr_t cdf_dma_addr_t;
+typedef __qdf_dma_size_t     qdf_dma_size_t;
 
 /**
- * tyepdef cdf_dma_context_t - DMA context.
+ * typedef qdf_dma_context_t - DMA context.
  */
-typedef __cdf_dma_context_t cdf_dma_context_t;
+typedef __qdf_dma_context_t qdf_dma_context_t;
 
+/**
+ * struct qdf_dma_map_info - Information inside a DMA map.
+ * @nsegs: total number mapped segments
+ * struct __dma_segs - Information of physical address.
+ * @paddr: physical(dam'able) address of the segment
+ * @len: length of the segment
+ */
+typedef struct qdf_dma_map_info {
+	uint32_t nsegs;
+	struct __dma_segs {
+		qdf_dma_addr_t paddr;
+		qdf_dma_size_t len;
+	} dma_segs[QDF_MAX_SCATTER];
+} qdf_dmamap_info_t;
+
+#define qdf_iomem_t __qdf_iomem_t
 
-#define cdf_iomem_t   __cdf_iomem_t;
 /**
- * typedef enum CDF_TIMER_TYPE - CDF timer type
- * @CDF_TIMER_TYPE_SW: Deferrable SW timer it will not cause CPU to wake up
- *			on expiry
- * @CDF_TIMER_TYPE_WAKE_APPS:  Non deferrable timer which will cause CPU to
- *				wake up on expiry
+ * typedef enum QDF_TIMER_TYPE - QDF timer type
+ * @QDF_TIMER_TYPE_SW: Deferrable SW timer it will not cause CPU to wake up
+ * on expiry
+ * @QDF_TIMER_TYPE_WAKE_APPS: Non deferrable timer which will cause CPU to
+ * wake up on expiry
  */
 typedef enum {
-	CDF_TIMER_TYPE_SW,
-	CDF_TIMER_TYPE_WAKE_APPS
-} CDF_TIMER_TYPE;
+	QDF_TIMER_TYPE_SW,
+	QDF_TIMER_TYPE_WAKE_APPS
+} QDF_TIMER_TYPE;
 
 /**
- * tyepdef cdf_resource_type_t - hw resources
- *
- * @CDF_RESOURCE_TYPE_MEM: memory resource
- * @CDF_RESOURCE_TYPE_IO: io resource
- *
+ * typedef qdf_resource_type_t - hw resources
+ * @QDF_RESOURCE_TYPE_MEM: memory resource
+ * @QDF_RESOURCE_TYPE_IO: io resource
  * Define the hw resources the OS has allocated for the device
  * Note that start defines a mapped area.
  */
 typedef enum {
-	CDF_RESOURCE_TYPE_MEM,
-	CDF_RESOURCE_TYPE_IO,
-} cdf_resource_type_t;
+	QDF_RESOURCE_TYPE_MEM,
+	QDF_RESOURCE_TYPE_IO,
+} qdf_resource_type_t;
 
 /**
- * tyepdef cdf_resource_t - representation of a h/w resource.
- *
+ * typedef qdf_resource_t - representation of a h/w resource.
  * @start: start
  * @end: end
  * @type: resource type
@@ -173,60 +200,59 @@ typedef enum {
 typedef struct {
 	uint64_t start;
 	uint64_t end;
-	cdf_resource_type_t type;
-} cdf_resource_t;
+	qdf_resource_type_t type;
+} qdf_resource_t;
 
 /**
- * typedef cdf_dma_dir_t - DMA directions
- *
- * @CDF_DMA_BIDIRECTIONAL: bidirectional data
- * @CDF_DMA_TO_DEVICE: data going from device to memory
- * @CDF_DMA_FROM_DEVICE: data going from memory to device
+ * typedef qdf_dma_dir_t - DMA directions
+ * @QDF_DMA_BIDIRECTIONAL: bidirectional data
+ * @QDF_DMA_TO_DEVICE: data going from device to memory
+ * @QDF_DMA_FROM_DEVICE: data going from memory to device
  */
 typedef enum {
-	CDF_DMA_BIDIRECTIONAL = __CDF_DMA_BIDIRECTIONAL,
-	CDF_DMA_TO_DEVICE = __CDF_DMA_TO_DEVICE,
-	CDF_DMA_FROM_DEVICE = __CDF_DMA_FROM_DEVICE,
-} cdf_dma_dir_t;
+	QDF_DMA_BIDIRECTIONAL = __QDF_DMA_BIDIRECTIONAL,
+	QDF_DMA_TO_DEVICE = __QDF_DMA_TO_DEVICE,
+	QDF_DMA_FROM_DEVICE = __QDF_DMA_FROM_DEVICE,
+} qdf_dma_dir_t;
 
 /* work queue(kernel thread)/DPC function callback */
-typedef void (*cdf_defer_fn_t)(void *);
+typedef void (*qdf_defer_fn_t)(void *);
 
-/* Prototype of the critical region function that is to be
+/*
+ * Prototype of the critical region function that is to be
  * executed with spinlock held and interrupt disabled
  */
-typedef bool (*cdf_irqlocked_func_t)(void *);
+typedef bool (*qdf_irqlocked_func_t)(void *);
 
 /* Prototype of timer function */
-typedef void (*cdf_softirq_timer_func_t)(void *);
+typedef void (*qdf_timer_func_t)(void *);
 
-#define cdf_offsetof(type, field) offsetof(type, field)
+#define qdf_offsetof(type, field) offsetof(type, field)
 
 /**
- * typedef CDF_MODULE_ID - CDF Module IDs
- *
- * @CDF_MODULE_ID_TLSHIM: TLSHIM module ID
- * @CDF_MODULE_ID_WMI: WMI module ID
- * @CDF_MODULE_ID_HTT: HTT module ID
- * @CDF_MODULE_ID_RSV4: Reserved
- * @CDF_MODULE_ID_HDD: HDD module ID
- * @CDF_MODULE_ID_SME: SME module ID
- * @CDF_MODULE_ID_PE: PE module ID
- * @CDF_MODULE_ID_WMA: WMA module ID
- * @CDF_MODULE_ID_SYS: SYS module ID
- * @CDF_MODULE_ID_CDF: CDF module ID
- * @CDF_MODULE_ID_SAP: SAP module ID
- * @CDF_MODULE_ID_HDD_SOFTAP: HDD SAP module ID
- * @CDF_MODULE_ID_HDD_DATA: HDD DATA module ID
- * @CDF_MODULE_ID_HDD_SAP_DATA: HDD SAP DATA module ID
- * @CDF_MODULE_ID_HIF: HIF module ID
- * @CDF_MODULE_ID_HTC: HTC module ID
- * @CDF_MODULE_ID_TXRX: TXRX module ID
- * @CDF_MODULE_ID_CDF_DEVICE: CDF DEVICE module ID
- * @CDF_MODULE_ID_CFG: CFG module ID
- * @CDF_MODULE_ID_BMI: BMI module ID
- * @CDF_MODULE_ID_EPPING: EPPING module ID
- * @CDF_MODULE_ID_MAX: Max place holder module ID
+ * typedef QDF_MODULE_ID - QDF Module IDs
+ * @QDF_MODULE_ID_TLSHIM: TLSHIM module ID
+ * @QDF_MODULE_ID_WMI: WMI module ID
+ * @QDF_MODULE_ID_HTT: HTT module ID
+ * @QDF_MODULE_ID_RSV4: Reserved
+ * @QDF_MODULE_ID_HDD: HDD module ID
+ * @QDF_MODULE_ID_SME: SME module ID
+ * @QDF_MODULE_ID_PE: PE module ID
+ * @QDF_MODULE_ID_WMA: WMA module ID
+ * @QDF_MODULE_ID_SYS: SYS module ID
+ * @QDF_MODULE_ID_QDF: QDF module ID
+ * @QDF_MODULE_ID_SAP: SAP module ID
+ * @QDF_MODULE_ID_HDD_SOFTAP: HDD SAP module ID
+ * @QDF_MODULE_ID_HDD_DATA: HDD DATA module ID
+ * @QDF_MODULE_ID_HDD_SAP_DATA: HDD SAP DATA module ID
+ * @QDF_MODULE_ID_HIF: HIF module ID
+ * @QDF_MODULE_ID_HTC: HTC module ID
+ * @QDF_MODULE_ID_TXRX: TXRX module ID
+ * @QDF_MODULE_ID_QDF_DEVICE: QDF DEVICE module ID
+ * @QDF_MODULE_ID_CFG: CFG module ID
+ * @QDF_MODULE_ID_BMI: BMI module ID
+ * @QDF_MODULE_ID_EPPING: EPPING module ID
+ * @QDF_MODULE_ID_MAX: Max place holder module ID
  *
  * These are generic IDs that identify the various modules in the software
  * system
@@ -234,165 +260,170 @@ typedef void (*cdf_softirq_timer_func_t)(void *);
  * 3 & 4 are unused for historical purposes
  */
 typedef enum {
-	CDF_MODULE_ID_TLSHIM = 1,
-	CDF_MODULE_ID_WMI = 2,
-	CDF_MODULE_ID_HTT = 3,
-	CDF_MODULE_ID_RSV4 = 4,
-	CDF_MODULE_ID_HDD = 5,
-	CDF_MODULE_ID_SME = 6,
-	CDF_MODULE_ID_PE = 7,
-	CDF_MODULE_ID_WMA = 8,
-	CDF_MODULE_ID_SYS = 9,
-	CDF_MODULE_ID_CDF = 10,
-	CDF_MODULE_ID_SAP = 11,
-	CDF_MODULE_ID_HDD_SOFTAP = 12,
-	CDF_MODULE_ID_HDD_DATA = 14,
-	CDF_MODULE_ID_HDD_SAP_DATA = 15,
-
-	CDF_MODULE_ID_HIF = 16,
-	CDF_MODULE_ID_HTC = 17,
-	CDF_MODULE_ID_TXRX = 18,
-	CDF_MODULE_ID_CDF_DEVICE = 19,
-	CDF_MODULE_ID_CFG = 20,
-	CDF_MODULE_ID_BMI = 21,
-	CDF_MODULE_ID_EPPING = 22,
-
-	CDF_MODULE_ID_MAX
-} CDF_MODULE_ID;
-
-#define cdf_print(args...) \
-	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR, ## args)
-#define cdf_vprint        __cdf_vprint
-#define cdf_snprint       __cdf_snprint
-
-/**
- * enum tCDF_ADAPTER_MODE - adapter role.
- *
- * @CDF_STA_MODE: STA mode
- * @CDF_SAP_MODE: SAP mode
- * @CDF_P2P_CLIENT_MODE: P2P client mode
- * @CDF_P2P_GO_MODE: P2P GO mode
- * @CDF_FTM_MODE: FTM mode
- * @CDF_IBSS_MODE: IBSS mode
- * @CDF_P2P_DEVICE_MODE: P2P device mode
- * @CDF_EPPING_MODE: EPPING device mode
- * @CDF_OCB_MODE: OCB device mode
- * @CDF_MAX_NO_OF_MODE: Max place holder
+	QDF_MODULE_ID_TLSHIM = 1,
+	QDF_MODULE_ID_WMI = 2,
+	QDF_MODULE_ID_HTT = 3,
+	QDF_MODULE_ID_RSV4 = 4,
+	QDF_MODULE_ID_HDD = 5,
+	QDF_MODULE_ID_SME = 6,
+	QDF_MODULE_ID_PE = 7,
+	QDF_MODULE_ID_WMA = 8,
+	QDF_MODULE_ID_SYS = 9,
+	QDF_MODULE_ID_QDF = 10,
+	QDF_MODULE_ID_SAP = 11,
+	QDF_MODULE_ID_HDD_SOFTAP = 12,
+	QDF_MODULE_ID_HDD_DATA = 14,
+	QDF_MODULE_ID_HDD_SAP_DATA = 15,
+	QDF_MODULE_ID_HIF = 16,
+	QDF_MODULE_ID_HTC = 17,
+	QDF_MODULE_ID_TXRX = 18,
+	QDF_MODULE_ID_QDF_DEVICE = 19,
+	QDF_MODULE_ID_CFG = 20,
+	QDF_MODULE_ID_BMI = 21,
+	QDF_MODULE_ID_EPPING = 22,
+	QDF_MODULE_ID_MAX
+} QDF_MODULE_ID;
+
+#define qdf_print(args...) \
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, ## args)
+#define qdf_vprint    __qdf_vprint
+#define qdf_snprint   __qdf_snprint
+
+
+/**
+ * enum tQDF_ADAPTER_MODE - Concurrency role.
+ * @QDF_STA_MODE: STA mode
+ * @QDF_SAP_MODE: SAP mode
+ * @QDF_P2P_CLIENT_MODE: P2P client mode
+ * @QDF_P2P_GO_MODE: P2P GO mode
+ * @QDF_FTM_MODE: FTM mode
+ * @QDF_IBSS_MODE: IBSS mode
+ * @QDF_P2P_DEVICE_MODE: P2P device mode
+ * @QDF_EPPING_MODE: EPPING device mode
+ * @QDF_OCB_MODE: OCB device mode
+ * @QDF_MAX_NO_OF_MODE: Max place holder
  *
  * These are generic IDs that identify the various roles
  * in the software system
  */
-enum tCDF_ADAPTER_MODE {
-	CDF_STA_MODE = 0,
-	CDF_SAP_MODE = 1,
-	CDF_P2P_CLIENT_MODE,
-	CDF_P2P_GO_MODE,
-	CDF_FTM_MODE,
-	CDF_IBSS_MODE,
-	CDF_P2P_DEVICE_MODE,
-	CDF_EPPING_MODE,
-	CDF_OCB_MODE,
-	CDF_MAX_NO_OF_MODE
+enum tQDF_ADAPTER_MODE {
+	QDF_STA_MODE = 0,
+	QDF_SAP_MODE = 1,
+	QDF_P2P_CLIENT_MODE,
+	QDF_P2P_GO_MODE,
+	QDF_FTM_MODE,
+	QDF_IBSS_MODE,
+	QDF_P2P_DEVICE_MODE,
+	QDF_EPPING_MODE,
+	QDF_OCB_MODE,
+	QDF_MAX_NO_OF_MODE
 };
 
 /**
- * enum tCDF_GLOBAL_CON_MODE - global config mode when
+ * enum tQDF_GLOBAL_CON_MODE - global config mode when
  * driver is loaded.
  *
- * @CDF_GLOBAL_MISSION_MODE: mission mode (STA, SAP...)
- * @CDF_GLOBAL_FTM_MODE: FTM mode
- * @CDF_GLOBAL_EPPING_MODE: EPPING mode
- * @CDF_GLOBAL_MAX_MODE: Max place holder
- */
-enum tCDF_GLOBAL_CON_MODE {
-	CDF_GLOBAL_MISSION_MODE,
-	CDF_GLOBAL_FTM_MODE = 5,
-	CDF_GLOBAL_EPPING_MODE = 8,
-	CDF_GLOBAL_MAX_MODE
+ * @QDF_GLOBAL_MISSION_MODE: mission mode (STA, SAP...)
+ * @QDF_GLOBAL_FTM_MODE: FTM mode
+ * @QDF_GLOBAL_EPPING_MODE: EPPING mode
+ * @QDF_GLOBAL_MAX_MODE: Max place holder
+ */
+enum tQDF_GLOBAL_CON_MODE {
+	QDF_GLOBAL_MISSION_MODE,
+	QDF_GLOBAL_FTM_MODE = 5,
+	QDF_GLOBAL_EPPING_MODE = 8,
+	QDF_GLOBAL_MAX_MODE
 };
 
 
+
 #ifdef WLAN_OPEN_P2P_INTERFACE
 /* This should match with WLAN_MAX_INTERFACES */
-#define CDF_MAX_CONCURRENCY_PERSONA    (4)
+#define QDF_MAX_CONCURRENCY_PERSONA  (4)
 #else
-#define CDF_MAX_CONCURRENCY_PERSONA    (3)
+#define QDF_MAX_CONCURRENCY_PERSONA  (3)
 #endif
 
-#define CDF_STA_MASK (1 << CDF_STA_MODE)
-#define CDF_SAP_MASK (1 << CDF_SAP_MODE)
-#define CDF_P2P_CLIENT_MASK (1 << CDF_P2P_CLIENT_MODE)
-#define CDF_P2P_GO_MASK (1 << CDF_P2P_GO_MODE)
+#define QDF_STA_MASK (1 << QDF_STA_MODE)
+#define QDF_SAP_MASK (1 << QDF_SAP_MODE)
+#define QDF_P2P_CLIENT_MASK (1 << QDF_P2P_CLIENT_MODE)
+#define QDF_P2P_GO_MASK (1 << QDF_P2P_GO_MODE)
 
 #ifdef FEATURE_WLAN_MCC_TO_SCC_SWITCH
+
+/**
+ * typedef tQDF_MCC_TO_SCC_SWITCH_MODE - MCC to SCC switch mode.
+ * @QDF_MCC_TO_SCC_SWITCH_DISABLE: Disable switch
+ * @QDF_MCC_TO_SCC_SWITCH_ENABLE: Enable switch
+ * @QDF_MCC_TO_SCC_SWITCH_FORCE: force switch
+ * @QDF_MCC_TO_SCC_SWITCH_MAX: max switch
+ */
 typedef enum {
-	CDF_MCC_TO_SCC_SWITCH_DISABLE = 0,
-	CDF_MCC_TO_SCC_SWITCH_ENABLE,
-	CDF_MCC_TO_SCC_SWITCH_FORCE,
-	CDF_MCC_TO_SCC_SWITCH_MAX
-} tCDF_MCC_TO_SCC_SWITCH_MODE;
+	QDF_MCC_TO_SCC_SWITCH_DISABLE = 0,
+	QDF_MCC_TO_SCC_SWITCH_ENABLE,
+	QDF_MCC_TO_SCC_SWITCH_FORCE,
+	QDF_MCC_TO_SCC_SWITCH_MAX
+} tQDF_MCC_TO_SCC_SWITCH_MODE;
 #endif
 
 #if !defined(NULL)
 #ifdef __cplusplus
-#define NULL    0
+#define NULL   0
 #else
-#define NULL    ((void *)0)
+#define NULL   ((void *)0)
 #endif
 #endif
 
-/* 'Time' type */
-typedef unsigned long v_TIME_t;
-
-/* typedef for CDF Context... */
+/* typedef for QDF Context... */
 typedef void *v_CONTEXT_t;
 
-#define CDF_MAC_ADDR_SIZE (6)
+#define QDF_MAC_ADDR_SIZE (6)
 
 /**
- * struct cdf_mac_addr - mac address array
+ * struct qdf_mac_addr - mac address array
  * @bytes: MAC address bytes
  */
-struct cdf_mac_addr {
-	uint8_t bytes[CDF_MAC_ADDR_SIZE];
+struct qdf_mac_addr {
+	uint8_t bytes[QDF_MAC_ADDR_SIZE];
 };
 
-/* This macro is used to initialize a CDF MacAddress to the broadcast
- * MacAddress.  It is used like this...
- * struct cdf_mac_addr macAddress = CDF_MAC_ADDR_BROADCAST_INITIALIZER
+/**
+ * This macro is used to initialize a QDF MacAddress to the broadcast
+ * MacAddress. It is used like this:
+ * struct qdf_mac_addr mac_addr = QDF_MAC_ADDR_BROADCAST_INITIALIZER
  */
-#define CDF_MAC_ADDR_BROADCAST_INITIALIZER { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } }
+#define QDF_MAC_ADDR_BROADCAST_INITIALIZER \
+	{ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } }
 
-/* This macro is used to initialize a CDF MacAddress to zero
+/**
+ * This macro is used to initialize a QDF MacAddress to zero
  * It is used like this...
- * struct cdf_mac_addr macAddress = CDF_MAC_ADDR_ZERO_INITIALIZER
+ * struct qdf_mac_addr mac_addr = QDF_MAC_ADDR_ZERO_INITIALIZER
  */
-#define CDF_MAC_ADDR_ZERO_INITIALIZER { { 0, 0, 0, 0, 0, 0 } }
+#define QDF_MAC_ADDR_ZERO_INITIALIZER { { 0, 0, 0, 0, 0, 0 } }
 
-#define CDF_IPV4_ADDR_SIZE (4)
-#define CDF_IPV6_ADDR_SIZE (16)
+#define QDF_IPV4_ADDR_SIZE (4)
+#define QDF_IPV6_ADDR_SIZE (16)
 
 /**
- * struct cdf_tso_frag_t - fragments of a single TCP segment
- * @paddr_low_32:	Lower 32 bits of the buffer pointer
- * @paddr_upper_16:	upper 16 bits of the buffer pointer
- * @length:	length of the buffer
- * @vaddr:	virtual address
+ * struct qdf_tso_frag_t - fragments of a single TCP segment
+ * @paddr_low_32: Lower 32 bits of the buffer pointer
+ * @paddr_upper_16: upper 16 bits of the buffer pointer
+ * @length: length of the buffer
+ * @vaddr: virtual address
  *
  * This structure holds the fragments of a single TCP segment of a
  * given jumbo TSO network buffer
  */
-struct cdf_tso_frag_t {
+struct qdf_tso_frag_t {
 	uint32_t paddr_low_32;
 	uint32_t paddr_upper_16:16,
-		     length:16;
+		 length:16;
 	unsigned char *vaddr;
 };
 
 #define FRAG_NUM_MAX 6
 
 /**
- * struct cdf_tso_flags_t - TSO specific flags
+ * struct qdf_tso_flags_t - TSO specific flags
  * @tso_enable: Enable transmit segmentation offload
  * @tcp_flags_mask: Tcp_flag is inserted into the header based
  * on the mask
@@ -404,8 +435,8 @@ struct cdf_tso_frag_t {
  * This structure holds the TSO specific flags extracted from the TSO network
  * buffer for a given TCP segment
  */
-struct cdf_tso_flags_t {
-	u_int32_t tso_enable:1,
+struct qdf_tso_flags_t {
+	uint32_t tso_enable:1,
 			reserved_0a:6,
 			fin:1,
 			syn:1,
@@ -418,16 +449,13 @@ struct cdf_tso_flags_t {
 			ns:1,
 			tcp_flags_mask:9,
 			reserved_0b:7;
-/* ------------------------------------------------------------------- */
 
-	u_int32_t l2_len:16,
+	uint32_t l2_len:16,
 			ip_len:16;
-/* ------------------------------------------------------------------- */
 
-	u_int32_t tcp_seq_num;
-/* ------------------------------------------------------------------- */
+	uint32_t tcp_seq_num;
 
-	u_int32_t ip_id:16,
+	uint32_t ip_id:16,
 			ipv4_checksum_en:1,
 			udp_ipv4_checksum_en:1,
 			udp_ipv6_checksum_en:1,
@@ -435,41 +463,43 @@ struct cdf_tso_flags_t {
 			tcp_ipv6_checksum_en:1,
 			partial_checksum_en:1,
 			reserved_3a:10;
-/* ------------------------------------------------------------------- */
 
-	u_int32_t checksum_offset:14,
+	uint32_t checksum_offset:14,
 			reserved_4a:2,
 			payload_start_offset:14,
 			reserved_4b:2;
-/* ------------------------------------------------------------------- */
 
-	u_int32_t payload_end_offset:14,
+	uint32_t payload_end_offset:14,
 			reserved_5:18;
 };
 
 /**
- * struct cdf_tso_seg_t - single TSO segment
- * @tso_flags:	TSO flags
- * @num_frags:	number of fragments
- * @tso_frags:	array holding the fragments
+ * struct qdf_tso_seg_t - single TSO segment
+ * @tso_flags: TSO flags
+ * @num_frags: number of fragments
+ * @tso_frags: array holding the fragments
  *
  * This structure holds the information of a single TSO segment of a jumbo
  * TSO network buffer
  */
-struct cdf_tso_seg_t {
-	struct cdf_tso_flags_t tso_flags;
-/* ------------------------------------------------------------------- */
+struct qdf_tso_seg_t {
+	struct qdf_tso_flags_t tso_flags;
 	uint32_t num_frags;
-	struct cdf_tso_frag_t tso_frags[FRAG_NUM_MAX];
+	struct qdf_tso_frag_t tso_frags[FRAG_NUM_MAX];
 };
 
-struct cdf_tso_seg_elem_t {
-	struct cdf_tso_seg_t seg;
-	struct cdf_tso_seg_elem_t *next;
+/**
+ * qdf_tso_seg_elem_t - tso segment element
+ * @seg: instance of segment
+ * @next: pointer to the next segment
+ */
+struct qdf_tso_seg_elem_t {
+	struct qdf_tso_seg_t seg;
+	struct qdf_tso_seg_elem_t *next;
 };
 
 /**
- * struct cdf_tso_info_t - TSO information extracted
+ * struct qdf_tso_info_t - TSO information extracted
  * @is_tso: is this is a TSO frame
  * @num_segs: number of segments
  * @total_len: total length of the packet
@@ -480,42 +510,43 @@ struct cdf_tso_seg_elem_t {
  * jumbo network buffer. It contains a chain of the TSO segments belonging to
  * the jumbo packet
  */
-struct cdf_tso_info_t {
+struct qdf_tso_info_t {
 	uint8_t is_tso;
 	uint32_t num_segs;
 	uint32_t total_len;
-	struct cdf_tso_seg_elem_t *tso_seg_list;
-	struct cdf_tso_seg_elem_t *curr_seg;
+	struct qdf_tso_seg_elem_t *tso_seg_list;
+	struct qdf_tso_seg_elem_t *curr_seg;
 };
 
 /**
  * Used to set classify bit in CE desc.
  */
-#define CDF_CE_TX_CLASSIFY_BIT_S	5
+#define QDF_CE_TX_CLASSIFY_BIT_S   5
 
 /**
- * 2 bits starting at bit 6 in CE desc.
+ * QDF_CE_TX_PKT_TYPE_BIT_S - 2 bits starting at bit 6 in CE desc.
  */
-#define CDF_CE_TX_PKT_TYPE_BIT_S	6
+#define QDF_CE_TX_PKT_TYPE_BIT_S   6
 
 /**
- * 12 bits --> 16-27, in the CE desciptor, the length of HTT/HTC descriptor
+ * QDF_CE_TX_PKT_OFFSET_BIT_S - 12 bits --> 16-27, in the CE descriptor,
+ *  the length of HTT/HTC descriptor
  */
-#define CDF_CE_TX_PKT_OFFSET_BIT_S	16
+#define QDF_CE_TX_PKT_OFFSET_BIT_S  16
 
 /**
- * Mask for packet offset in the CE descriptor.
+ * QDF_CE_TX_PKT_OFFSET_BIT_M - Mask for packet offset in the CE descriptor.
  */
-#define CDF_CE_TX_PKT_OFFSET_BIT_M	0x0fff0000
+#define QDF_CE_TX_PKT_OFFSET_BIT_M   0x0fff0000
 
 /**
- * enum cdf_suspend_type - type of suspend
- * CDF_SYSTEM_SUSPEND: System suspend triggered wlan suspend
- * CDF_RUNTIME_SUSPEND: Runtime pm inactivity timer triggered wlan suspend
+ * enum qdf_suspend_type - type of suspend
+ * @QDF_SYSTEM_SUSPEND: System suspend triggered wlan suspend
+ * @QDF_RUNTIME_SUSPEND: Runtime pm inactivity timer triggered wlan suspend
  */
-enum cdf_suspend_type {
-	CDF_SYSTEM_SUSPEND,
-	CDF_RUNTIME_SUSPEND
+enum qdf_suspend_type {
+	QDF_SYSTEM_SUSPEND,
+	QDF_RUNTIME_SUSPEND
 };
 
-#endif /* if !defined __CDF_TYPES_H */
+#endif /* __QDF_TYPES_H */
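
A small sketch of the renamed basic types (illustrative, not part of the diff): the broadcast check below uses only struct qdf_mac_addr, QDF_MAC_ADDR_SIZE and the initializer macro defined above; qdf_util.h offers the equivalent qdf_is_macaddr_broadcast() helper.

#include <qdf_types.h>

static bool example_is_broadcast_da(struct qdf_mac_addr *da)
{
	static const struct qdf_mac_addr bcast =
		QDF_MAC_ADDR_BROADCAST_INITIALIZER;
	int i;

	/* compare byte-by-byte against the all-0xff broadcast address */
	for (i = 0; i < QDF_MAC_ADDR_SIZE; i++) {
		if (da->bytes[i] != bcast.bytes[i])
			return false;
	}

	return true;
}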

+ 193 - 167
qdf/inc/qdf_util.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -26,249 +26,203 @@
  */
 
 /**
- * DOC: cdf_util.h
- *
+ * DOC: qdf_util.h
  * This file defines utility functions.
  */
 
-#ifndef _CDF_UTIL_H
-#define _CDF_UTIL_H
+#ifndef _QDF_UTIL_H
+#define _QDF_UTIL_H
 
-#include <i_cdf_util.h>
+#include <i_qdf_util.h>
 
 /**
- * cdf_unlikely - Compiler-dependent macro denoting code likely to execute
+ * qdf_unlikely - Compiler-dependent macro denoting code unlikely to execute
  * @_expr: expression to be checked
  */
-#define cdf_unlikely(_expr)     __cdf_unlikely(_expr)
+#define qdf_unlikely(_expr)     __qdf_unlikely(_expr)
 
 /**
- * cdf_likely - Compiler-dependent macro denoting code unlikely to execute
+ * qdf_likely - Compiler-dependent macro denoting code likely to execute
  * @_expr: expression to be checked
  */
-#define cdf_likely(_expr)       __cdf_likely(_expr)
-
-CDF_INLINE_FN int cdf_status_to_os_return(CDF_STATUS status)
-{
-	return __cdf_status_to_os_return(status);
-}
+#define qdf_likely(_expr)       __qdf_likely(_expr)
 
 /**
- * cdf_assert - assert "expr" evaluates to false
- * @expr: assert expression
+ * qdf_mb - read + write memory barrier.
  */
-#ifdef CDF_OS_DEBUG
-#define cdf_assert(expr)         __cdf_assert(expr)
-#else
-#define cdf_assert(expr)
-#endif /* CDF_OS_DEBUG */
+#define qdf_mb()                 __qdf_mb()
 
 /**
- * @cdf_assert_always- alway assert "expr" evaluates to false
- * @expr: assert expression
+ * qdf_assert - trigger an assert if "expr" evaluates to false.
  */
-#define cdf_assert_always(expr)  __cdf_assert(expr)
-
-/**
- * cdf_os_cpu_to_le64 - Convert a 64-bit value from CPU byte order to
- *			little-endian byte order
- * @x: value to be converted
- */
-#define cdf_os_cpu_to_le64(x)                   __cdf_os_cpu_to_le64(x)
+#ifdef QDF_DEBUG
+#define qdf_assert(expr)         __qdf_assert(expr)
+#else
+#define qdf_assert(expr)
+#endif /* QDF_DEBUG */
 
 /**
- * cdf_le16_to_cpu - Convert a 16-bit value from little-endian byte order
- *			to CPU byte order
- * @x: value to be converted
+ * qdf_assert_always - always trigger an assert if "expr" evaluates to false.
  */
-#define cdf_le16_to_cpu(x)                   __cdf_le16_to_cpu(x)
+#define qdf_assert_always(expr)  __qdf_assert(expr)
 
 /**
- * cdf_le32_to_cpu - Convert a 32-bit value from little-endian byte order to
- *			CPU byte order
- * @x: value to be converted
+ * qdf_target_assert_always - always trigger a target assert if "expr" evaluates to false.
  */
-#define cdf_le32_to_cpu(x)                   __cdf_le32_to_cpu(x)
+#define qdf_target_assert_always(expr)  __qdf_target_assert(expr)
 
 /**
- * cdf_in_interrupt - returns true if in interrupt context
+ * qdf_status_to_os_return - convert QDF_STATUS to an OS-specific return value
+ * @status: enum QDF_STATUS
+ *
+ * Return: int status, success/failure
  */
-#define cdf_in_interrupt          in_interrupt
+static inline int qdf_status_to_os_return(QDF_STATUS status)
+{
+	return __qdf_status_to_os_return(status);
+}
 
 /**
- * cdf_container_of - cast a member of a structure out to the containing
- *                    structure
- * @ptr:        the pointer to the member.
- * @type:       the type of the container struct this is embedded in.
- * @member:     the name of the member within the struct.
- *
+ * qdf_container_of - cast a member of a structure out to the containing
+ * structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
  */
-#define cdf_container_of(ptr, type, member) \
-	 __cdf_container_of(ptr, type, member)
+#define qdf_container_of(ptr, type, member) \
+	 __qdf_container_of(ptr, type, member)
 
 /**
- * cdf_is_pwr2 - test input value is power of 2 integer
- *
+ * QDF_IS_PWR2 - test whether the input value is a power of 2 integer
  * @value: input integer
- *
  */
-#define CDF_IS_PWR2(value) (((value) ^ ((value)-1)) == ((value) << 1) - 1)
+#define QDF_IS_PWR2(value) (((value) ^ ((value)-1)) == ((value) << 1) - 1)
+
 
 /**
- * cdf_is_macaddr_equal() - compare two CDF MacAddress
- * @pMacAddr1: Pointer to one cdf MacAddress to compare
- * @pMacAddr2: Pointer to the other cdf MacAddress to compare
+ * qdf_is_macaddr_equal() - compare two QDF MacAddress
+ * @mac_addr1: Pointer to one qdf MacAddress to compare
+ * @mac_addr2: Pointer to the other qdf MacAddress to compare
  *
- * This function returns a bool that tells if a two CDF MacAddress'
+ * This function returns a bool that tells if two QDF MacAddresses
  * are equivalent.
  *
  * Return: true if the MacAddress's are equal
- *	not true if the MacAddress's are not equal
+ * false if the MacAddresses are not equal
  */
-CDF_INLINE_FN bool cdf_is_macaddr_equal(struct cdf_mac_addr *pMacAddr1,
-					struct cdf_mac_addr *pMacAddr2)
+static inline bool qdf_is_macaddr_equal(struct qdf_mac_addr *mac_addr1,
+					struct qdf_mac_addr *mac_addr2)
 {
-	return 0 == memcmp(pMacAddr1, pMacAddr2, CDF_MAC_ADDR_SIZE);
+	return __qdf_is_macaddr_equal(mac_addr1, mac_addr2);
 }
 
+
 /**
- * cdf_is_macaddr_zero() - check for a MacAddress of all zeros.
- * @pMacAddr - pointer to the struct cdf_mac_addr to check.
+ * qdf_is_macaddr_zero() - check for a MacAddress of all zeros.
+ * @mac_addr: pointer to the struct qdf_mac_addr to check.
  *
  * This function returns a bool that tells if a MacAddress is made up of
  * all zeros.
  *
- *
- * Return:  true if the MacAddress is all Zeros
- *	false if the MacAddress is not all Zeros.
- *
+ * Return: true if the MacAddress is all Zeros
+ * false if the MacAddress is not all Zeros.
  */
-CDF_INLINE_FN bool cdf_is_macaddr_zero(struct cdf_mac_addr *pMacAddr)
+static inline bool qdf_is_macaddr_zero(struct qdf_mac_addr *mac_addr)
 {
-	struct cdf_mac_addr zeroMacAddr = CDF_MAC_ADDR_ZERO_INITIALIZER;
-
-	return cdf_is_macaddr_equal(pMacAddr, &zeroMacAddr);
+	struct qdf_mac_addr zero_mac_addr = QDF_MAC_ADDR_ZERO_INITIALIZER;
+	return qdf_is_macaddr_equal(mac_addr, &zero_mac_addr);
 }
 
 /**
- * cdf_zero_macaddr() - zero out a MacAddress
- * @pMacAddr: pointer to the struct cdf_mac_addr to zero.
+ * qdf_zero_macaddr() - zero out a MacAddress
+ * @mac_addr: pointer to the struct qdf_mac_addr to zero.
  *
- * This function zeros out a CDF MacAddress type.
+ * This function zeros out a QDF MacAddress type.
  *
- * Return: nothing
+ * Return: none
  */
-CDF_INLINE_FN void cdf_zero_macaddr(struct cdf_mac_addr *pMacAddr)
+static inline void qdf_zero_macaddr(struct qdf_mac_addr *mac_addr)
 {
-	memset(pMacAddr, 0, CDF_MAC_ADDR_SIZE);
+	__qdf_zero_macaddr(mac_addr);
 }
 
+
 /**
- * cdf_is_macaddr_group() - check for a MacAddress is a 'group' address
- * @pMacAddr1: pointer to the cdf MacAddress to check
+ * qdf_is_macaddr_group() - check for a MacAddress is a 'group' address
+ * @mac_addr1: pointer to the qdf MacAddress to check
  *
- * This function returns a bool that tells if a the input CDF MacAddress
- * is a "group" address.  Group addresses have the 'group address bit' turned
- * on in the MacAddress.  Group addresses are made up of Broadcast and
+ * This function returns a bool that tells if the input QDF MacAddress
+ * is a "group" address. Group addresses have the 'group address bit' turned
+ * on in the MacAddress. Group addresses are made up of Broadcast and
  * Multicast addresses.
  *
- * Return:  true if the input MacAddress is a Group address
- *	false if the input MacAddress is not a Group address
+ * Return: true if the input MacAddress is a Group address
+ * false if the input MacAddress is not a Group address
  */
-CDF_INLINE_FN bool cdf_is_macaddr_group(struct cdf_mac_addr *pMacAddr)
+static inline bool qdf_is_macaddr_group(struct qdf_mac_addr *mac_addr)
 {
-	return pMacAddr->bytes[0] & 0x01;
+	return mac_addr->bytes[0] & 0x01;
 }
 
+
 /**
- * cdf_is_macaddr_broadcast() - check for a MacAddress is a broadcast address
+ * qdf_is_macaddr_broadcast() - check for a MacAddress is a broadcast address
+ * @mac_addr: Pointer to the qdf MacAddress to check
  *
- * This function returns a bool that tells if a the input CDF MacAddress
+ * This function returns a bool that tells if the input QDF MacAddress
  * is a "broadcast" address.
  *
- * @pMacAddr: Pointer to the cdf MacAddress to check
- *
- * Return:  true if the input MacAddress is a broadcast address
- *	false if the input MacAddress is not a broadcast address
+ * Return: true if the input MacAddress is a broadcast address
+ * false if the input MacAddress is not a broadcast address
  */
-CDF_INLINE_FN bool cdf_is_macaddr_broadcast(struct cdf_mac_addr *pMacAddr)
+static inline bool qdf_is_macaddr_broadcast(struct qdf_mac_addr *mac_addr)
 {
-	struct cdf_mac_addr broadcastMacAddr =
-					CDF_MAC_ADDR_BROADCAST_INITIALIZER;
-
-	return cdf_is_macaddr_equal(pMacAddr, &broadcastMacAddr);
+	struct qdf_mac_addr broadcast_mac_addr =
+		QDF_MAC_ADDR_BROADCAST_INITIALIZER;
+	return qdf_is_macaddr_equal(mac_addr, &broadcast_mac_addr);
 }
 
 /**
- * cdf_copy_macaddr() - copy a CDF MacAddress
- * @pDst - pointer to the cdf MacAddress to copy TO (the destination)
- * @pSrc - pointer to the cdf MacAddress to copy FROM (the source)
- *
- * This function copies a CDF MacAddress into another CDF MacAddress.
+ * qdf_copy_macaddr() - copy a QDF MacAddress
+ * @dst_addr: pointer to the qdf MacAddress to copy TO (the destination)
+ * @src_addr: pointer to the qdf MacAddress to copy FROM (the source)
  *
+ * This function copies a QDF MacAddress into another QDF MacAddress.
  *
- * Return: nothing
+ * Return: none
  */
-CDF_INLINE_FN void cdf_copy_macaddr(struct cdf_mac_addr *pDst,
-				    struct cdf_mac_addr *pSrc)
+static inline void qdf_copy_macaddr(struct qdf_mac_addr *dst_addr,
+				    struct qdf_mac_addr *src_addr)
 {
-	*pDst = *pSrc;
+	*dst_addr = *src_addr;
 }
 
 /**
- * cdf_set_macaddr_broadcast() - set a CDF MacAddress to the 'broadcast'
- * @pMacAddr: pointer to the cdf MacAddress to set to broadcast
+ * qdf_set_macaddr_broadcast() - set a QDF MacAddress to the 'broadcast'
+ * @mac_addr: pointer to the qdf MacAddress to set to broadcast
  *
- * This function sets a CDF MacAddress to the 'broadcast' MacAddress. Broadcast
+ * This function sets a QDF MacAddress to the 'broadcast' MacAddress. Broadcast
  * MacAddress contains all 0xFF bytes.
  *
- * Return: nothing
+ * Return: none
  */
-CDF_INLINE_FN void cdf_set_macaddr_broadcast(struct cdf_mac_addr *pMacAddr)
+static inline void qdf_set_macaddr_broadcast(struct qdf_mac_addr *mac_addr)
 {
-	memset(pMacAddr, 0xff, CDF_MAC_ADDR_SIZE);
+	__qdf_set_macaddr_broadcast(mac_addr);
 }
 
-#if defined(ANI_LITTLE_BYTE_ENDIAN)
-
-/**
- * i_cdf_htonl() - convert from host byte order to network byte order
- * @ul: input to be converted
- *
- * Return: converted network byte order
- */
-CDF_INLINE_FN unsigned long i_cdf_htonl(unsigned long ul)
-{
-	return ((ul & 0x000000ff) << 24) |
-		((ul & 0x0000ff00) << 8) |
-		((ul & 0x00ff0000) >> 8) | ((ul & 0xff000000) >> 24);
-}
-
-/**
- * i_cdf_ntohl() - convert network byte order to host byte order
- * @ul: input to be converted
- *
- * Return: converted host byte order
- */
-CDF_INLINE_FN unsigned long i_cdf_ntohl(unsigned long ul)
-{
-	return i_cdf_htonl(ul);
-}
-
-#endif
-
 /**
- * cdf_set_u16() - Assign 16-bit unsigned value to a byte array base on CPU's
- *			endianness.
+ * qdf_set_u16() - Assign 16-bit unsigned value to a byte array based on CPU's
+ * endianness.
  * @ptr: Starting address of a byte array
  * @value: The value to assign to the byte array
  *
  * Caller must validate the byte array has enough space to hold the value
  *
  * Return: The address to the byte after the assignment. This may or may not
- *	be valid. Caller to verify.
+ * be valid. Caller to verify.
  */
-CDF_INLINE_FN uint8_t *cdf_set_u16(uint8_t *ptr, uint16_t value)
+static inline uint8_t *qdf_set_u16(uint8_t *ptr, uint16_t value)
 {
 #if defined(ANI_BIG_BYTE_ENDIAN)
 	*(ptr) = (uint8_t) (value >> 8);
@@ -277,53 +231,51 @@ CDF_INLINE_FN uint8_t *cdf_set_u16(uint8_t *ptr, uint16_t value)
 	*(ptr + 1) = (uint8_t) (value >> 8);
 	*(ptr) = (uint8_t) (value);
 #endif
-
 	return ptr + 2;
 }
 
 /**
- * cdf_get_u16() - Retrieve a 16-bit unsigned value from a byte array base on
- *			CPU's endianness.
+ * qdf_get_u16() - Retrieve a 16-bit unsigned value from a byte array based on
+ * CPU's endianness.
  * @ptr: Starting address of a byte array
- * @pValue: Pointer to a caller allocated buffer for 16 bit value. Value is to
- *		assign to this location.
+ * @value: Pointer to a caller-allocated buffer for the 16-bit value. The
+ * retrieved value is assigned to this location.
  *
  * Caller must validate the byte array has enough space to hold the value
  *
  * Return: The address to the byte after the assignment. This may or may not
- *	be valid. Caller to verify.
+ * be valid. Caller to verify.
  */
-CDF_INLINE_FN uint8_t *cdf_get_u16(uint8_t *ptr, uint16_t *pValue)
+static inline uint8_t *qdf_get_u16(uint8_t *ptr, uint16_t *value)
 {
 #if defined(ANI_BIG_BYTE_ENDIAN)
-	*pValue = (((uint16_t) (*ptr << 8)) | ((uint16_t) (*(ptr + 1))));
+	*value = (((uint16_t) (*ptr << 8)) | ((uint16_t) (*(ptr + 1))));
 #else
-	*pValue = (((uint16_t) (*(ptr + 1) << 8)) | ((uint16_t) (*ptr)));
+	*value = (((uint16_t) (*(ptr + 1) << 8)) | ((uint16_t) (*ptr)));
 #endif
-
 	return ptr + 2;
 }
 
 /**
- * cdf_get_u32() - retrieve a 32-bit unsigned value from a byte array base on
- *			CPU's endianness.
+ * qdf_get_u32() - retrieve a 32-bit unsigned value from a byte array based on
+ * CPU's endianness.
  * @ptr: Starting address of a byte array
- * @pValue: Pointer to a caller allocated buffer for 32 bit value. Value is to
- *		assign to this location.
+ * @value: Pointer to a caller-allocated buffer for the 32-bit value. The
+ * retrieved value is assigned to this location.
  *
  * Caller must validate the byte array has enough space to hold the value
  *
  * Return: The address to the byte after the assignment. This may or may not
- *		be valid. Caller to verify.
+ * be valid. Caller to verify.
  */
-CDF_INLINE_FN uint8_t *cdf_get_u32(uint8_t *ptr, uint32_t *pValue)
+static inline uint8_t *qdf_get_u32(uint8_t *ptr, uint32_t *value)
 {
 #if defined(ANI_BIG_BYTE_ENDIAN)
-	*pValue = ((uint32_t) (*(ptr) << 24) |
+	*value = ((uint32_t) (*(ptr) << 24) |
 		   (uint32_t) (*(ptr + 1) << 16) |
 		   (uint32_t) (*(ptr + 2) << 8) | (uint32_t) (*(ptr + 3)));
 #else
-	*pValue = ((uint32_t) (*(ptr + 3) << 24) |
+	*value = ((uint32_t) (*(ptr + 3) << 24) |
 		   (uint32_t) (*(ptr + 2) << 16) |
 		   (uint32_t) (*(ptr + 1) << 8) | (uint32_t) (*(ptr)));
 #endif
@@ -331,17 +283,91 @@ CDF_INLINE_FN uint8_t *cdf_get_u32(uint8_t *ptr, uint32_t *pValue)
 }
 
 /**
- * cdf_get_pwr2() - get next power of 2 integer from input value
+ * qdf_ntohs - Convert a 16-bit value from network byte order to host byte order
+ */
+#define qdf_ntohs(x)                         __qdf_ntohs(x)
+
+/**
+ * qdf_ntohl - Convert a 32-bit value from network byte order to host byte order
+ */
+#define qdf_ntohl(x)                         __qdf_ntohl(x)
+
+/**
+ * qdf_htons - Convert a 16-bit value from host byte order to network byte order
+ */
+#define qdf_htons(x)                         __qdf_htons(x)
+
+/**
+ * qdf_htonl - Convert a 32-bit value from host byte order to network byte order
+ */
+#define qdf_htonl(x)                         __qdf_htonl(x)
+
+/**
+ * qdf_cpu_to_le16 - Convert a 16-bit value from CPU byte order to
+ * little-endian byte order
+ */
+#define qdf_cpu_to_le16(x)                   __qdf_cpu_to_le16(x)
+
+/**
+ * qdf_cpu_to_le32 - Convert a 32-bit value from CPU byte order to
+ * little-endian byte order
+ */
+#define qdf_cpu_to_le32(x)                   __qdf_cpu_to_le32(x)
+
+/**
+ * qdf_cpu_to_le64 - Convert a 64-bit value from CPU byte order to
+ * little-endian byte order
+ */
+#define qdf_cpu_to_le64(x)                   __qdf_cpu_to_le64(x)
+
+/**
+ * qdf_be32_to_cpu - Convert a 32-bit value from big-endian byte order
+ * to CPU byte order
+ */
+#define qdf_be32_to_cpu(x)                   __qdf_be32_to_cpu(x)
+
+/**
+ * qdf_be64_to_cpu - Convert a 64-bit value from big-endian byte order
+ * to CPU byte order
+ */
+#define qdf_be64_to_cpu(x)                   __qdf_be64_to_cpu(x)
+
+/**
+ * qdf_le32_to_cpu - Convert a 32-bit value from little-endian byte
+ * order to CPU byte order
+ */
+#define qdf_le32_to_cpu(x)                   __qdf_le32_to_cpu(x)
+
+/**
+ * qdf_le64_to_cpu - Convert a 64-bit value from little-endian byte
+ * order to CPU byte order
+ */
+#define qdf_le64_to_cpu(x)                   __qdf_le64_to_cpu(x)
+
+/**
+ * qdf_le16_to_cpu - Convert a 16-bit value from little-endian byte order
+ * to CPU byte order
+ * @x: value to be converted
+ */
+#define qdf_le16_to_cpu(x)                   __qdf_le16_to_cpu(x)
+
+/**
+ * qdf_function - expands to the name of the current function
+ */
+#define qdf_function             __qdf_function
+
+/**
+ * qdf_get_pwr2() - get next power of 2 integer from input value
  * @value: input value to find next power of 2 integer
  *
  * Get next power of 2 integer from input value
  *
  * Return: Power of 2 integer
  */
-CDF_INLINE_FN int cdf_get_pwr2(int value)
+static inline int qdf_get_pwr2(int value)
 {
 	int log2;
-	if (CDF_IS_PWR2(value))
+	if (QDF_IS_PWR2(value))
 		return value;
 
 	log2 = 0;
@@ -352,4 +378,4 @@ CDF_INLINE_FN int cdf_get_pwr2(int value)
 	return 1 << log2;
 }
 
-#endif /*_CDF_UTIL_H*/
+#endif /*_QDF_UTIL_H*/
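For readers new to the converged helpers, a minimal usage sketch of the byte-array and power-of-two routines above (illustrative only, not part of this change; it assumes qdf_util.h is included):

	uint8_t buf[2];
	uint16_t val;
	int ring_size;

	/* store a 16-bit field according to the CPU endianness selection */
	qdf_set_u16(buf, 0x1234);          /* returns buf + 2 */
	qdf_get_u16(buf, &val);            /* val == 0x1234 again */

	/* round a requested size up to the next power of two */
	ring_size = qdf_get_pwr2(100);     /* 128; qdf_get_pwr2(64) stays 64 */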

+ 200 - 0
qdf/linux/src/i_osdep.h

@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ *  DOC: i_osdep
+ *  QCA driver framework OS dependent types
+ */
+
+#ifndef _I_OSDEP_H
+#define _I_OSDEP_H
+
+#ifdef CONFIG_MCL
+#include <cds_queue.h>
+#include <cds_if_upperproto.h>
+#else
+#include <sys/queue.h>
+#endif
+
+/**
+ * enum qdf_bus_type - Supported Bus types
+ * @QDF_BUS_TYPE_PCI: PCI Bus
+ * @QDF_BUS_TYPE_AHB: AHB Bus
+ * @QDF_BUS_TYPE_SNOC: SNOC Bus
+ * @QDF_BUS_TYPE_SIM: Simulator
+ */
+enum qdf_bus_type {
+	QDF_BUS_TYPE_PCI,
+	QDF_BUS_TYPE_AHB,
+	QDF_BUS_TYPE_SNOC,
+	QDF_BUS_TYPE_SIM
+};
+
+/*
+ * Byte Order stuff
+ */
+#define    le16toh(_x)    le16_to_cpu(_x)
+#define    htole16(_x)    cpu_to_le16(_x)
+#define    htobe16(_x)    cpu_to_be16(_x)
+#define    le32toh(_x)    le32_to_cpu(_x)
+#define    htole32(_x)    cpu_to_le32(_x)
+#define    be16toh(_x)    be16_to_cpu(_x)
+#define    be32toh(_x)    be32_to_cpu(_x)
+#define    htobe32(_x)    cpu_to_be32(_x)
+
+typedef struct timer_list os_timer_t;
+
+#ifdef CONFIG_SMP
+/* Undo the one provided by the kernel to debug spin locks */
+#undef spin_lock
+#undef spin_unlock
+#undef spin_trylock
+
+#define spin_lock(x) \
+	do { \
+		spin_lock_bh(x); \
+	} while (0)
+
+#define spin_unlock(x) \
+	do { \
+		if (!spin_is_locked(x)) { \
+			WARN_ON(1); \
+			printk(KERN_EMERG " %s:%d unlock addr=%p, %s \n", __func__,  __LINE__, x, \
+			       !spin_is_locked(x) ? "Not locked" : "");	\
+		} \
+		spin_unlock_bh(x); \
+	} while (0)
+#define spin_trylock(x) spin_trylock_bh(x)
+#define OS_SUPPORT_ASYNC_Q 1    /* support for handling async function calls */
+
+#else
+#define OS_SUPPORT_ASYNC_Q 0
+#endif /* ifdef CONFIG_SMP */
+
+/**
+ * struct _os_mesg_t - maintains the attributes of a message
+ * @mesg_next: pointer to the next message
+ * @mesg_type: type of message
+ * @mesg_len: length of the message
+ */
+typedef struct _os_mesg_t {
+	STAILQ_ENTRY(_os_mesg_t) mesg_next;
+	uint16_t mesg_type;
+	uint16_t mesg_len;
+} os_mesg_t;
+
+/**
+ * struct qdf_bus_context - Bus to hal context handoff
+ * @bc_tag: bus context tag
+ * @bc_handle: bus context handle
+ * @bc_bustype: bus type
+ */
+typedef struct qdf_bus_context {
+	int bc_tag;
+	char *bc_handle;
+	enum qdf_bus_type bc_bustype;
+} QDF_BUS_CONTEXT;
+
+typedef struct _NIC_DEV *osdev_t;
+
+typedef void (*os_mesg_handler_t)(void *ctx, uint16_t mesg_type,
+				  uint16_t mesg_len,
+				  void *mesg);
+
+
+/**
+ * typedef os_mesg_queue_t - Object to maintain message queue
+ * @dev_handle: OS handle
+ * @num_queued: number of queued messages
+ * @mesg_len: message length
+ * @mesg_queue_buf: pointer to message queue buffer
+ * @mesg_head: queued mesg buffers
+ * @mesg_free_head: free mesg buffers
+ * @lock: spinlock object
+ * @ev_handler_lock: spinlock object to event handler
+ * @task: pointer to task
+ * @_timer: instance of timer
+ * @handler: message handler
+ * @ctx: pointer to context
+ * @is_synchronous: bit to save synchronous status
+ */
+typedef struct {
+	osdev_t dev_handle;
+	int32_t num_queued;
+	int32_t mesg_len;
+	uint8_t *mesg_queue_buf;
+	STAILQ_HEAD(, _os_mesg_t) mesg_head;
+	STAILQ_HEAD(, _os_mesg_t) mesg_free_head;
+	spinlock_t lock;
+	spinlock_t ev_handler_lock;
+#ifdef USE_SOFTINTR
+	void *_task;
+#else
+	os_timer_t _timer;
+#endif
+	os_mesg_handler_t handler;
+	void *ctx;
+	uint8_t is_synchronous:1;
+} os_mesg_queue_t;
+
+/**
+ * struct _NIC_DEV - OS-dependent device structure.
+ * It is opaque to the actual ATH layer.
+ * @bdev: bus device handle
+ * @netdev: net device handle (wifi%d)
+ * @intr_tq: tasklet
+ * @devstats: net device statistics
+ * @bc: hal bus context
+ * @device: generic device
+ * @event_queue: instance to wait queue
+ * @is_device_asleep: keep device status, sleep or awake
+ */
+struct _NIC_DEV {
+	void *bdev;
+	struct net_device *netdev;
+	qdf_bh_t intr_tq;
+	struct net_device_stats devstats;
+	QDF_BUS_CONTEXT bc;
+#ifdef ATH_PERF_PWR_OFFLOAD
+	struct device *device;
+	wait_queue_head_t event_queue;
+#endif /* ATH_PERF_PWR_OFFLOAD */
+#if OS_SUPPORT_ASYNC_Q
+	os_mesg_queue_t async_q;
+#endif
+#ifdef ATH_BUS_PM
+	uint8_t is_device_asleep;
+#endif /* ATH_BUS_PM */
+};
+
+#define __QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer, lenp, ppos) \
+	proc_dointvec(ctl, write, buffer, lenp, ppos)
+
+#define __QDF_SYSCTL_PROC_DOSTRING(ctl, write, filp, buffer, lenp, ppos) \
+	proc_dostring(ctl, write, filp, buffer, lenp, ppos)
+
+#endif /* _I_OSDEP_H */
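A hedged illustration of the declarations above in use; desc_len_le is a hypothetical little-endian descriptor field and is not part of this change:

	uint16_t desc_len_le = 0x3400;              /* as read from hardware */
	uint16_t host_len = le16toh(desc_len_le);   /* alias for le16_to_cpu() */

	/* a bus context tagged for the bus/HAL hand-off */
	QDF_BUS_CONTEXT bc = {
		.bc_tag = 0,
		.bc_handle = NULL,
		.bc_bustype = QDF_BUS_TYPE_PCI,
	};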

+ 146 - 0
qdf/linux/src/i_qdf_atomic.h

@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_atomic.h
+ * This file provides OS dependent atomic APIs.
+ */
+
+#ifndef I_QDF_ATOMIC_H
+#define I_QDF_ATOMIC_H
+
+#include <qdf_status.h>         /* QDF_STATUS */
+#include <linux/atomic.h>
+
+typedef atomic_t __qdf_atomic_t;
+
+/**
+ * __qdf_atomic_init() - initialize an atomic type variable
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS __qdf_atomic_init(__qdf_atomic_t *v)
+{
+	atomic_set(v, 0);
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_atomic_read() - read the value of an atomic variable
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return: The current value of the variable
+ */
+static inline int32_t __qdf_atomic_read(__qdf_atomic_t *v)
+{
+	return atomic_read(v);
+}
+
+/**
+ * __qdf_atomic_inc() - increment the value of an atomic variable
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return: None
+ */
+static inline void __qdf_atomic_inc(__qdf_atomic_t *v)
+{
+	atomic_inc(v);
+}
+
+/**
+ * __qdf_atomic_dec() - decrement the value of an atomic variable
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return: None
+ */
+static inline void __qdf_atomic_dec(__qdf_atomic_t *v)
+{
+	atomic_dec(v);
+}
+
+/**
+ * __qdf_atomic_add() - add a value to the value of an atomic variable
+ * @i: The amount by which to increase the atomic counter
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return: None
+ */
+static inline void __qdf_atomic_add(int i, __qdf_atomic_t *v)
+{
+	atomic_add(i, v);
+}
+
+/**
+ * __qdf_atomic_sub() - Subtract a value from an atomic variable
+ * @i: the amount by which to decrease the atomic counter
+ * @v: a pointer to an opaque atomic variable
+ *
+ * Return: none
+ */
+static inline void __qdf_atomic_sub(int i, __qdf_atomic_t *v)
+{
+	atomic_sub(i, v);
+}
+
+/**
+ * __qdf_atomic_dec_and_test() - decrement an atomic variable and check if the
+ * new value is zero
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return:
+ * true (non-zero) if the new value is zero,
+ * false (0) if the new value is non-zero
+ */
+static inline int32_t __qdf_atomic_dec_and_test(__qdf_atomic_t *v)
+{
+	return atomic_dec_and_test(v);
+}
+
+/**
+ * __qdf_atomic_set() - set the value of an atomic variable
+ * @v: A pointer to an opaque atomic variable
+ * @i: the value to set
+ *
+ * Return: None
+ */
+static inline void __qdf_atomic_set(__qdf_atomic_t *v, int i)
+{
+	atomic_set(v, i);
+}
+
+/**
+ * __qdf_atomic_inc_return() - return the incremented value of an atomic variable
+ * @v: A pointer to an opaque atomic variable
+ *
+ * Return: The current value of the variable
+ */
+static inline int32_t __qdf_atomic_inc_return(__qdf_atomic_t *v)
+{
+	return atomic_inc_return(v);
+}
+
+#endif
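A short reference-count sketch built on this shim; drivers would normally go through the qdf_atomic_* wrappers, so the __qdf_* calls below are purely illustrative:

	static __qdf_atomic_t ref_cnt;

	static void ref_init(void)
	{
		__qdf_atomic_init(&ref_cnt);     /* starts at 0 */
		__qdf_atomic_set(&ref_cnt, 1);   /* creator holds one reference */
	}

	static void ref_get(void)
	{
		__qdf_atomic_inc(&ref_cnt);
	}

	static int ref_put(void)
	{
		/* non-zero when the last reference has just been dropped */
		return __qdf_atomic_dec_and_test(&ref_cnt);
	}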

+ 323 - 0
qdf/linux/src/i_qdf_defer.h

@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_defer.h
+ * This file provides OS dependent deferred API's.
+ */
+
+#ifndef _I_QDF_DEFER_H
+#define _I_QDF_DEFER_H
+
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#ifdef CONFIG_CNSS
+#include <net/cnss.h>
+#endif
+#include <qdf_types.h>
+#include <qdf_status.h>
+#include <qdf_trace.h>
+
+typedef struct tasklet_struct __qdf_bh_t;
+typedef struct workqueue_struct __qdf_workqueue_t;
+
+#if LINUX_VERSION_CODE  <= KERNEL_VERSION(2, 6, 19)
+typedef struct work_struct      __qdf_work_t;
+typedef struct work_struct      __qdf_delayed_work_t;
+#else
+
+/**
+ * __qdf_work_t - wrapper around the real task func
+ * @work: Instance of work
+ * @fn: function pointer to the handler
+ * @arg: pointer to argument
+ */
+typedef struct {
+	struct work_struct   work;
+	qdf_defer_fn_t    fn;
+	void                 *arg;
+} __qdf_work_t;
+
+/**
+ * __qdf_delayed_work_t - wrapper around the real work func
+ * @dwork: Instance of delayed work
+ * @fn: function pointer to the handler
+ * @arg: pointer to argument
+ */
+typedef struct {
+	struct delayed_work  dwork;
+	qdf_defer_fn_t    fn;
+	void                 *arg;
+} __qdf_delayed_work_t;
+
+extern void __qdf_defer_func(struct work_struct *work);
+extern void __qdf_defer_delayed_func(struct work_struct *work);
+#endif
+
+typedef void (*__qdf_bh_fn_t)(unsigned long arg);
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19)
+/**
+ * __qdf_init_work - Initialize a work/task; it runs in non-interrupt
+ * context, so it can be preempted by H/W & S/W interrupts
+ * @hdl: OS handle
+ * @work: pointer to work
+ * @func: deferred function to run in bottom half non-interrupt context.
+ * @arg: argument for the deferred function
+ * Return: QDF_STATUS_SUCCESS
+ */
+static inline QDF_STATUS __qdf_init_work(qdf_handle_t hdl,
+					 __qdf_work_t *work,
+					 qdf_defer_fn_t func, void *arg)
+{
+	/* Initialize func and argument in work struct */
+	INIT_WORK(&work->work, __qdf_defer_func);
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_init_delayed_work - create a delayed work/task; it runs in
+ * non-interrupt context, so it can be preempted by H/W & S/W interrupts
+ * @hdl: OS handle
+ * @work: pointer to work
+ * @func: deferred function to run in bottom half non-interrupt context.
+ * @arg: argument for the deferred function
+ * Return: QDF_STATUS_SUCCESS
+ */
+static inline uint32_t __qdf_init_delayed_work(qdf_handle_t hdl,
+					       __qdf_delayed_work_t *work,
+					       qdf_defer_fn_t func, void *arg)
+{
+	INIT_WORK(work, func, arg);
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_queue_work - Queue the work/task
+ * @hdl: OS handle
+ * @wqueue: pointer to workqueue
+ * @work: pointer to work
+ * Return: none
+ */
+static inline void __qdf_queue_work(qdf_handle_t hdl,
+				    __qdf_workqueue_t *wqueue,
+				    __qdf_work_t *work)
+{
+	queue_work(wqueue, work);
+}
+
+/**
+ * __qdf_queue_delayed_work - Queue the delayed work/task
+ * @hdl: OS handle
+ * @wqueue: pointer to workqueue
+ * @work: pointer to work
+ * @delay: delay interval
+ * Return: none
+ */
+static inline void __qdf_queue_delayed_work(qdf_handle_t hdl,
+					    __qdf_workqueue_t *wqueue,
+					    __qdf_delayed_work_t *work,
+					    uint32_t delay)
+{
+	queue_delayed_work(wqueue, work, delay);
+}
+
+/**
+ * __qdf_sched_work - Schedule a deferred task on non-interrupt context
+ * @hdl: OS handle
+ * @work: pointer to work
+ * Return: QDF_STATUS_SUCCESS
+ */
+static inline QDF_STATUS __qdf_sched_work(qdf_handle_t hdl, __qdf_work_t *work)
+{
+	schedule_work(work);
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_flush_work - Flush a deferred task on non-interrupt context
+ * @hdl: OS handle
+ * @work: pointer to work
+ * Return: QDF_STATUS_SUCCESS
+ */
+static inline uint32_t __qdf_flush_work(qdf_handle_t hdl, __qdf_work_t *work)
+{
+	flush_work(work);
+	return QDF_STATUS_SUCCESS;
+}
+#else
+static inline QDF_STATUS __qdf_init_work(qdf_handle_t hdl,
+					 __qdf_work_t *work,
+					 qdf_defer_fn_t func, void *arg)
+{
+	work->fn = func;
+	work->arg = arg;
+#ifdef CONFIG_CNSS
+	cnss_init_work(&work->work, __qdf_defer_func);
+#else
+	INIT_WORK(&work->work, __qdf_defer_func);
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+
+static inline uint32_t __qdf_init_delayed_work(qdf_handle_t hdl,
+					       __qdf_delayed_work_t *work,
+					       qdf_defer_fn_t func, void *arg)
+{
+	/* Initialize func and argument in work struct */
+	work->fn = func;
+	work->arg = arg;
+	INIT_DELAYED_WORK(&work->dwork, __qdf_defer_delayed_func);
+	return QDF_STATUS_SUCCESS;
+}
+
+static inline void __qdf_queue_work(qdf_handle_t hdl,
+				    __qdf_workqueue_t *wqueue,
+				    __qdf_work_t *work)
+{
+	queue_work(wqueue, &work->work);
+}
+
+static inline void __qdf_queue_delayed_work(qdf_handle_t hdl,
+					    __qdf_workqueue_t *wqueue,
+					    __qdf_delayed_work_t *work,
+					    uint32_t delay)
+{
+	queue_delayed_work(wqueue, &work->dwork, delay);
+}
+
+static inline QDF_STATUS __qdf_sched_work(qdf_handle_t hdl, __qdf_work_t *work)
+{
+	schedule_work(&work->work);
+	return QDF_STATUS_SUCCESS;
+}
+
+static inline uint32_t __qdf_flush_work(qdf_handle_t hdl, __qdf_work_t *work)
+{
+	flush_work(&work->work);
+	return QDF_STATUS_SUCCESS;
+}
+#endif
+
+/**
+ * __qdf_create_workqueue - create a workqueue; work items on it run in
+ * non-interrupt context, so they can be preempted by H/W & S/W interrupts
+ * @name: string
+ * Return: pointer of type qdf_workqueue_t
+ */
+static inline __qdf_workqueue_t *__qdf_create_workqueue(char *name)
+{
+	return create_workqueue(name);
+}
+
+/**
+ * __qdf_flush_workqueue - flush the workqueue
+ * @hdl: OS handle
+ * @wqueue: pointer to workqueue
+ * Return: none
+ */
+static inline void __qdf_flush_workqueue(qdf_handle_t hdl,
+	__qdf_workqueue_t *wqueue)
+{
+	flush_workqueue(wqueue);
+}
+
+/**
+ * __qdf_destroy_workqueue - Destroy the workqueue
+ * @hdl: OS handle
+ * @wqueue: pointer to workqueue
+ * Return: none
+ */
+static inline void __qdf_destroy_workqueue(qdf_handle_t hdl,
+	 __qdf_workqueue_t *wqueue)
+{
+	destroy_workqueue(wqueue);
+}
+
+/**
+ * __qdf_init_bh - creates the Bottom half deferred handler
+ * @hdl: OS handle
+ * @bh: pointer to the bottom-half tasklet
+ * @func: deferred function to run at bottom half interrupt context.
+ * @arg: argument for the deferred function
+ * Return: QDF_STATUS_SUCCESS
+ */
+static inline QDF_STATUS __qdf_init_bh(qdf_handle_t hdl,
+			struct tasklet_struct *bh,
+			qdf_defer_fn_t func, void *arg)
+{
+	tasklet_init(bh, (__qdf_bh_fn_t) func, (unsigned long)arg);
+	return QDF_STATUS_SUCCESS;
+}
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19)
+#else
+#endif
+
+/**
+ * __qdf_sched_bh - schedule a bottom half (DPC)
+ * @hdl: OS handle
+ * @bh: pointer to the bottom-half tasklet
+ * Return: QDF_STATUS_SUCCESS
+ */
+static inline QDF_STATUS
+__qdf_sched_bh(qdf_handle_t hdl, struct tasklet_struct *bh)
+{
+	tasklet_schedule(bh);
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_disable_work - disable the deferred task (synchronous)
+ * @hdl: OS handle
+ * @work: pointer to work
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+__qdf_disable_work(qdf_handle_t hdl, __qdf_work_t *work)
+{
+	if (cancel_work_sync(&work->work))
+		return QDF_STATUS_E_ALREADY;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_disable_bh - destroy the bh (synchronous)
+ * @hdl: OS handle
+ * @bh: pointer to the bottom-half tasklet
+ * Return: QDF_STATUS_SUCCESS
+ */
+static inline QDF_STATUS
+__qdf_disable_bh(qdf_handle_t hdl, struct tasklet_struct *bh)
+{
+	tasklet_kill(bh);
+	return QDF_STATUS_SUCCESS;
+}
+
+#endif /*_I_QDF_DEFER_H*/
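A sketch of the shim-level flow on the post-2.6.19 path. It assumes qdf_defer_fn_t takes a single void * argument, which is what the wrapper structs above imply; my_handler is hypothetical and not part of this change:

	static void my_handler(void *arg)
	{
		/* runs later, in workqueue (process) context */
	}

	static __qdf_work_t my_work;

	static void defer_example(qdf_handle_t hdl)
	{
		__qdf_init_work(hdl, &my_work, my_handler, NULL);
		__qdf_sched_work(hdl, &my_work);

		/* ... on teardown, cancel any pending run synchronously ... */
		__qdf_disable_work(hdl, &my_work);
	}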

+ 17 - 24
qdf/src/i_qdf_event.h → qdf/linux/src/i_qdf_event.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -25,19 +25,26 @@
  * to the Linux Foundation.
  */
 
-#if !defined(__I_CDF_EVENT_H)
-#define __I_CDF_EVENT_H
-
 /**
- * DOC: i_cdf_event.h
- *
- * Linux-specific definitions for CDF Events
+ * DOC: i_qdf_event.h
+ * This file provides OS dependent event API's.
  */
 
-/* Include Files */
-#include <cdf_types.h>
+#if !defined(__I_QDF_EVENT_H)
+#define __I_QDF_EVENT_H
+
 #include <linux/completion.h>
 
+/**
+ * __qdf_event_t - manages events
+ * @complete: completion object the event wraps
+ * @cookie: cookie used to validate the event
+ */
+typedef struct qdf_evt {
+	struct completion complete;
+	uint32_t cookie;
+} __qdf_event_t;
+
 /* Preprocessor definitions and constants */
 #define LINUX_EVENT_COOKIE 0x12341234
 
@@ -45,18 +52,4 @@
 #define INIT_COMPLETION(event) reinit_completion(&event)
 #endif
 
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/* Type declarations */
-
-typedef struct evt {
-	struct completion complete;
-	uint32_t cookie;
-} cdf_event_t;
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-#endif /* __I_CDF_EVENT_H */
+#endif /*__I_QDF_EVENT_H*/
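For context, a sketch of what the event shim wraps; real clients go through the qdf_event_* wrappers, so this is illustrative only:

	__qdf_event_t ev;

	init_completion(&ev.complete);
	ev.cookie = LINUX_EVENT_COOKIE;   /* marks the event as initialized */

	/* a waiter blocks with wait_for_completion(&ev.complete);
	 * the signalling thread calls complete(&ev.complete) */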

+ 62 - 0
qdf/linux/src/i_qdf_list.h

@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_list.h
+ * This file provides OS dependent list API's.
+ */
+
+#if !defined(__I_QDF_LIST_H)
+#define __I_QDF_LIST_H
+
+#include <linux/list.h>
+
+/* Type declarations */
+typedef struct list_head __qdf_list_node_t;
+
+/* Preprocessor definitions and constants */
+
+typedef struct qdf_list_s {
+	__qdf_list_node_t anchor;
+	uint32_t count;
+	uint32_t max_size;
+} __qdf_list_t;
+
+/**
+ * __qdf_list_create() - Initialize list head
+ * @list: object of list
+ * @max_size: max size of the list
+ * Return: none
+ */
+static inline void __qdf_list_create(__qdf_list_t *list, uint32_t max_size)
+{
+	INIT_LIST_HEAD(&list->anchor);
+	list->count = 0;
+	list->max_size = max_size;
+}
+
+#endif
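A minimal sketch of the shim in use; the remaining list operations live in qdf_list.c and are reached through the qdf_list_* wrappers (illustrative only):

	__qdf_list_t peer_list;

	__qdf_list_create(&peer_list, 32 /* max_size */);
	/* peer_list.anchor is now an empty list_head, count == 0, and the
	 * wrappers can enforce the max_size bound on insertion */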

+ 336 - 0
qdf/linux/src/i_qdf_lock.h

@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_lock.h
+ * Linux-specific definitions for QDF Lock API's
+ */
+
+#if !defined(__I_QDF_LOCK_H)
+#define __I_QDF_LOCK_H
+
+/* Include Files */
+#include <qdf_types.h>
+#include <qdf_status.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/device.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
+#include <asm/semaphore.h>
+#else
+#include <linux/semaphore.h>
+#endif
+#include <linux/interrupt.h>
+#if defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+#include <linux/wakelock.h>
+#endif
+
+/* define for flag */
+#define QDF_LINUX_UNLOCK_BH  1
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+enum {
+	LOCK_RELEASED = 0x11223344,
+	LOCK_ACQUIRED,
+	LOCK_DESTROYED
+};
+
+/**
+ * struct qdf_lock_s - mutex abstraction, typedef'd as __qdf_mutex_t
+ * @m_lock: Mutex lock
+ * @cookie: Lock cookie
+ * @process_id: Process ID to track lock
+ * @state: Lock status
+ * @refcount: Reference count for recursive lock
+ */
+struct qdf_lock_s {
+	struct mutex m_lock;
+	uint32_t cookie;
+	int process_id;
+	uint32_t state;
+	uint8_t refcount;
+};
+
+typedef struct qdf_lock_s __qdf_mutex_t;
+
+/**
+ * struct __qdf_spinlock - spinlock abstraction, typedef'd as __qdf_spinlock_t
+ * @spinlock: Spin lock
+ * @flags: Lock flag
+ */
+typedef struct __qdf_spinlock {
+	spinlock_t spinlock;
+	unsigned long flags;
+} __qdf_spinlock_t;
+
+typedef struct semaphore __qdf_semaphore_t;
+
+#if defined CONFIG_CNSS
+typedef struct wakeup_source qdf_wake_lock_t;
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+typedef struct wake_lock qdf_wake_lock_t;
+#else
+typedef int qdf_wake_lock_t;
+#endif
+
+#define LINUX_LOCK_COOKIE 0x12345678
+
+/* Function declarations and documentation */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
+/**
+ * __qdf_semaphore_init() - initialize the semaphore
+ * @m: Semaphore object
+ *
+ * Return: QDF_STATUS_SUCCESS
+ */
+static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m)
+{
+	init_MUTEX(m);
+	return QDF_STATUS_SUCCESS;
+}
+#else
+static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m)
+{
+	sema_init(m, 1);
+	return QDF_STATUS_SUCCESS;
+}
+#endif
+
+/**
+ * __qdf_semaphore_acquire() - acquire semaphore
+ * @m: Semaphore object
+ *
+ * Return: 0
+ */
+static inline int __qdf_semaphore_acquire(struct semaphore *m)
+{
+	down(m);
+	return 0;
+}
+
+/**
+ * __qdf_semaphore_acquire_intr() - acquire a semaphore, interruptibly
+ * @m: Semaphore object
+ *
+ * Wraps down_interruptible(): a process waiting on the semaphore can be
+ * interrupted by a signal. If the operation is interrupted, the function
+ * returns a nonzero value and the caller does not hold the semaphore.
+ * Callers must always check the return value and respond accordingly.
+ *
+ * Return: 0 on success, nonzero if interrupted
+ */
+static inline int __qdf_semaphore_acquire_intr(struct semaphore *m)
+{
+	return down_interruptible(m);
+}
+
+/**
+ * __qdf_semaphore_release() - release semaphore
+ * @m: Semaphore object
+ *
+ * Return: none
+ */
+static inline void __qdf_semaphore_release(struct semaphore *m)
+{
+	up(m);
+}
+
+/**
+ * __qdf_semaphore_acquire_timeout() - Take the semaphore before timeout
+ * @m: semaphore to take
+ * @timeout: maximum time, in milliseconds, to try to take the semaphore
+ * Return: int
+ */
+static inline int __qdf_semaphore_acquire_timeout(struct semaphore *m,
+						  unsigned long timeout)
+{
+	unsigned long jiffie_val = msecs_to_jiffies(timeout);
+	return down_timeout(m, jiffie_val);
+}
+
+/**
+ * __qdf_spinlock_create() - initialize spin lock
+ * @lock: Spin lock object
+ *
+ * Return: QDF_STATUS_SUCCESS
+ */
+static inline QDF_STATUS __qdf_spinlock_create(__qdf_spinlock_t *lock)
+{
+	spin_lock_init(&lock->spinlock);
+	lock->flags = 0;
+	return QDF_STATUS_SUCCESS;
+}
+
+#define __qdf_spinlock_destroy(lock)
+
+/**
+ * __qdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive)
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void __qdf_spin_lock(__qdf_spinlock_t *lock)
+{
+	spin_lock(&lock->spinlock);
+}
+
+/**
+ * __qdf_spin_unlock() - Unlock the spinlock and enables the Preemption
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void __qdf_spin_unlock(__qdf_spinlock_t *lock)
+{
+	spin_unlock(&lock->spinlock);
+}
+
+/**
+ * __qdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption
+ * (Preemptive) and disable IRQs
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void __qdf_spin_lock_irqsave(__qdf_spinlock_t *lock)
+{
+	spin_lock_irqsave(&lock->spinlock, lock->flags);
+}
+
+/**
+ * __qdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the
+ * Preemption and enable IRQ
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void __qdf_spin_unlock_irqrestore(__qdf_spinlock_t *lock)
+{
+	spin_unlock_irqrestore(&lock->spinlock, lock->flags);
+}
+
+/*
+ * Synchronous versions - only for OS' that have interrupt disable
+ */
+#define __qdf_spin_lock_irq(_p_lock, _flags) spin_lock_irqsave(_p_lock, _flags)
+#define __qdf_spin_unlock_irq(_p_lock, _flags) \
+	spin_unlock_irqrestore(_p_lock, _flags)
+
+/**
+ * __qdf_spin_trylock_bh() - try to acquire the spinlock, bottom-half safe
+ * @lock: spinlock object
+ *
+ * Return: nonzero if the lock was acquired, 0 otherwise
+ */
+static inline int __qdf_spin_trylock_bh(__qdf_spinlock_t *lock)
+{
+	if (likely(irqs_disabled() || in_irq() || in_softirq())) {
+		return spin_trylock(&lock->spinlock);
+	} else {
+		if (spin_trylock_bh(&lock->spinlock)) {
+			lock->flags |= QDF_LINUX_UNLOCK_BH;
+			return 1;
+		} else {
+			return 0;
+		}
+	}
+}
+
+/**
+ * __qdf_spin_lock_bh() - Acquire the spinlock and disable bottom halves
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void __qdf_spin_lock_bh(__qdf_spinlock_t *lock)
+{
+	if (likely(irqs_disabled() || in_irq() || in_softirq())) {
+		spin_lock(&lock->spinlock);
+	} else {
+		spin_lock_bh(&lock->spinlock);
+		lock->flags |= QDF_LINUX_UNLOCK_BH;
+	}
+}
+
+/**
+ * __qdf_spin_unlock_bh() - Release the spinlock and enable bottom halves
+ * @lock: Lock object
+ *
+ * Return: none
+ */
+static inline void __qdf_spin_unlock_bh(__qdf_spinlock_t *lock)
+{
+	if (unlikely(lock->flags & QDF_LINUX_UNLOCK_BH)) {
+		lock->flags &= ~QDF_LINUX_UNLOCK_BH;
+		spin_unlock_bh(&lock->spinlock);
+	} else
+		spin_unlock(&lock->spinlock);
+}
+
+/**
+ * __qdf_spinlock_irq_exec - Execute a function with the spinlock held and
+ * interrupts disabled
+ * @hdl: OS handle
+ * @lock: spinlock to be held for the critical region
+ * @func: critical region function to be executed
+ * @arg: argument passed to the critical region function
+ *
+ * Return: Boolean status returned by the critical region function
+ */
+static inline bool __qdf_spinlock_irq_exec(qdf_handle_t hdl,
+			__qdf_spinlock_t *lock,
+			qdf_irqlocked_func_t func,
+			void *arg)
+{
+	unsigned long flags;
+	bool ret;
+
+	spin_lock_irqsave(&lock->spinlock, flags);
+	ret = func(arg);
+	spin_unlock_irqrestore(&lock->spinlock, flags);
+
+	return ret;
+}
+
+/**
+ * __qdf_in_softirq() - in soft irq context
+ *
+ * Return: true if in softirq context, else false
+ */
+static inline bool __qdf_in_softirq(void)
+{
+	return in_softirq();
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __I_QDF_LOCK_H */
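A hedged sketch of a bottom-half-safe critical section using the spinlock shim above (illustrative only):

	static __qdf_spinlock_t stats_lock;

	void stats_init(void)
	{
		__qdf_spinlock_create(&stats_lock);
	}

	void stats_update(void)
	{
		/* usable from process or softirq context; the shim chooses
		 * spin_lock() or spin_lock_bh() accordingly */
		__qdf_spin_lock_bh(&stats_lock);
		/* ... touch data shared with the tasklet path ... */
		__qdf_spin_unlock_bh(&stats_lock);
	}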

+ 13 - 14
qdf/src/i_qdf_mc_timer.h → qdf/linux/src/i_qdf_mc_timer.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -25,18 +25,17 @@
  * to the Linux Foundation.
  */
 
-#if !defined(__I_CDF_MC_TIMER_H)
-#define __I_CDF_MC_TIMER_H
-
 /**
- * DOC: i_cdf_mc_timer.h
- *
- * Linux-specific definitions for CDF timers serialized to MC thread
+ * DOC: i_qdf_mc_timer.h
+ * Linux-specific definitions for QDF timers serialized to MC thread
  */
 
+#if !defined(__I_QDF_MC_TIMER_H)
+#define __I_QDF_MC_TIMER_H
+
 /* Include Files */
-#include <cdf_mc_timer.h>
-#include <cdf_types.h>
+#include <qdf_mc_timer.h>
+#include <qdf_types.h>
 #include <linux/timer.h>
 #include <linux/time.h>
 #include <linux/jiffies.h>
@@ -48,14 +47,14 @@ extern "C" {
 #endif /* __cplusplus */
 /* Type declarations */
 
-typedef struct cdf_mc_timer_platform_s {
-	struct timer_list Timer;
-	int threadID;
+typedef struct qdf_mc_timer_platform_s {
+	struct timer_list timer;
+	int thread_id;
 	uint32_t cookie;
 	spinlock_t spinlock;
-} cdf_mc_timer_platform_t;
+} qdf_mc_timer_platform_t;
 
 #ifdef __cplusplus
 }
 #endif /* __cplusplus */
-#endif /* __I_CDF_MC_TIMER_H */
+#endif /* __I_QDF_MC_TIMER_H */
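For orientation, a sketch of the per-timer state above; the values are hypothetical and the real initialization is performed by the qdf_mc_timer code, not by callers:

	qdf_mc_timer_platform_t plat;

	spin_lock_init(&plat.spinlock);   /* guards timer state transitions */
	plat.thread_id = 0;               /* owning (MC) thread id */
	plat.cookie = 0;                  /* validity cookie checked on use */
	/* plat.timer is the underlying struct timer_list that gets armed */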

+ 213 - 0
qdf/linux/src/i_qdf_mem.h

@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_mem.h
+ * Linux-specific definitions for QDF memory API's
+ */
+
+#ifndef __I_QDF_MEM_H
+#define __I_QDF_MEM_H
+
+#ifdef __KERNEL__
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
+#include <linux/autoconf.h>
+#else
+#include <generated/autoconf.h>
+#endif
+#endif
+#include <linux/slab.h>
+#include <linux/hardirq.h>
+#include <linux/vmalloc.h>
+#include <linux/pci.h> /* pci_alloc_consistent */
+#if CONFIG_MCL
+#include <cds_queue.h>
+#else
+#include <sys/queue.h>
+#endif
+#else
+/*
+ * Provide dummy defs for kernel data types, functions, and enums
+ * used in this header file.
+ */
+#define GFP_KERNEL 0
+#define GFP_ATOMIC 0
+#define kzalloc(size, flags) NULL
+#define vmalloc(size)        NULL
+#define kfree(buf)
+#define vfree(buf)
+#define pci_alloc_consistent(dev, size, paddr) NULL
+#define __qdf_mempool_t
+#endif /* __KERNEL__ */
+#include <qdf_status.h>
+
+#ifdef __KERNEL__
+typedef struct mempool_elem {
+	STAILQ_ENTRY(mempool_elem) mempool_entry;
+} mempool_elem_t;
+
+/**
+ * typedef __qdf_mempool_ctxt_t - Memory pool context
+ * @pool_id: pool identifier
+ * @flags: flags
+ * @elem_size: size of each pool element in bytes
+ * @pool_mem: address of the pool memory created
+ * @mem_size: Total size of the pool in bytes
+ * @free_list: free pool list
+ * @lock: spinlock object
+ * @max_elem: Maximum number of elements in the pool
+ * @free_cnt: Number of free elements available
+ */
+typedef struct __qdf_mempool_ctxt {
+	int pool_id;
+	u_int32_t flags;
+	size_t elem_size;
+	void *pool_mem;
+	u_int32_t mem_size;
+	STAILQ_HEAD(, mempool_elem) free_list;
+	spinlock_t lock;
+	u_int32_t max_elem;
+	u_int32_t free_cnt;
+} __qdf_mempool_ctxt_t;
+#endif
+typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;
+
+/* typedef for dma_data_direction */
+typedef enum dma_data_direction __dma_data_direction;
+
+/**
+ * __qdf_str_cmp() - Compare two strings
+ * @str1: First string
+ * @str2: Second string
+ *
+ * Return: =0 equal
+ * >0 not equal, if  str1  sorts lexicographically after str2
+ * <0 not equal, if  str1  sorts lexicographically before str2
+ */
+static inline int32_t __qdf_str_cmp(const char *str1, const char *str2)
+{
+	return strcmp(str1, str2);
+}
+
+/**
+ * __qdf_str_lcopy() - Copy from one string to another
+ * @dest: destination string
+ * @src: source string
+ * @bytes: limit of num bytes to copy
+ *
+ * Return: the total length of @src (the value returned by strlcpy())
+ */
+static inline uint32_t __qdf_str_lcopy(char *dest, const char *src,
+				    uint32_t bytes)
+{
+	return strlcpy(dest, src, bytes);
+}
+
+/**
+ * __qdf_mem_map_nbytes_single - Map memory for DMA
+ * @osdev: pointer to OS device context
+ * @buf: pointer to memory to be dma mapped
+ * @dir: DMA map direction
+ * @nbytes: number of bytes to be mapped.
+ * @phy_addr: pointer to receive the physical address.
+ *
+ * Return: success/failure
+ */
+static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
+						  void *buf, qdf_dma_dir_t dir,
+						  int nbytes,
+						  uint32_t *phy_addr)
+{
+	/* assume that the OS only provides a single fragment */
+	*phy_addr = dma_map_single(osdev->dev, buf, nbytes, dir);
+	return dma_mapping_error(osdev->dev, *phy_addr) ?
+	QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_mem_unmap_nbytes_single() - unmap memory for DMA
+ *
+ * @osdev: pointer to OS device context
+ * @phy_addr: physical address of memory to be dma unmapped
+ * @dir: DMA unmap direction
+ * @nbytes: number of bytes to be unmapped.
+ *
+ * Return: none
+ */
+static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
+						 uint32_t phy_addr,
+						 qdf_dma_dir_t dir, int nbytes)
+{
+	dma_unmap_single(osdev->dev, phy_addr, nbytes, dir);
+}
+#ifdef __KERNEL__
+
+typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;
+
+int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool, int pool_cnt,
+		       size_t pool_entry_size, u_int32_t flags);
+void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);
+void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);
+void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);
+
+#define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size);
+#endif
+
+/**
+ * __qdf_str_len() - returns the length of a string
+ * @str: input string
+ * Return:
+ * length of string
+ */
+static inline int32_t __qdf_str_len(const char *str)
+{
+	return strlen(str);
+}
+
+/**
+ * __qdf_mem_cmp() - memory compare
+ * @memory1: pointer to one location in memory to compare.
+ * @memory2: pointer to second location in memory to compare.
+ * @num_bytes: the number of bytes to compare.
+ *
+ * Function to compare two pieces of memory, similar to memcmp function
+ * in standard C.
+ * Return:
+ * int32_t - indicates how the two memory regions compare,
+ * as memcmp() does:
+ * 0 -- equal
+ * < 0 -- *memory1 is less than *memory2
+ * > 0 -- *memory1 is bigger than *memory2
+ */
+static inline int32_t __qdf_mem_cmp(const void *memory1, const void *memory2,
+				    uint32_t num_bytes)
+{
+	return (int32_t) memcmp(memory1, memory2, num_bytes);
+}
+
+#endif /* __I_QDF_MEM_H */
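A sketch of the mempool prototypes above; the element type, a valid osdev handle, and the assumption that __qdf_mempool_init() returns 0 on success are all illustrative, not guaranteed by this header:

	struct my_elem { uint32_t id; };   /* hypothetical pool element */
	__qdf_mempool_t pool;

	if (__qdf_mempool_init(osdev, &pool, 64 /* elements */,
			       sizeof(struct my_elem), 0 /* flags */) == 0) {
		struct my_elem *e = __qdf_mempool_alloc(osdev, pool);

		if (e) {
			e->id = 1;
			__qdf_mempool_free(osdev, pool, e);
		}
		__qdf_mempool_destroy(osdev, pool);
	}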

+ 63 - 0
qdf/linux/src/i_qdf_module.h

@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2010-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_module.h
+ * Linux-specific definitions for QDF module API's
+ */
+
+#ifndef _I_QDF_MODULE_H
+#define _I_QDF_MODULE_H
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <qdf_types.h>
+
+
+#define __qdf_virt_module_init(_x)  \
+	static int _x##_mod(void) \
+	{                   \
+		uint32_t st;  \
+		st = (_x)();         \
+		if (st != QDF_STATUS_SUCCESS)  \
+			return QDF_STATUS_E_INVAL;            \
+		else                    \
+			return 0;             \
+	}                           \
+	module_init(_x##_mod);
+
+#define __qdf_virt_module_exit(_x)  module_exit(_x)
+
+#define __qdf_virt_module_name(_name) MODULE_LICENSE("Proprietary");
+
+#define __qdf_export_symbol(_sym) EXPORT_SYMBOL(_sym)
+
+#define __qdf_declare_param(_name, _type) \
+	module_param(_name, _type, 0600)
+
+#endif /* _I_QDF_MODULE_H */
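A sketch of the module helpers above in use; my_mod_init is a hypothetical entry point (illustrative only):

	static uint32_t my_mod_init(void)
	{
		/* converged init entry point; returns a QDF_STATUS value */
		return QDF_STATUS_SUCCESS;
	}

	/* expands to a wrapper returning 0 on QDF_STATUS_SUCCESS (and
	 * QDF_STATUS_E_INVAL otherwise) and registers it via module_init() */
	__qdf_virt_module_init(my_mod_init)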

+ 1569 - 0
qdf/linux/src/i_qdf_nbuf.h

@@ -0,0 +1,1569 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_nbuf.h
+ * This file provides OS dependent nbuf API's.
+ */
+
+#ifndef _I_QDF_NBUF_H
+#define _I_QDF_NBUF_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <qdf_types.h>
+#include <qdf_status.h>
+#include <qdf_util.h>
+#include <qdf_net_types.h>
+#include <qdf_mem.h>
+#include <linux/tcp.h>
+#include <qdf_util.h>
+
+/*
+ * Use the Linux socket buffer (sk_buff) as the underlying nbuf
+ * implementation. Linux uses sk_buff to represent both the packet and its
+ * data, so qdf_nbuf maps directly onto sk_buff.
+ */
+typedef struct sk_buff *__qdf_nbuf_t;
+
+#define QDF_NBUF_CB_TX_MAX_OS_FRAGS 1
+
+/* QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS -
+ * max tx fragments added by the driver
+ * The driver will always add one tx fragment (the tx descriptor)
+ */
+#define QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS 2
+
+/*
+ * Make sure that qdf_dma_addr_t in the cb block is always 64 bit aligned
+ */
+typedef union {
+	uint64_t       u64;
+	qdf_dma_addr_t dma_addr;
+} qdf_paddr_t;
+
+/**
+ * struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
+ *                    - data passed between layers of the driver.
+ *
+ * Notes:
+ *   1. Hard limited to 48 bytes. Please count your bytes
+ *   2. The size of this structure has to be easily and consistently
+ *      calculable: do not use any conditional compile flags
+ *   3. Split into a common part followed by a tx/rx overlay
+ *   4. There is only one extra frag, which represents the HTC/HTT header
+ *
+ * @common.paddr   : physical address retrieved by dma_map of nbuf->data
+ * @rx.lro_flags   : hardware assisted flags:
+ *   @rx.lro_eligible    : flag to indicate whether the MSDU is LRO eligible
+ *   @rx.tcp_proto       : L4 protocol is TCP
+ *   @rx.tcp_pure_ack    : A TCP ACK packet with no payload
+ *   @rx.ipv6_proto      : L3 protocol is IPV6
+ *   @rx.ip_offset       : offset to IP header
+ *   @rx.tcp_offset      : offset to TCP header
+ *   @rx.tcp_udp_chksum  : L4 payload checksum
+ *   @rx.tcp_seq_num     : TCP sequence number
+ *   @rx.tcp_ack_num     : TCP ACK number
+ *   @rx.flow_id_toeplitz: 32-bit 5-tuple Toeplitz hash
+ * @tx.extra_frag  : represent HTC/HTT header
+ * @tx.efrag.vaddr       : virtual address of ~
+ * @tx.efrag.paddr       : physical/DMA address of ~
+ * @tx.efrag.len         : length of efrag pointed by the above pointers
+ * @tx.efrag.num         : number of extra frags ( 0 or 1)
+ * @tx.efrag.flags.nbuf  : flag, nbuf payload to be swapped (wordstream)
+ * @tx.efrag.flags.efrag : flag, efrag payload to be swapped (wordstream)
+ * @tx.efrag.flags.chfrag_start: used by WIN
+ * @tx.efrag.flags.chfrag_end:   used by WIN
+ * @tx.data_attr   : value that is programmed into CE descr, includes:
+ *                 + (1) CE classification enablement bit
+ *                 + (2) packet type (802.3 or Ethernet type II)
+ *                 + (3) packet offset (usually length of HTC/HTT descr)
+ * @tx.trace       : combined structure for DP and protocol trace
+ * @tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
+ *                       +               (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
+ * @tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
+ * @tx.trace.proto_type  : bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
+ *                       +                              (MGMT_ACTION)] - 4 bits
+ * @tx.trace.dp_trace    : flag (Datapath trace)
+ * @tx.trace.htt2_frm    : flag (high-latency path only)
+ * @tx.trace.vdev_id     : vdev (for protocol trace)
+ * @tx.ipa.owned   : packet owned by IPA
+ * @tx.ipa.priv    : private data, used by IPA
+ */
+struct qdf_nbuf_cb {
+	/* common */
+	qdf_paddr_t paddr; /* of skb->data */
+	/* valid only in one direction */
+	union {
+		/* Note: MAX: 40 bytes */
+		struct {
+			uint32_t lro_eligible:1,
+				tcp_proto:1,
+				tcp_pure_ack:1,
+				ipv6_proto:1,
+				ip_offset:7,
+				tcp_offset:7;
+			uint32_t tcp_udp_chksum:16,
+				tcp_win:16;
+			uint32_t tcp_seq_num;
+			uint32_t tcp_ack_num;
+			uint32_t flow_id_toeplitz;
+		} rx; /* 20 bytes */
+
+		/* Note: MAX: 40 bytes */
+		struct {
+			struct {
+				unsigned char *vaddr;
+				qdf_paddr_t paddr;
+				uint16_t len;
+				union {
+					struct {
+						uint8_t flag_efrag:1,
+							flag_nbuf:1,
+							num:1,
+							flag_chfrag_start:1,
+							flag_chfrag_end:1,
+							reserved:3;
+					} bits;
+					uint8_t u8;
+				} flags;
+			}  extra_frag; /* 19 bytes */
+			union {
+				struct {
+					uint8_t ftype;
+					uint32_t submit_ts;
+					void *fctx;
+					void *vdev_ctx;
+				} win; /* 21 bytes*/
+				struct {
+					uint32_t data_attr; /* 4 bytes */
+					union {
+						struct {
+							uint8_t packet_state;
+							uint8_t packet_track:4,
+								proto_type:4;
+							uint8_t dp_trace:1,
+								htt2_frm:1,
+								rsrvd:6;
+							uint8_t vdev_id;
+						} hl;
+						struct {
+							uint8_t packet_state;
+							uint8_t packet_track:4,
+								proto_type:4;
+							uint8_t dp_trace:1,
+								rsrvd:7;
+							uint8_t vdev_id;
+						} ll; /* low latency */
+					} trace; /* 4 bytes */
+					struct {
+						uint32_t owned:1,
+							priv:31;
+					} ipa; /* 4 */
+				} mcl;/* 12 bytes*/
+			} dev;
+		} tx; /* 40 bytes */
+	} u;
+}; /* struct qdf_nbuf_cb: MAX 48 bytes */
+
+/**
+ *  access macros to qdf_nbuf_cb
+ *  Note: These macros can be used as L-values as well as R-values.
+ *        When used as R-values, they effectively function as "get" macros
+ *        When used as L_values, they effectively function as "set" macros
+ */
+
+#define QDF_NBUF_CB_PADDR(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
+
+#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
+#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
+#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
+#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
+#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
+#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
+#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
+#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
+#define QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_seq_num)
+#define QDF_NBUF_CB_RX_TCP_ACK_NUM(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_ack_num)
+#define QDF_NBUF_CB_RX_FLOW_ID_TOEPLITZ(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id_toeplitz)
+
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.vaddr)
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.paddr.dma_addr)
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.len)
+#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.flags.bits.num)
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.extra_frag.flags.u8)
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
+	(((struct qdf_nbuf_cb *) \
+	((skb)->cb))->u.tx.extra_frag.flags.bits.flag_chfrag_start)
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
+		(((struct qdf_nbuf_cb *) \
+		((skb)->cb))->u.tx.extra_frag.flags.bits.flag_chfrag_end)
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
+	(((struct qdf_nbuf_cb *) \
+		((skb)->cb))->u.tx.extra_frag.flags.bits.flag_efrag)
+#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
+	(((struct qdf_nbuf_cb *) \
+		((skb)->cb))->u.tx.extra_frag.flags.bits.flag_nbuf)
+#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.mcl.data_attr)
+#define QDF_NBUF_CB_TX_PACKET_STATE(skb) \
+	(((struct qdf_nbuf_cb *) \
+		((skb)->cb))->u.tx.dev.mcl.trace.ll.packet_state)
+#define QDF_NBUF_CB_TX_PACKET_TRACK(skb) \
+	(((struct qdf_nbuf_cb *) \
+		((skb)->cb))->u.tx.dev.mcl.trace.ll.packet_track)
+#define QDF_NBUF_CB_TX_PROTO_TYPE(skb) \
+	(((struct qdf_nbuf_cb *) \
+		((skb)->cb))->u.tx.dev.mcl.trace.ll.proto_type)
+#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
+	qdf_nbuf_set_state(skb, PACKET_STATE)
+#define QDF_NBUF_GET_PACKET_TRACK(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.mcl.trace.ll.packet_track)
+#define QDF_NBUF_CB_TX_DP_TRACE(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.mcl.trace.ll.dp_trace)
+#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.mcl.trace.hl.htt2_frm)
+#define QDF_NBUF_CB_TX_VDEV_ID(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.mcl.trace.ll.vdev_id)
+#define QDF_NBUF_CB_TX_IPA_OWNED(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.mcl.ipa.owned)
+#define QDF_NBUF_CB_TX_IPA_PRIV(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.mcl.ipa.priv)
+#define QDF_NBUF_CB_TX_FTYPE(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.win.ftype)
+#define QDF_NBUF_CB_TX_SUBMIT_TS(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.win.submit_ts)
+#define QDF_NBUF_CB_TX_FCTX(skb) \
+	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.win.fctx)
+#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
+		(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.win.vdev_ctx)
+
+
+/* assume the OS provides a single fragment */
+#define __qdf_nbuf_get_num_frags(skb)		   \
+	(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
+
+#define __qdf_nbuf_reset_num_frags(skb) \
+	do { \
+		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0; \
+	} while (0)
+
+/**
+ *   end of nbuf->cb access macros
+ */
+
+typedef void (*qdf_nbuf_trace_update_t)(char *);
+
+#define __qdf_nbuf_mapped_paddr_get(skb) QDF_NBUF_CB_PADDR(skb)
+
+#define __qdf_nbuf_mapped_paddr_set(skb, paddr)	\
+	do { \
+		QDF_NBUF_CB_PADDR(skb) = paddr; \
+	} while (0)
+
+#define __qdf_nbuf_frag_push_head(					\
+	skb, frag_len, frag_vaddr, frag_paddr)				\
+	do {					\
+		QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;		\
+		QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = frag_vaddr;	\
+		QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = frag_paddr;	\
+		QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = frag_len;		\
+	} while (0)
+
+#define __qdf_nbuf_get_frag_vaddr(skb, frag_num)		\
+	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
+	 QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) : ((skb)->data))
+
+#define __qdf_nbuf_get_frag_vaddr_always(skb)       \
+			QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb)
+
+#define __qdf_nbuf_get_frag_paddr(skb, frag_num)			\
+	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
+	 QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) :				\
+	 /* assume that the OS only provides a single fragment */	\
+	 QDF_NBUF_CB_PADDR(skb))
+
+#define __qdf_nbuf_get_frag_len(skb, frag_num)			\
+	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb)) ?		\
+	 QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) : (skb)->len)
+
+#define __qdf_nbuf_get_frag_is_wordstream(skb, frag_num)		\
+	((frag_num < QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))		\
+	 ? (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb))		\
+	 : (QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb)))
+
+#define __qdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wstrm)	\
+	do {								\
+		if (frag_num >= QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb))	\
+			frag_num = QDF_NBUF_CB_TX_MAX_EXTRA_FRAGS;	\
+		if (frag_num)						\
+			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) =  \
+							      is_wstrm; \
+		else					\
+			QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) =   \
+							      is_wstrm; \
+	} while (0)
+
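
The extra-fragment macros above are normally used together when an out-of-band, DMA-mapped header has to be transmitted ahead of the skb payload. A minimal sketch (not part of this patch), assuming the caller has already allocated and mapped the header; hdr_vaddr, hdr_paddr and hdr_len are placeholder names:

static dma_addr_t example_tx_header_frag(struct sk_buff *skb, void *hdr_vaddr,
					 dma_addr_t hdr_paddr, uint16_t hdr_len)
{
	/* register the mapped header as extra fragment 0 */
	__qdf_nbuf_frag_push_head(skb, hdr_len, hdr_vaddr, hdr_paddr);

	/*
	 * __qdf_nbuf_get_num_frags(skb) is now 2: index 0 resolves to the
	 * pushed header, while any index past the extra-frag count resolves
	 * back to the skb payload itself.
	 */
	return __qdf_nbuf_get_frag_paddr(skb, 0);
}
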
+#define __qdf_nbuf_set_vdev_ctx(skb, vdev_ctx) \
+	do { \
+		QDF_NBUF_CB_TX_VDEV_CTX((skb)) = (vdev_ctx); \
+	} while (0)
+
+#define __qdf_nbuf_get_vdev_ctx(skb) \
+	QDF_NBUF_CB_TX_VDEV_CTX((skb))
+
+#define __qdf_nbuf_set_fctx_type(skb, ctx, type) \
+	do { \
+		QDF_NBUF_CB_TX_FCTX((skb)) = (ctx);	\
+		QDF_NBUF_CB_TX_FTYPE((skb)) = (type); \
+	} while (0)
+
+#define __qdf_nbuf_get_fctx(skb) \
+		 QDF_NBUF_CB_TX_FCTX((skb))
+
+#define __qdf_nbuf_get_ftype(skb) \
+		 QDF_NBUF_CB_TX_FTYPE((skb))
+
+#define __qdf_nbuf_set_chfrag_start(skb, val) \
+	do { \
+		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb))) = val; \
+	} while (0)
+
+#define __qdf_nbuf_is_chfrag_start(skb) \
+	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START((skb)))
+
+#define __qdf_nbuf_set_chfrag_end(skb, val) \
+	do { \
+		(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb))) = val; \
+	} while (0)
+
+#define __qdf_nbuf_is_chfrag_end(skb) \
+	(QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END((skb)))
+
+#define __qdf_nbuf_trace_set_proto_type(skb, proto_type)	\
+	do { \
+		QDF_NBUF_CB_TX_PROTO_TYPE(skb) = (proto_type); \
+	} while (0)
+
+#define __qdf_nbuf_trace_get_proto_type(skb) \
+	QDF_NBUF_CB_TX_PROTO_TYPE(skb)
+
+#define __qdf_nbuf_data_attr_get(skb)		\
+	QDF_NBUF_CB_TX_DATA_ATTR(skb)
+#define __qdf_nbuf_data_attr_set(skb, data_attr) \
+	do { \
+		QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr); \
+	} while (0)
+
+#define __qdf_nbuf_ipa_owned_get(skb) \
+	QDF_NBUF_CB_TX_IPA_OWNED(skb)
+
+#define __qdf_nbuf_ipa_owned_set(skb) \
+	do { \
+		QDF_NBUF_CB_TX_IPA_OWNED(skb) = 1; \
+	} while (0)
+
+#define __qdf_nbuf_ipa_priv_get(skb)	\
+	QDF_NBUF_CB_TX_IPA_PRIV(skb)
+
+#define __qdf_nbuf_ipa_priv_set(skb, priv) \
+	do { \
+		QDF_NBUF_CB_TX_IPA_PRIV(skb) = (priv); \
+	} while (0)
+
+/**
+ * __qdf_nbuf_num_frags_init() - init extra frags
+ * @skb: sk buffer
+ *
+ * Return: none
+ */
+static inline
+void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
+{
+	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
+}
+
+typedef enum {
+	CB_FTYPE_MCAST2UCAST = 1,
+	CB_FTYPE_TSO = 2,
+	CB_FTYPE_TSO_SG = 3,
+	CB_FTYPE_SG = 4,
+} CB_FTYPE;
+
+/*
+ * prototypes. Implemented in qdf_nbuf.c
+ */
+__qdf_nbuf_t __qdf_nbuf_alloc(__qdf_device_t osdev, size_t size, int reserve,
+			int align, int prio);
+void __qdf_nbuf_free(struct sk_buff *skb);
+QDF_STATUS __qdf_nbuf_map(__qdf_device_t osdev,
+			struct sk_buff *skb, qdf_dma_dir_t dir);
+void __qdf_nbuf_unmap(__qdf_device_t osdev,
+			struct sk_buff *skb, qdf_dma_dir_t dir);
+QDF_STATUS __qdf_nbuf_map_single(__qdf_device_t osdev,
+				 struct sk_buff *skb, qdf_dma_dir_t dir);
+void __qdf_nbuf_unmap_single(__qdf_device_t osdev,
+			struct sk_buff *skb, qdf_dma_dir_t dir);
+void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr);
+
+QDF_STATUS __qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap);
+void __qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap);
+void __qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg);
+QDF_STATUS __qdf_nbuf_map_nbytes(qdf_device_t osdev, struct sk_buff *skb,
+	qdf_dma_dir_t dir, int nbytes);
+void __qdf_nbuf_unmap_nbytes(qdf_device_t osdev, struct sk_buff *skb,
+	qdf_dma_dir_t dir, int nbytes);
+#ifndef REMOVE_INIT_DEBUG_CODE
+void __qdf_nbuf_sync_for_cpu(qdf_device_t osdev, struct sk_buff *skb,
+	qdf_dma_dir_t dir);
+#endif
+QDF_STATUS __qdf_nbuf_map_nbytes_single(
+	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
+void __qdf_nbuf_unmap_nbytes_single(
+	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes);
+void __qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg);
+uint32_t __qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag);
+void __qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg);
+QDF_STATUS __qdf_nbuf_frag_map(
+	qdf_device_t osdev, __qdf_nbuf_t nbuf,
+	int offset, qdf_dma_dir_t dir, int cur_frag);
+QDF_STATUS
+__qdf_nbuf_get_vlan_info(qdf_net_handle_t hdl, struct sk_buff *skb,
+		qdf_net_vlanhdr_t *vlan);
+
+
+#ifdef QCA_PKT_PROTO_TRACE
+void __qdf_nbuf_trace_update(struct sk_buff *buf, char *event_string);
+#else
+#define __qdf_nbuf_trace_update(skb, event_string)
+#endif /* QCA_PKT_PROTO_TRACE */
+
+/**
+ * __qdf_to_status() - OS to QDF status conversion
+ * @error : OS error
+ *
+ * Return: QDF status
+ */
+static inline QDF_STATUS __qdf_to_status(signed int error)
+{
+	switch (error) {
+	case 0:
+		return QDF_STATUS_SUCCESS;
+	case ENOMEM:
+	case -ENOMEM:
+		return QDF_STATUS_E_NOMEM;
+	default:
+		return QDF_STATUS_E_NOSUPPORT;
+	}
+}
+
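
As a sketch of the intended use of __qdf_to_status(), the zero-or-negative return value of a kernel call can be translated directly into a QDF_STATUS; example_grow_tailroom() below is an illustrative helper, not part of the patch:

static inline QDF_STATUS example_grow_tailroom(struct sk_buff *skb, int extra)
{
	int err = pskb_expand_head(skb, 0, extra, GFP_ATOMIC);

	/* 0 -> QDF_STATUS_SUCCESS, -ENOMEM -> QDF_STATUS_E_NOMEM,
	 * anything else -> QDF_STATUS_E_NOSUPPORT
	 */
	return __qdf_to_status(err);
}
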
+/**
+ * __qdf_nbuf_len() - return the amount of valid data in the skb
+ * @skb: Pointer to network buffer
+ *
+ * This API returns the amount of valid data in the skb. If there are extra
+ * frags, their length is included in the total.
+ *
+ * Return: network buffer length
+ */
+static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
+{
+	int i, extra_frag_len = 0;
+
+	i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
+	if (i > 0)
+		extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
+
+	return extra_frag_len + skb->len;
+}
+
+/**
+ * __qdf_nbuf_cat() - link two nbufs
+ * @dst: Buffer to piggyback into
+ * @src: Buffer to append
+ *
+ * Link two nbufs: the data of the src buffer is copied into the tail of dst.
+ * On success the src skb is released.
+ *
+ * Return: QDF_STATUS of the call. On failure the src skb is left untouched
+ *         and the caller still owns it.
+ */
+static inline QDF_STATUS
+__qdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
+{
+	QDF_STATUS error = 0;
+
+	qdf_assert(dst && src);
+
+	/*
+	 * Since pskb_expand_head unconditionally reallocates the skb->head
+	 * buffer, first check whether the current buffer is already large
+	 * enough.
+	 */
+	if (skb_tailroom(dst) < src->len) {
+		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
+		if (error)
+			return __qdf_to_status(error);
+	}
+	memcpy(skb_tail_pointer(dst), src->data, src->len);
+
+	skb_put(dst, src->len);
+	dev_kfree_skb_any(src);
+
+	return __qdf_to_status(error);
+}
+
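
A usage sketch for __qdf_nbuf_cat(): on success src is consumed, on failure the caller still owns it and must decide what to do with it. dst and src are placeholder buffers and the helper is illustrative only:

static QDF_STATUS example_merge_nbufs(struct sk_buff *dst, struct sk_buff *src)
{
	QDF_STATUS status = __qdf_nbuf_cat(dst, src);

	if (QDF_STATUS_SUCCESS != status) {
		/* src was not consumed on failure; drop it explicitly */
		dev_kfree_skb_any(src);
		return status;
	}

	/* on success src has already been freed by __qdf_nbuf_cat() */
	return QDF_STATUS_SUCCESS;
}
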
+/*
+ * nbuf manipulation routines
+ */
+/**
+ * __qdf_nbuf_headroom() - return the amount of head space available
+ * @skb: Pointer to network buffer
+ *
+ * Return: amount of head room
+ */
+static inline int __qdf_nbuf_headroom(struct sk_buff *skb)
+{
+	return skb_headroom(skb);
+}
+
+/**
+ * __qdf_nbuf_tailroom() - return the amount of tail space available
+ * @skb: Pointer to network buffer
+ *
+ * Return: amount of tail room
+ */
+static inline uint32_t __qdf_nbuf_tailroom(struct sk_buff *skb)
+{
+	return skb_tailroom(skb);
+}
+
+/**
+ * __qdf_nbuf_push_head() - Push data in the front
+ * @skb: Pointer to network buffer
+ * @size: size to be pushed
+ *
+ * Return: New data pointer of this buf after data has been pushed,
+ *         or NULL if there is not enough room in this buf.
+ */
+static inline uint8_t *__qdf_nbuf_push_head(struct sk_buff *skb, size_t size)
+{
+	if (QDF_NBUF_CB_PADDR(skb))
+		QDF_NBUF_CB_PADDR(skb) -= size;
+
+	return skb_push(skb, size);
+}
+
+/**
+ * __qdf_nbuf_put_tail() - Puts data in the end
+ * @skb: Pointer to network buffer
+ * @size: size of data to be added at the tail
+ *
+ * Return: data pointer of this buf where new data has to be
+ *         put, or NULL if there is not enough room in this buf.
+ */
+static inline uint8_t *__qdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
+{
+	if (skb_tailroom(skb) < size) {
+		if (unlikely(pskb_expand_head(skb, 0,
+			size - skb_tailroom(skb), GFP_ATOMIC))) {
+			dev_kfree_skb_any(skb);
+			return NULL;
+		}
+	}
+	return skb_put(skb, size);
+}
+
+/**
+ * __qdf_nbuf_pull_head() - pull data out from the front
+ * @skb: Pointer to network buffer
+ * @size: size to be popped
+ *
+ * Return: New data pointer of this buf after data has been popped,
+ *	   or NULL if there is not sufficient data to pull.
+ */
+static inline uint8_t *__qdf_nbuf_pull_head(struct sk_buff *skb, size_t size)
+{
+	if (QDF_NBUF_CB_PADDR(skb))
+		QDF_NBUF_CB_PADDR(skb) += size;
+
+	return skb_pull(skb, size);
+}
+
+/**
+ * __qdf_nbuf_trim_tail() - trim data out from the end
+ * @skb: Pointer to network buffer
+ * @size: size to be popped
+ *
+ * Return: none
+ */
+static inline void __qdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
+{
+	return skb_trim(skb, skb->len - size);
+}
+
+
+/*
+ * prototypes. Implemented in qdf_nbuf.c
+ */
+qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb);
+QDF_STATUS __qdf_nbuf_set_rx_cksum(struct sk_buff *skb,
+				qdf_nbuf_rx_cksum_t *cksum);
+uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb);
+void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
+uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb);
+void __qdf_nbuf_ref(struct sk_buff *skb);
+int __qdf_nbuf_shared(struct sk_buff *skb);
+
+/*
+ * qdf_nbuf_pool_delete() implementation - do nothing in linux
+ */
+#define __qdf_nbuf_pool_delete(osdev)
+
+/**
+ * __qdf_nbuf_clone() - clone the nbuf (copy is readonly)
+ * @skb: Pointer to network buffer
+ *
+ * If GFP_ATOMIC is overkill, the caller context could be checked first
+ * (for example with "in_irq() || irqs_disabled()") and GFP_KERNEL used
+ * when not in atomic context.
+ *
+ * Return: cloned skb
+ */
+static inline struct sk_buff *__qdf_nbuf_clone(struct sk_buff *skb)
+{
+	return skb_clone(skb, GFP_ATOMIC);
+}
+
+/**
+ * __qdf_nbuf_copy() - returns a private copy of the skb
+ * @skb: Pointer to network buffer
+ *
+ * This API returns a private copy of the skb; the returned skb is completely
+ * modifiable by the caller.
+ *
+ * Return: skb or NULL
+ */
+static inline struct sk_buff *__qdf_nbuf_copy(struct sk_buff *skb)
+{
+	return skb_copy(skb, GFP_ATOMIC);
+}
+
+#define __qdf_nbuf_reserve      skb_reserve
+
+
+/**
+ * __qdf_nbuf_head() - return the skb's head pointer
+ * @skb: Pointer to network buffer
+ *
+ * Return: Pointer to head buffer
+ */
+static inline uint8_t *__qdf_nbuf_head(struct sk_buff *skb)
+{
+	return skb->head;
+}
+
+/**
+ * __qdf_nbuf_data() - return the pointer to data header in the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: Pointer to skb data
+ */
+static inline uint8_t *__qdf_nbuf_data(struct sk_buff *skb)
+{
+	return skb->data;
+}
+
+/**
+ * __qdf_nbuf_get_protocol() - return the protocol value of the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: skb protocol
+ */
+static inline uint16_t __qdf_nbuf_get_protocol(struct sk_buff *skb)
+{
+	return skb->protocol;
+}
+
+/**
+ * __qdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: skb ip_summed
+ */
+static inline uint8_t __qdf_nbuf_get_ip_summed(struct sk_buff *skb)
+{
+	return skb->ip_summed;
+}
+
+/**
+ * __qdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
+ * @skb: Pointer to network buffer
+ * @ip_summed: ip checksum
+ *
+ * Return: none
+ */
+static inline void __qdf_nbuf_set_ip_summed(struct sk_buff *skb,
+		 uint8_t ip_summed)
+{
+	skb->ip_summed = ip_summed;
+}
+
+/**
+ * __qdf_nbuf_get_priority() - return the priority value of the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: skb priority
+ */
+static inline uint32_t __qdf_nbuf_get_priority(struct sk_buff *skb)
+{
+	return skb->priority;
+}
+
+/**
+ * __qdf_nbuf_set_priority() - sets the priority value of the skb
+ * @skb: Pointer to network buffer
+ * @p: priority
+ *
+ * Return: none
+ */
+static inline void __qdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
+{
+	skb->priority = p;
+}
+
+/**
+ * __qdf_nbuf_set_next() - sets the next skb pointer of the current skb
+ * @skb: Current skb
+ * @next_skb: Next skb
+ *
+ * Return: void
+ */
+static inline void
+__qdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
+{
+	skb->next = skb_next;
+}
+
+/**
+ * __qdf_nbuf_next() - return the next skb pointer of the current skb
+ * @skb: Current skb
+ *
+ * Return: the next skb pointed to by the current skb
+ */
+static inline struct sk_buff *__qdf_nbuf_next(struct sk_buff *skb)
+{
+	return skb->next;
+}
+
+/**
+ * __qdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
+ * @skb: Current skb
+ * @next_skb: Next skb
+ *
+ * This function is used to link up extension skbs in a chain. It does not
+ * handle linking to the head skb.
+ *
+ * Return: none
+ */
+static inline void
+__qdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
+{
+	skb->next = skb_next;
+}
+
+/**
+ * __qdf_nbuf_next_ext() - return the next skb pointer of the current skb
+ * @skb: Current skb
+ *
+ * Return: the next skb pointed to by the current skb
+ */
+static inline struct sk_buff *__qdf_nbuf_next_ext(struct sk_buff *skb)
+{
+	return skb->next;
+}
+
+/**
+ * __qdf_nbuf_append_ext_list() - link list of packet extensions to the head
+ * @skb_head: head_buf nbuf holding head segment (single)
+ * @ext_list: nbuf list holding linked extensions to the head
+ * @ext_len: Total length of all buffers in the extension list
+ *
+ * This function is used to link up a list of packet extensions (seg1, seg2,
+ * ...) to the nbuf holding the head segment (seg0).
+ *
+ * Return: none
+ */
+static inline void
+__qdf_nbuf_append_ext_list(struct sk_buff *skb_head,
+			struct sk_buff *ext_list, size_t ext_len)
+{
+	skb_shinfo(skb_head)->frag_list = ext_list;
+	skb_head->data_len = ext_len;
+	skb_head->len += skb_head->data_len;
+}
+
+/**
+ * __qdf_nbuf_tx_free() - free a list of skbs
+ * @bufs: Pointer to the head of the network buffer list
+ * @tx_err: TX error
+ *
+ * Return: none
+ */
+static inline void __qdf_nbuf_tx_free(struct sk_buff *bufs, int tx_err)
+{
+	while (bufs) {
+		struct sk_buff *next = __qdf_nbuf_next(bufs);
+		__qdf_nbuf_free(bufs);
+		bufs = next;
+	}
+}
+
+/**
+ * __qdf_nbuf_get_age() - return the checksum value of the skb
+ * @skb: Pointer to network buffer
+ *
+ * Return: checksum value
+ */
+static inline uint32_t __qdf_nbuf_get_age(struct sk_buff *skb)
+{
+	return skb->csum;
+}
+
+/**
+ * __qdf_nbuf_set_age() - sets the checksum value of the skb
+ * @skb: Pointer to network buffer
+ * @v: Value
+ *
+ * Return: none
+ */
+static inline void __qdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
+{
+	skb->csum = v;
+}
+
+/**
+ * __qdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
+ * @skb: Pointer to network buffer
+ * @adj: Adjustment value
+ *
+ * Return: none
+ */
+static inline void __qdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
+{
+	skb->csum -= adj;
+}
+
+/**
+ * __qdf_nbuf_copy_bits() - copy data from the skb to a flat buffer
+ * @skb: Pointer to network buffer
+ * @offset: Offset into the skb data
+ * @len: Length to copy
+ * @to: Destination pointer
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static inline int32_t
+__qdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
+{
+	return skb_copy_bits(skb, offset, to, len);
+}
+
+/**
+ * __qdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
+ * @skb: Pointer to network buffer
+ * @len:  Packet length
+ *
+ * Return: none
+ */
+static inline void __qdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
+{
+	if (skb->len > len) {
+		skb_trim(skb, len);
+	} else {
+		if (skb_tailroom(skb) < len - skb->len) {
+			if (unlikely(pskb_expand_head(skb, 0,
+				len - skb->len - skb_tailroom(skb),
+				GFP_ATOMIC))) {
+				dev_kfree_skb_any(skb);
+				qdf_assert(0);
+			}
+		}
+		skb_put(skb, (len - skb->len));
+	}
+}
+
+/**
+ * __qdf_nbuf_set_protocol() - sets the protocol value of the skb
+ * @skb: Pointer to network buffer
+ * @protocol: Protocol type
+ *
+ * Return: none
+ */
+static inline void
+__qdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
+{
+	skb->protocol = protocol;
+}
+
+#define __qdf_nbuf_set_tx_htt2_frm(skb, candi) \
+	do { \
+		QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) = (candi); \
+	} while (0)
+
+#define __qdf_nbuf_get_tx_htt2_frm(skb)	\
+	QDF_NBUF_CB_TX_HL_HTT2_FRM(skb)
+
+uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
+	struct qdf_tso_info_t *tso_info);
+
+uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
+
+static inline bool __qdf_nbuf_is_tso(struct sk_buff *skb)
+{
+	if (skb_is_gso(skb) &&
+		(skb_is_gso_v6(skb) ||
+		(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)))
+		return true;
+	else
+		return false;
+}
+
+struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb);
+
+/**
+ * __qdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
+ *			      and get hw_classify by peeking
+ *			      into packet
+ * @nbuf:		Network buffer (skb on Linux)
+ * @pkt_type:		Pkt type (from enum htt_pkt_type)
+ * @pkt_subtype:	Bit 4 of this field in the HTT descriptor needs
+ *			to be set when CE classification is supported;
+ *			this macro sets it.
+ * @hw_classify:	This is a flag which is set to indicate
+ *			CE classification is enabled.
+ *			Do not set this bit for VLAN packets
+ *			OR for mcast / bcast frames.
+ *
+ * This macro parses the payload to figure out relevant Tx meta-data e.g.
+ * whether to enable tx_classify bit in CE.
+ *
+ * Overrides pkt_type only if required for 802.3 frames (original Ethernet).
+ * If the protocol field is less than ETH_P_802_3_MIN (0x600), it is a
+ * length and the frame is 802.3; otherwise it is an Ethernet Type II frame
+ * (RFC 894).
+ * Bit 4 in pkt_subtype is the tx_classify bit.
+ *
+ * Return:	void
+ */
+#define __qdf_nbuf_tx_info_get(skb, pkt_type,			\
+				pkt_subtype, hw_classify)	\
+do {								\
+	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
+	uint16_t ether_type = ntohs(eh->h_proto);		\
+	bool is_mc_bc;						\
+								\
+	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
+		   is_multicast_ether_addr((uint8_t *)eh);	\
+								\
+	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
+		hw_classify = 1;				\
+		pkt_subtype = 0x01 <<				\
+			HTT_TX_CLASSIFY_BIT_S;			\
+	}							\
+								\
+	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
+		pkt_type = htt_pkt_type_ethernet;		\
+								\
+} while (0)
+
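
An illustrative caller of __qdf_nbuf_tx_info_get(); htt_pkt_type_eth2 and the HTT classify definitions come from the data-path HTT headers, and the default pkt_type is an assumption about the caller, not something mandated by this macro:

static void example_classify_tx_frame(struct sk_buff *skb, uint8_t *type,
				      uint8_t *subtype, uint8_t *classify)
{
	uint8_t pkt_type = htt_pkt_type_eth2;	/* assumed caller default */
	uint8_t pkt_subtype = 0;
	uint8_t hw_classify = 0;

	__qdf_nbuf_tx_info_get(skb, pkt_type, pkt_subtype, hw_classify);

	/* the three outputs are then programmed into the HW tx descriptor */
	*type = pkt_type;
	*subtype = pkt_subtype;
	*classify = hw_classify;
}
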
+/**
+ * nbuf private buffer routines
+ */
+
+/**
+ * __qdf_nbuf_peek_header() - return the header's address and length
+ * @skb: Pointer to network buffer
+ * @addr: Pointer to store header's addr
+ * @len: Pointer to store the network buffer length
+ *
+ * Return: none
+ */
+static inline void
+__qdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
+{
+	*addr = skb->data;
+	*len = skb->len;
+}
+
+/**
+ * typedef __qdf_nbuf_queue_t - network buffer queue
+ * @head: Head pointer
+ * @tail: Tail pointer
+ * @qlen: Queue length
+ */
+typedef struct __qdf_nbuf_qhead {
+	struct sk_buff *head;
+	struct sk_buff *tail;
+	unsigned int qlen;
+} __qdf_nbuf_queue_t;
+
+/******************Functions *************/
+
+/**
+ * __qdf_nbuf_queue_init() - initialize the queue head
+ * @qhead: Queue head
+ *
+ * Return: QDF status
+ */
+static inline QDF_STATUS __qdf_nbuf_queue_init(__qdf_nbuf_queue_t *qhead)
+{
+	memset(qhead, 0, sizeof(struct __qdf_nbuf_qhead));
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_nbuf_queue_add() - add an skb in the tail of the queue
+ * @qhead: Queue head
+ * @skb: Pointer to network buffer
+ *
+ * This is a lockless version; the driver must acquire locks if it
+ * needs to synchronize.
+ *
+ * Return: none
+ */
+static inline void
+__qdf_nbuf_queue_add(__qdf_nbuf_queue_t *qhead, struct sk_buff *skb)
+{
+	skb->next = NULL;       /*Nullify the next ptr */
+
+	if (!qhead->head)
+		qhead->head = skb;
+	else
+		qhead->tail->next = skb;
+
+	qhead->tail = skb;
+	qhead->qlen++;
+}
+
+/**
+ * __qdf_nbuf_queue_append() - Append src list at the end of dest list
+ * @dest: target netbuf queue
+ * @src:  source netbuf queue
+ *
+ * Return: target netbuf queue
+ */
+static inline __qdf_nbuf_queue_t *
+__qdf_nbuf_queue_append(__qdf_nbuf_queue_t *dest, __qdf_nbuf_queue_t *src)
+{
+	if (!dest)
+		return NULL;
+	else if (!src || !(src->head))
+		return dest;
+
+	if (!(dest->head))
+		dest->head = src->head;
+	else
+		dest->tail->next = src->head;
+
+	dest->tail = src->tail;
+	dest->qlen += src->qlen;
+	return dest;
+}
+
+/**
+ * __qdf_nbuf_queue_insert_head() - add an skb at the head of the queue
+ * @qhead: Queue head
+ * @skb: Pointer to network buffer
+ *
+ * This is a lockless version; the driver must acquire locks if it needs to
+ * synchronize.
+ *
+ * Return: none
+ */
+static inline void
+__qdf_nbuf_queue_insert_head(__qdf_nbuf_queue_t *qhead, __qdf_nbuf_t skb)
+{
+	if (!qhead->head) {
+		/*Empty queue Tail pointer Must be updated */
+		qhead->tail = skb;
+	}
+	skb->next = qhead->head;
+	qhead->head = skb;
+	qhead->qlen++;
+}
+
+/**
+ * __qdf_nbuf_queue_remove() - remove a skb from the head of the queue
+ * @qhead: Queue head
+ *
+ * This is a lockless version. Driver should take care of the locks
+ *
+ * Return: skb or NULL
+ */
+static inline
+struct sk_buff *__qdf_nbuf_queue_remove(__qdf_nbuf_queue_t *qhead)
+{
+	__qdf_nbuf_t tmp = NULL;
+
+	if (qhead->head) {
+		qhead->qlen--;
+		tmp = qhead->head;
+		if (qhead->head == qhead->tail) {
+			qhead->head = NULL;
+			qhead->tail = NULL;
+		} else {
+			qhead->head = tmp->next;
+		}
+		tmp->next = NULL;
+	}
+	return tmp;
+}
+
+/**
+ * __qdf_nbuf_queue_free() - free a queue
+ * @qhead: head of queue
+ *
+ * Return: QDF status
+ */
+static inline QDF_STATUS
+__qdf_nbuf_queue_free(__qdf_nbuf_queue_t *qhead)
+{
+	__qdf_nbuf_t  buf = NULL;
+
+	while ((buf = __qdf_nbuf_queue_remove(qhead)) != NULL)
+		__qdf_nbuf_free(buf);
+	return QDF_STATUS_SUCCESS;
+}
+
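
The queue helpers above are lockless, so callers serialize externally. A minimal usage sketch (first and second are placeholder buffers, and the helper is illustrative only):

static void example_queue_usage(struct sk_buff *first, struct sk_buff *second)
{
	__qdf_nbuf_queue_t q;
	struct sk_buff *skb;

	__qdf_nbuf_queue_init(&q);
	__qdf_nbuf_queue_add(&q, first);
	__qdf_nbuf_queue_add(&q, second);

	/* qlen is now 2; drain in FIFO order and free each buffer */
	while ((skb = __qdf_nbuf_queue_remove(&q)) != NULL)
		__qdf_nbuf_free(skb);
}
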
+
+/**
+ * __qdf_nbuf_queue_first() - returns the first skb in the queue
+ * @qhead: head of queue
+ *
+ * Return: NULL if the queue is empty
+ */
+static inline struct sk_buff *
+__qdf_nbuf_queue_first(__qdf_nbuf_queue_t *qhead)
+{
+	return qhead->head;
+}
+
+/**
+ * __qdf_nbuf_queue_len() - return the queue length
+ * @qhead: Queue head
+ *
+ * Return: Queue length
+ */
+static inline uint32_t __qdf_nbuf_queue_len(__qdf_nbuf_queue_t *qhead)
+{
+	return qhead->qlen;
+}
+
+/**
+ * __qdf_nbuf_queue_next() - return the next skb from packet chain
+ * @skb: Pointer to network buffer
+ *
+ * This API returns the next skb from the packet chain; note that the skb
+ * remains in the queue.
+ *
+ * Return: NULL if no packets are there
+ */
+static inline struct sk_buff *__qdf_nbuf_queue_next(struct sk_buff *skb)
+{
+	return skb->next;
+}
+
+/**
+ * __qdf_nbuf_is_queue_empty() - check if the queue is empty or not
+ * @qhead: Queue head
+ *
+ * Return: true if length is 0 else false
+ */
+static inline bool __qdf_nbuf_is_queue_empty(__qdf_nbuf_queue_t *qhead)
+{
+	return qhead->qlen == 0;
+}
+
+/*
+ * Use sk_buff_head as the implementation of qdf_nbuf_queue_t.
+ * Because the queue head will most likely be put in some structure,
+ * we don't use pointer type as the definition.
+ */
+
+static inline void
+__qdf_nbuf_set_send_complete_flag(struct sk_buff *skb, bool flag)
+{
+	return;
+}
+
+/**
+ * __qdf_nbuf_realloc_headroom() - expand the headroom of the data region
+ *        while keeping the skb shell intact. In case of failure the skb
+ *        is released.
+ * @skb: sk buff
+ * @headroom: size of headroom
+ *
+ * Return: skb or NULL
+ */
+static inline struct sk_buff *
+__qdf_nbuf_realloc_headroom(struct sk_buff *skb, uint32_t headroom)
+{
+	if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
+		dev_kfree_skb_any(skb);
+		skb = NULL;
+	}
+	return skb;
+}
+
+/**
+ * __qdf_nbuf_realloc_tailroom() - expand the tailroom of the data region
+ *        while keeping the skb shell intact. In case of failure the skb
+ *        is released.
+ * @skb: sk buff
+ * @tailroom: size of tailroom
+ *
+ * Return: skb or NULL
+ */
+static inline struct sk_buff *
+__qdf_nbuf_realloc_tailroom(struct sk_buff *skb, uint32_t tailroom)
+{
+	if (likely(!pskb_expand_head(skb, 0, tailroom, GFP_ATOMIC)))
+		return skb;
+	/* unlikely path */
+	dev_kfree_skb_any(skb);
+	return NULL;
+}
+
+/**
+ * __qdf_nbuf_unshare() - skb unshare
+ * @skb: sk buff
+ *
+ * Create a version of the specified nbuf whose contents can be safely
+ * modified without affecting other users. If the nbuf is a clone, this
+ * function creates a new copy of the data. If the buffer is not a clone,
+ * the original buffer is returned.
+ *
+ * Return: skb or NULL
+ */
+static inline struct sk_buff *
+__qdf_nbuf_unshare(struct sk_buff *skb)
+{
+	return skb_unshare(skb, GFP_ATOMIC);
+}
+
+/**
+ * __qdf_nbuf_is_cloned() - test whether the nbuf is cloned or not
+ * @skb: sk buff
+ *
+ * Return: true/false
+ */
+static inline bool __qdf_nbuf_is_cloned(struct sk_buff *skb)
+{
+	return skb_cloned(skb);
+}
+
+/**
+ * __qdf_nbuf_pool_init() - init pool
+ * @net: net handle
+ *
+ * Return: QDF status
+ */
+static inline QDF_STATUS __qdf_nbuf_pool_init(qdf_net_handle_t net)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * __qdf_nbuf_expand() - Expand both tailroom & headroom. In case of failure
+ *        release the skb.
+ * @skb: sk buff
+ * @headroom: size of headroom
+ * @tailroom: size of tailroom
+ *
+ * Return: skb or NULL
+ */
+static inline struct sk_buff *
+__qdf_nbuf_expand(struct sk_buff *skb, uint32_t headroom, uint32_t tailroom)
+{
+	if (likely(!pskb_expand_head(skb, headroom, tailroom, GFP_ATOMIC)))
+		return skb;
+
+	dev_kfree_skb_any(skb);
+	return NULL;
+}
+
+/**
+ * __qdf_nbuf_tx_cksum_info() - tx checksum info
+ *
+ * Return: true/false
+ */
+static inline bool
+__qdf_nbuf_tx_cksum_info(struct sk_buff *skb, uint8_t **hdr_off,
+			 uint8_t **where)
+{
+	qdf_assert(0);
+	return false;
+}
+
+/**
+ * __qdf_nbuf_reset_ctxt() - mem zero control block
+ * @nbuf: buffer
+ *
+ * Return: none
+ */
+static inline void __qdf_nbuf_reset_ctxt(__qdf_nbuf_t nbuf)
+{
+	qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
+}
+
+/**
+ * __qdf_nbuf_network_header() - get network header
+ * @buf: buffer
+ *
+ * Return: network header pointer
+ */
+static inline void *__qdf_nbuf_network_header(__qdf_nbuf_t buf)
+{
+	return skb_network_header(buf);
+}
+
+/**
+ * __qdf_nbuf_transport_header() - get transport header
+ * @buf: buffer
+ *
+ * Return: transport header pointer
+ */
+static inline void *__qdf_nbuf_transport_header(__qdf_nbuf_t buf)
+{
+	return skb_transport_header(buf);
+}
+
+/**
+ * __qdf_nbuf_tcp_tso_size() - return the TCP segment size (MSS) passed as
+ *  part of the network buffer by the network stack
+ * @skb: sk buff
+ *
+ * Return: TCP MSS size
+ */
+static inline size_t __qdf_nbuf_tcp_tso_size(struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_size;
+}
+
+/**
+ * __qdf_nbuf_init() - Re-initializes the skb for re-use
+ * @nbuf: sk buff
+ *
+ * Return: none
+ */
+static inline void __qdf_nbuf_init(__qdf_nbuf_t nbuf)
+{
+	atomic_set(&nbuf->users, 1);
+	nbuf->data = nbuf->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(nbuf);
+}
+
+/**
+ * __qdf_nbuf_set_rx_info() - set rx info
+ * @nbuf: sk buffer
+ * @info: rx info
+ * @len: length
+ *
+ * Return: none
+ */
+static inline void
+__qdf_nbuf_set_rx_info(__qdf_nbuf_t nbuf, void *info, uint32_t len)
+{
+	/* The platform may have increased the skb->cb size, e.g. to 96 bytes;
+	 * in that case len is large enough to hold the rx status info struct.
+	 */
+	uint8_t offset = sizeof(struct qdf_nbuf_cb);
+	uint32_t max = sizeof(((struct sk_buff *)0)->cb)-offset;
+
+	len = (len > max ? max : len);
+
+	memcpy(((uint8_t *)(nbuf->cb) + offset), info, len);
+}
+
+/**
+ * __qdf_nbuf_get_rx_info() - get rx info
+ * @nbuf: sk buffer
+ *
+ * Return: rx_info
+ */
+static inline void *
+__qdf_nbuf_get_rx_info(__qdf_nbuf_t nbuf)
+{
+	uint8_t offset = sizeof(struct qdf_nbuf_cb);
+	return (void *)((uint8_t *)(nbuf->cb) + offset);
+}
+
+/**
+ * __qdf_nbuf_get_cb() - returns a pointer to skb->cb
+ * @nbuf: sk buff
+ *
+ * Return: void ptr
+ */
+static inline void *
+__qdf_nbuf_get_cb(__qdf_nbuf_t nbuf)
+{
+	return (void *)nbuf->cb;
+}
+
+/**
+ * __qdf_nbuf_headlen() - return the length of linear buffer of the skb
+ * @skb: sk buff
+ *
+ * Return: head size
+ */
+static inline size_t
+__qdf_nbuf_headlen(struct sk_buff *skb)
+{
+	return skb_headlen(skb);
+}
+
+/**
+ * __qdf_nbuf_get_nr_frags() - return the number of fragments in an skb
+ * @skb: sk buff
+ *
+ * Return: number of fragments
+ */
+static inline size_t __qdf_nbuf_get_nr_frags(struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->nr_frags;
+}
+
+/**
+ * __qdf_nbuf_tso_tcp_v4() - check whether the TSO TCP packet is IPv4
+ * @skb: sk buff
+ *
+ * Return: true/false
+ */
+static inline bool __qdf_nbuf_tso_tcp_v4(struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 ? 1 : 0;
+}
+
+/**
+ * __qdf_nbuf_tso_tcp_v6() - check whether the TSO TCP packet is IPv6
+ * @skb: sk buff
+ *
+ * Return: true/false
+ */
+static inline bool __qdf_nbuf_tso_tcp_v6(struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6 ? 1 : 0;
+}
+
+/**
+ * __qdf_nbuf_l2l3l4_hdr_len() - return the l2+l3+l4 header length of the skb
+ * @skb: sk buff
+ *
+ * Return: size of l2+l3+l4 header length
+ */
+static inline size_t __qdf_nbuf_l2l3l4_hdr_len(struct sk_buff *skb)
+{
+	return skb_transport_offset(skb) + tcp_hdrlen(skb);
+}
+
+/**
+ * __qdf_nbuf_is_nonlinear() - test whether the nbuf is nonlinear or not
+ * @skb: sk buff
+ *
+ * Return:  true/false
+ */
+static inline bool __qdf_nbuf_is_nonlinear(struct sk_buff *skb)
+{
+	if (skb_is_nonlinear(skb))
+		return true;
+	else
+		return false;
+}
+
+/**
+ * __qdf_nbuf_tcp_seq() - get the TCP sequence number of the skb
+ * @skb: sk buff
+ *
+ * Return: TCP sequence number
+ */
+static inline uint32_t __qdf_nbuf_tcp_seq(struct sk_buff *skb)
+{
+	return ntohl(tcp_hdr(skb)->seq);
+}
+
+/**
+ * __qdf_nbuf_get_priv_ptr() - get the priv pointer from the nbuf's private space
+ * @skb: sk buff
+ *
+ * Return: data pointer to typecast into your priv structure
+ */
+static inline uint8_t *
+__qdf_nbuf_get_priv_ptr(struct sk_buff *skb)
+{
+	return &skb->cb[8];
+}
+
+#endif /*_I_QDF_NET_BUF_H */

+ 52 - 0
qdf/linux/src/i_qdf_net_types.h

@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_net_types
+ * This file provides OS dependent net types API's.
+ */
+
+#ifndef _I_QDF_NET_TYPES_H
+#define _I_QDF_NET_TYPES_H
+
+#include <qdf_types.h>          /* uint8_t, etc. */
+#include <asm/checksum.h>
+#include <net/ip6_checksum.h>
+
+typedef struct in6_addr __in6_addr_t;
+typedef __wsum __wsum_t;
+
+static inline  int32_t __qdf_csum_ipv6(const struct in6_addr *saddr,
+				       const struct in6_addr *daddr,
+				       __u32 len, unsigned short proto,
+				       __wsum sum)
+{
+	return csum_ipv6_magic((struct in6_addr *)saddr,
+			       (struct in6_addr *)daddr, len, proto, sum);
+}
+
+#endif /* _I_QDF_NET_TYPES_H */
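
A sketch of how __qdf_csum_ipv6() is typically combined with the kernel's csum_partial() to fill in a UDP-over-IPv6 checksum in software; it assumes <linux/ipv6.h> and <linux/udp.h> are available to the caller and that the headers live in a linear buffer:

static uint16_t example_udp6_checksum(struct ipv6hdr *ip6h,
				      struct udphdr *udph, uint32_t udp_len)
{
	__wsum sum;

	udph->check = 0;
	sum = csum_partial(udph, udp_len, 0);

	/* fold in the IPv6 pseudo header (addresses, length, next header) */
	return (uint16_t)__qdf_csum_ipv6(&ip6h->saddr, &ip6h->daddr,
					 udp_len, IPPROTO_UDP, sum);
}
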

+ 88 - 0
qdf/linux/src/i_qdf_perf.h

@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_perf
+ * This file provides OS dependent perf API's.
+ */
+
+#ifndef _I_QDF_PERF_H
+#define _I_QDF_PERF_H
+
+#ifdef QCA_PERF_PROFILING
+
+#if (QCA_MIPS74K_PERF_PROFILING || QCA_MIPS24KK_PERF_PROFILING)
+#include <qdf_mips_perf_pvt.h>
+#endif
+
+/* #defines required for structures */
+#define MAX_SAMPLES_SHIFT   5   /* change this only*/
+#define MAX_SAMPLES         (1 << MAX_SAMPLES_SHIFT)
+#define INC_SAMPLES(x)      ((x + 1) & (MAX_SAMPLES - 1))
+#define MAX_SAMPLE_SZ       (sizeof(uint32_t) * MAX_SAMPLES)
+#define PER_SAMPLE_SZ       sizeof(uint32_t)
+
+/**
+ * typedef qdf_perf_entry_t - performance entry
+ * @list: pointer to next
+ * @child: pointer to child
+ * @parent: pointer to top
+ * @type: perf cntr
+ * @name: string
+ * @proc: pointer to proc entry
+ * @start_tsc: array of start tsc values
+ * @end_tsc: array of end tsc values
+ * @samples: array of samples
+ * @sample_idx: sample index
+ * @lock_irq: lock irq
+ */
+typedef struct qdf_os_perf_entry {
+	struct list_head        list;
+	struct list_head        child;
+
+	struct qdf_perf_entry   *parent;
+
+	qdf_perf_cntr_t type;
+	uint8_t *name;
+
+	struct proc_dir_entry   *proc;
+
+	uint64_t        start_tsc[MAX_SAMPLES];
+	uint64_t        end_tsc[MAX_SAMPLES];
+
+	uint32_t        samples[MAX_SAMPLES];
+	uint32_t        sample_idx;
+
+	spinlock_t      lock_irq;
+
+} qdf_perf_entry_t;
+
+/* typedefs */
+typedef void *__qdf_perf_id_t;
+
+#endif /* QCA_PERF_PROFILING */
+#endif /* _I_QDF_PERF_H */

+ 68 - 48
qdf/src/i_qdf_time.h → qdf/linux/src/i_qdf_time.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -26,13 +26,12 @@
  */
 
 /**
- * DOC: i_cdf_time.h
- *
- * Linux specific CDF timing APIs implementation
+ * DOC: i_qdf_time
+ * This file provides OS dependent time API's.
  */
 
-#ifndef _I_CDF_TIME_H
-#define _I_CDF_TIME_H
+#ifndef _I_QDF_TIME_H
+#define _I_QDF_TIME_H
 
 #include <linux/jiffies.h>
 #include <linux/delay.h>
@@ -41,150 +40,146 @@
 #include <net/cnss.h>
 #endif
 
-typedef unsigned long __cdf_time_t;
+typedef unsigned long __qdf_time_t;
 
 /**
- * __cdf_system_ticks() - get system ticks
+ * __qdf_system_ticks() - get system ticks
  *
  * Return: system tick in jiffies
  */
-static inline __cdf_time_t __cdf_system_ticks(void)
+static inline __qdf_time_t __qdf_system_ticks(void)
 {
 	return jiffies;
 }
 
 /**
- * __cdf_system_ticks_to_msecs() - convert system ticks into milli seconds
+ * __qdf_system_ticks_to_msecs() - convert system ticks into milli seconds
  * @ticks: System ticks
  *
  * Return: system tick converted into milli seconds
  */
-static inline uint32_t __cdf_system_ticks_to_msecs(unsigned long ticks)
+static inline uint32_t __qdf_system_ticks_to_msecs(unsigned long ticks)
 {
 	return jiffies_to_msecs(ticks);
 }
 
 /**
- * __cdf_system_msecs_to_ticks() - convert milli seconds into system ticks
+ * __qdf_system_msecs_to_ticks() - convert milli seconds into system ticks
  * @msecs: Milli seconds
  *
  * Return: milli seconds converted into system ticks
  */
-static inline __cdf_time_t __cdf_system_msecs_to_ticks(uint32_t msecs)
+static inline __qdf_time_t __qdf_system_msecs_to_ticks(uint32_t msecs)
 {
 	return msecs_to_jiffies(msecs);
 }
 
 /**
- * __cdf_get_system_uptime() - get system uptime
+ * __qdf_get_system_uptime() - get system uptime
  *
  * Return: system uptime in jiffies
  */
-static inline __cdf_time_t __cdf_get_system_uptime(void)
+static inline __qdf_time_t __qdf_get_system_uptime(void)
 {
 	return jiffies;
 }
 
-static inline __cdf_time_t __cdf_get_system_timestamp(void)
+static inline __qdf_time_t __qdf_get_system_timestamp(void)
 {
 	return (jiffies / HZ) * 1000 + (jiffies % HZ) * (1000 / HZ);
 }
 
+#ifdef CONFIG_ARM
 /**
- * __cdf_udelay() - delay execution for given microseconds
+ * __qdf_udelay() - delay execution for given microseconds
  * @usecs: Micro seconds to delay
  *
  * Return: none
  */
-static inline void __cdf_udelay(uint32_t usecs)
+static inline void __qdf_udelay(uint32_t usecs)
 {
-#ifdef CONFIG_ARM
 	/*
 	 * This is in support of XScale build.  They have a limit on the udelay
 	 * value, so we have to make sure we don't approach the limit
 	 */
-
 	uint32_t mticks;
 	uint32_t leftover;
 	int i;
-
 	/* slice into 1024 usec chunks (simplifies calculation) */
-
 	mticks = usecs >> 10;
 	leftover = usecs - (mticks << 10);
-
 	for (i = 0; i < mticks; i++)
 		udelay(1024);
-
 	udelay(leftover);
-
+}
 #else
+static inline void __qdf_udelay(uint32_t usecs)
+{
 	/* Normal Delay functions. Time specified in microseconds */
 	udelay(usecs);
-
-#endif
 }
+#endif
 
 /**
- * __cdf_mdelay() - delay execution for given milli seconds
- * @usecs: Milli seconds to delay
+ * __qdf_mdelay() - delay execution for given milliseconds
+ * @usecs: Milliseconds to delay
  *
  * Return: none
  */
-static inline void __cdf_mdelay(uint32_t msecs)
+static inline void __qdf_mdelay(uint32_t msecs)
 {
 	mdelay(msecs);
 }
 
 /**
- * __cdf_system_time_after() - Check if a is later than b
+ * __qdf_system_time_after() - Check if a is later than b
  * @a: Time stamp value a
  * @b: Time stamp value b
  *
  * Return:
- *	true if a < b else false
+ * true if a is later than b, else false
  */
-static inline bool __cdf_system_time_after(__cdf_time_t a, __cdf_time_t b)
+static inline bool __qdf_system_time_after(__qdf_time_t a, __qdf_time_t b)
 {
 	return (long)(b) - (long)(a) < 0;
 }
 
 /**
- * __cdf_system_time_before() - Check if a is before b
+ * __qdf_system_time_before() - Check if a is before b
  * @a: Time stamp value a
  * @b: Time stamp value b
  *
  * Return:
- *	true if a is before b else false
+ * true if a is before b else false
  */
-static inline bool __cdf_system_time_before(__cdf_time_t a, __cdf_time_t b)
+static inline bool __qdf_system_time_before(__qdf_time_t a, __qdf_time_t b)
 {
-	return __cdf_system_time_after(b, a);
+	return __qdf_system_time_after(b, a);
 }
 
 /**
- * __cdf_system_time_before() - Check if a atleast as recent as b, if not
- *				later
+ * __qdf_system_time_after_eq() - Check if a is at least as recent as b,
+ * i.e. a is not earlier than b
  * @a: Time stamp value a
  * @b: Time stamp value b
  *
  * Return:
- *	true if a >= b else false
+ * true if a >= b else false
  */
-static inline bool __cdf_system_time_after_eq(__cdf_time_t a, __cdf_time_t b)
+static inline bool __qdf_system_time_after_eq(__qdf_time_t a, __qdf_time_t b)
 {
 	return (long)(a) - (long)(b) >= 0;
 }
 
 /**
- * __cdf_get_monotonic_boottime() - get monotonic kernel boot time
- * This API is similar to cdf_get_system_boottime but it includes
+ * __qdf_get_monotonic_boottime() - get monotonic kernel boot time
+ * This API is similar to qdf_get_system_boottime but it includes
  * time spent in suspend.
  *
  * Return: Time in microseconds
  */
 #ifdef CONFIG_CNSS
-static inline uint64_t __cdf_get_monotonic_boottime(void)
+static inline uint64_t __qdf_get_monotonic_boottime(void)
 {
 	struct timespec ts;
 
@@ -193,25 +188,50 @@ static inline uint64_t __cdf_get_monotonic_boottime(void)
 	return ((uint64_t) ts.tv_sec * 1000000) + (ts.tv_nsec / 1000);
 }
 #else
-static inline uint64_t __cdf_get_monotonic_boottime(void)
+static inline uint64_t __qdf_get_monotonic_boottime(void)
 {
-	return __cdf_system_ticks_to_msecs(__cdf_system_ticks()) * 1000;
+	return __qdf_system_ticks_to_msecs(__qdf_system_ticks()) * 1000;
 }
 #endif /* CONFIG_CNSS */
 
 #ifdef QCA_WIFI_3_0_ADRASTEA
+
 /**
- * __cdf_get_qtimer_ticks() - get QTIMER ticks
+ * __qdf_get_log_timestamp() - get QTIMER ticks
  *
  * Returns QTIMER(19.2 MHz) clock ticks. To convert it into seconds
  * divide it by 19200.
  *
  * Return: QTIMER(19.2 MHz) clock ticks
  */
-static inline uint64_t __cdf_get_qtimer_ticks(void)
+static inline uint64_t __qdf_get_log_timestamp(void)
 {
 	return arch_counter_get_cntpct();
 }
+#else
+
+/**
+ * __qdf_get_log_timestamp - get time stamp for logging
+ * For adrastea this API returns QTIMER tick which is needed to synchronize
+ * host and fw log timestamps
+ * For ROME and other discrete solutions this API returns the system boot timestamp
+ *
+ * Return:
+ * QTIMER ticks(19.2MHz) for adrastea
+ * System tick for rome and other future discrete solutions
+ */
+static inline uint64_t __qdf_get_log_timestamp(void)
+{
+#ifdef CONFIG_CNSS
+	struct timespec ts;
+
+	cnss_get_boottime(&ts);
+
+	return ((uint64_t) ts.tv_sec * 1000000) + (ts.tv_nsec / 1000);
+#else
+	return __qdf_system_ticks_to_msecs(__qdf_system_ticks()) * 1000;
+#endif /* CONFIG_CNSS */
+}
 #endif /* QCA_WIFI_3_0_ADRASTEA */
 
 #endif
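
A sketch of a bounded polling loop built on the tick helpers above; hw_ready is a hypothetical predicate supplied by the caller and the 100 ms budget is illustrative:

static bool example_wait_for_ready(bool (*hw_ready)(void))
{
	__qdf_time_t deadline = __qdf_system_ticks() +
				__qdf_system_msecs_to_ticks(100);

	while (!hw_ready()) {
		if (__qdf_system_time_after(__qdf_system_ticks(), deadline))
			return false;	/* timed out after ~100 ms */
		__qdf_mdelay(1);
	}

	return true;
}
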

+ 43 - 41
qdf/src/i_qdf_softirq_timer.h → qdf/linux/src/i_qdf_timer.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -25,92 +25,94 @@
  * to the Linux Foundation.
  */
 
-#ifndef _I_CDF_SOFTIRQ_TIMER_H
-#define _I_CDF_SOFTIRQ_TIMER_H
+/**
+ * DOC: i_qdf_timer
+ * This file provides OS dependent timer API's.
+ */
+
+#ifndef _I_QDF_TIMER_H
+#define _I_QDF_TIMER_H
 
 #include <linux/version.h>
 #include <linux/delay.h>
 #include <linux/timer.h>
 #include <linux/jiffies.h>
-#include <cdf_types.h>
+#include <qdf_types.h>
 
 /* timer data type */
-typedef struct timer_list __cdf_softirq_timer_t;
-
-/* ugly - but every other OS takes, sanely, a void */
+typedef struct timer_list __qdf_timer_t;
 
-typedef void (*cdf_dummy_timer_func_t)(unsigned long arg);
+typedef void (*qdf_dummy_timer_func_t)(unsigned long arg);
 
 /**
- * __cdf_softirq_timer_init() - initialize a softirq timer
+ * __qdf_timer_init() - initialize a softirq timer
  * @hdl: OS handle
  * @timer: Pointer to timer object
  * @func: Function pointer
 * @arg: Argument
  * @type: deferrable or non deferrable timer type
  *
- * Timer type CDF_TIMER_TYPE_SW means its a deferrable sw timer which will
+ * Timer type QDF_TIMER_TYPE_SW means its a deferrable sw timer which will
  * not cause CPU wake upon expiry
- * Timer type CDF_TIMER_TYPE_WAKE_APPS means its a non-deferrable timer which
+ * Timer type QDF_TIMER_TYPE_WAKE_APPS means its a non-deferrable timer which
  * will cause CPU wake up on expiry
  *
- * Return: none
+ * Return: QDF_STATUS
  */
-static inline CDF_STATUS
-__cdf_softirq_timer_init(cdf_handle_t hdl,
-			 struct timer_list *timer,
-			 cdf_softirq_timer_func_t func, void *arg,
-			 CDF_TIMER_TYPE type)
+static inline QDF_STATUS __qdf_timer_init(qdf_handle_t hdl,
+					  struct timer_list *timer,
+					  qdf_timer_func_t func, void *arg,
+					  QDF_TIMER_TYPE type)
 {
-	if (CDF_TIMER_TYPE_SW == type)
+	if (QDF_TIMER_TYPE_SW == type)
 		init_timer_deferrable(timer);
 	else
 		init_timer(timer);
-	timer->function = (cdf_dummy_timer_func_t) func;
+	timer->function = (qdf_dummy_timer_func_t) func;
 	timer->data = (unsigned long)arg;
 
-	return CDF_STATUS_SUCCESS;
+	return QDF_STATUS_SUCCESS;
 }
 
 /**
- * __cdf_softirq_timer_start() - start a cdf softirq timer
+ * __qdf_timer_start() - start a qdf softirq timer
  * @timer: Pointer to timer object
  * @delay: Delay in milli seconds
  *
- * Return: none
+ * Return: QDF_STATUS
  */
-static inline CDF_STATUS
-__cdf_softirq_timer_start(struct timer_list *timer, uint32_t delay)
+static inline QDF_STATUS __qdf_timer_start(struct timer_list *timer,
+					   uint32_t delay)
 {
 	timer->expires = jiffies + msecs_to_jiffies(delay);
 	add_timer(timer);
 
-	return CDF_STATUS_SUCCESS;
+	return QDF_STATUS_SUCCESS;
 }
 
 /**
- * __cdf_softirq_timer_mod() - modify a timer
+ * __qdf_timer_mod() - modify a timer
  * @timer: Pointer to timer object
  * @delay: Delay in milli seconds
  *
- * Return: none
+ * Return: QDF_STATUS
  */
-static inline CDF_STATUS
-__cdf_softirq_timer_mod(struct timer_list *timer, uint32_t delay)
+static inline QDF_STATUS __qdf_timer_mod(struct timer_list *timer,
+					 uint32_t delay)
 {
 	mod_timer(timer, jiffies + msecs_to_jiffies(delay));
 
-	return CDF_STATUS_SUCCESS;
+	return QDF_STATUS_SUCCESS;
 }
 
 /**
- * __cdf_softirq_timer_cancel() - cancel a timer
+ * __qdf_timer_stop() - cancel a timer
  * @timer: Pointer to timer object
  *
 * Return: true if timer was cancelled and deactivated,
- *	false if timer was cancelled but already got fired.
+ * false if timer was cancelled but already got fired.
  */
-static inline bool __cdf_softirq_timer_cancel(struct timer_list *timer)
+static inline bool __qdf_timer_stop(struct timer_list *timer)
 {
 	if (likely(del_timer(timer)))
 		return 1;
@@ -119,34 +121,34 @@ static inline bool __cdf_softirq_timer_cancel(struct timer_list *timer)
 }
 
 /**
- * __cdf_softirq_timer_free() - free a cdf timer
+ * __qdf_timer_free() - free a qdf timer
  * @timer: Pointer to timer object
  *
 * Return: true if timer was cancelled and deactivated,
- *	false if timer was cancelled but already got fired.
+ * false if timer was cancelled but already got fired.
  */
-static inline void __cdf_softirq_timer_free(struct timer_list *timer)
+static inline void __qdf_timer_free(struct timer_list *timer)
 {
 	del_timer_sync(timer);
 }
 
 /**
- * __cdf_sostirq_timer_sync_cancel() - Synchronously canel a timer
+ * __qdf_timer_sync_cancel() - Synchronously cancel a timer
  * @timer: Pointer to timer object
  *
  * Synchronization Rules:
  * 1. caller must make sure timer function will not use
- *    cdf_softirq_set_timer to add iteself again.
+ *    qdf_set_timer to add itself again.
  * 2. caller must not hold any lock that timer function
  *    is likely to hold as well.
  * 3. It can't be called from interrupt context.
  *
 * Return: true if timer was cancelled and deactivated,
- *	false if timer was cancelled but already got fired.
+ * false if timer was cancelled but already got fired.
  */
-static inline bool __cdf_sostirq_timer_sync_cancel(struct timer_list *timer)
+static inline bool __qdf_timer_sync_cancel(struct timer_list *timer)
 {
 	return del_timer_sync(timer);
 }
 
-#endif /*_ADF_OS_TIMER_PVT_H*/
+#endif /* _I_QDF_TIMER_H */
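
A minimal sketch of the converged timer wrappers; it assumes qdf_timer_func_t is the void (*)(void *) callback type declared in qdf_timer.h, and example_timer_cb is a caller-supplied handler rather than anything defined by this patch:

static struct timer_list example_timer;

static void example_timer_cb(void *arg)
{
	/* deferred work runs here, in softirq context */
}

static void example_timer_usage(qdf_handle_t hdl)
{
	__qdf_timer_init(hdl, &example_timer, example_timer_cb, NULL,
			 QDF_TIMER_TYPE_SW);
	__qdf_timer_start(&example_timer, 500);	/* fires in ~500 ms */

	/* ... on teardown, del_timer_sync() via the free wrapper ... */
	__qdf_timer_free(&example_timer);
}
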

+ 94 - 0
qdf/linux/src/i_qdf_trace.h

@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_trace.h
+ *
+ * Linux-specific definitions for QDF trace
+ *
+ */
+
+#if !defined(__I_QDF_TRACE_H)
+#define __I_QDF_TRACE_H
+
+#if !defined(__printf)
+#define __printf(a, b)
+#endif
+
+/* Include Files */
+#include <cds_packet.h>
+
+#define QDF_ENABLE_TRACING
+
+#ifdef QDF_ENABLE_TRACING
+
+#define QDF_ASSERT(_condition) \
+	do { \
+		if (!(_condition)) { \
+			pr_err("QDF ASSERT in %s Line %d\n", \
+			       __func__, __LINE__); \
+			WARN_ON(1); \
+		} \
+	} while (0)
+
+#else
+
+/* This code will be used for compilation if tracing is to be compiled out */
+/* of the code so these functions/macros are 'do nothing' */
+static inline void qdf_trace_msg(QDF_MODULE_ID module, ...)
+{
+}
+
+#define QDF_ASSERT(_condition)
+
+#endif
+
+#ifdef PANIC_ON_BUG
+
+#define QDF_BUG(_condition) \
+	do { \
+		if (!(_condition)) { \
+			pr_err("QDF BUG in %s Line %d\n", \
+			       __func__, __LINE__); \
+			BUG_ON(1); \
+		} \
+	} while (0)
+
+#else
+
+#define QDF_BUG(_condition) \
+	do { \
+		if (!(_condition)) { \
+			pr_err("QDF BUG in %s Line %d\n", \
+			       __func__, __LINE__); \
+			WARN_ON(1); \
+		} \
+	} while (0)
+
+#endif
+
+#endif /* __I_QDF_TRACE_H */
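
A sketch of how the two macros differ in practice: QDF_ASSERT() always warns, while QDF_BUG() escalates to BUG_ON() only when PANIC_ON_BUG is defined; example_validate_ctx is illustrative only:

static int example_validate_ctx(void *ctx)
{
	/* fatal (with PANIC_ON_BUG) if a required pointer is missing */
	QDF_BUG(ctx != NULL);
	if (!ctx)
		return -EINVAL;

	/* sanity check that should never fire in a correct build */
	QDF_ASSERT(((unsigned long)ctx & 0x3) == 0);

	return 0;
}
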

+ 292 - 0
qdf/linux/src/i_qdf_types.h

@@ -0,0 +1,292 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_types.h
+ * This file provides OS dependent types API's.
+ */
+
+#if !defined(__I_QDF_TYPES_H)
+#define __I_QDF_TYPES_H
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/version.h>
+#include <asm/div64.h>
+#include <qdf_status.h>
+
+#ifndef __KERNEL__
+#define __iomem
+#endif
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <linux/version.h>
+
+#ifdef __KERNEL__
+#include <generated/autoconf.h>
+#include <linux/compiler.h>
+#include <linux/dma-mapping.h>
+#include <linux/wireless.h>
+#include <linux/if.h>
+#else
+
+/*
+ * Hack - coexist with prior defs of dma_addr_t.
+ * Eventually all other defs of dma_addr_t should be removed.
+ * At that point, the "already_defined" wrapper can be removed.
+ */
+#ifndef __dma_addr_t_already_defined__
+#define __dma_addr_t_already_defined__
+typedef unsigned long dma_addr_t;
+#endif
+
+#ifndef __ahdecl
+#ifdef __i386__
+#define __ahdecl   __attribute__((regparm(0)))
+#else
+#define __ahdecl
+#endif
+#endif
+
+#define SIOCGIWAP       0
+#define IWEVCUSTOM      0
+#define IWEVREGISTERED  0
+#define IWEVEXPIRED     0
+#define SIOCGIWSCAN     0
+#define DMA_TO_DEVICE   0
+#define DMA_FROM_DEVICE 0
+#define __iomem
+#endif /* __KERNEL__ */
+
+/*
+ * max sg that we support
+ */
+#define __QDF_MAX_SCATTER        1
+
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+#define QDF_LITTLE_ENDIAN_MACHINE
+#elif defined(__BIG_ENDIAN_BITFIELD)
+#define QDF_BIG_ENDIAN_MACHINE
+#else
+#error  "Please fix <asm/byteorder.h>"
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) || !defined(__KERNEL__)
+#ifndef __bool_already_defined__
+#define __bool_already_defined__
+
+/**
+ * bool - This is an enum for boolean
+ * @false: zero
+ * @true: one
+ */
+typedef enum bool {
+	false = 0,
+	true  = 1,
+} bool;
+#endif /* __bool_already_defined__ */
+#endif
+
+#define __qdf_packed    __attribute__((packed))
+
+typedef int (*__qdf_os_intr)(void *);
+/**
+ * Private definitions of general data types
+ */
+typedef dma_addr_t __qdf_dma_addr_t;
+typedef size_t __qdf_dma_size_t;
+typedef dma_addr_t __qdf_dma_context_t;
+
+#define qdf_dma_mem_context(context) dma_addr_t context
+#define qdf_get_dma_mem_context(var, field)   ((qdf_dma_context_t)(var->field))
+
+/**
+ * typedef struct __qdf_resource_t - qdf resource type
+ * @paddr: Physical address
+ * @vaddr: Virtual address
+ * @len: Length
+ */
+typedef struct __qdf_resource {
+	unsigned long paddr;
+	void __iomem *vaddr;
+	unsigned long len;
+} __qdf_resource_t;
+
+struct __qdf_mempool_ctxt;
+
+#define MAX_MEM_POOLS 64
+
+/**
+ * struct __qdf_device - generic qdf device type
+ * @drv: Pointer to driver
+ * @drv_hdl: Pointer to driver handle
+ * @drv_name: Pointer to driver name
+ * @irq: IRQ
+ * @dev: Pointer to device
+ * @res: QDF resource
+ * @func: Interrupt handler
+ * @mem_pool: array to pointer to mem context
+ */
+struct __qdf_device {
+	void *drv;
+	void *drv_hdl;
+	char *drv_name;
+	int irq;
+	struct device *dev;
+	__qdf_resource_t res;
+	__qdf_os_intr func;
+	struct __qdf_mempool_ctxt *mem_pool[MAX_MEM_POOLS];
+};
+typedef struct __qdf_device *__qdf_device_t;
+
+typedef size_t __qdf_size_t;
+typedef off_t __qdf_off_t;
+typedef uint8_t __iomem *__qdf_iomem_t;
+
+typedef uint32_t ath_dma_addr_t;
+
+/**
+ * typedef __qdf_segment_t - segment of memory
+ * @daddr: dma address
+ * @len: length of segment
+ */
+typedef struct __qdf_segment {
+	dma_addr_t  daddr;
+	uint32_t    len;
+} __qdf_segment_t;
+
+/**
+ * __qdf_dma_map - dma map of memory
+ * @mapped: mapped address
+ * @nsegs: number of segments
+ * @coherent: coherency status
+ * @seg: segment of memory
+ */
+struct __qdf_dma_map {
+	uint32_t                mapped;
+	uint32_t                nsegs;
+	uint32_t                coherent;
+	__qdf_segment_t      seg[__QDF_MAX_SCATTER];
+};
+typedef struct  __qdf_dma_map  *__qdf_dma_map_t;
+
+/**
+ * __qdf_net_wireless_evcode - enum for event code
+ * @__QDF_IEEE80211_ASSOC: association event code
+ * @__QDF_IEEE80211_REASSOC: reassociation event code
+ * @__QDF_IEEE80211_DISASSOC: disassociation event code
+ * @__QDF_IEEE80211_JOIN: join event code
+ * @__QDF_IEEE80211_LEAVE: leave event code
+ * @__QDF_IEEE80211_SCAN: scan event code
+ * @__QDF_IEEE80211_REPLAY: replay event code
+ * @__QDF_IEEE80211_MICHAEL: michael event code
+ * @__QDF_IEEE80211_REJOIN: rejoin event code
+ * @__QDF_CUSTOM_PUSH_BUTTON: push button event code
+ */
+enum __qdf_net_wireless_evcode {
+	__QDF_IEEE80211_ASSOC = SIOCGIWAP,
+	__QDF_IEEE80211_REASSOC = IWEVCUSTOM,
+	__QDF_IEEE80211_DISASSOC = SIOCGIWAP,
+	__QDF_IEEE80211_JOIN = IWEVREGISTERED,
+	__QDF_IEEE80211_LEAVE = IWEVEXPIRED,
+	__QDF_IEEE80211_SCAN = SIOCGIWSCAN,
+	__QDF_IEEE80211_REPLAY = IWEVCUSTOM,
+	__QDF_IEEE80211_MICHAEL = IWEVCUSTOM,
+	__QDF_IEEE80211_REJOIN = IWEVCUSTOM,
+	__QDF_CUSTOM_PUSH_BUTTON = IWEVCUSTOM,
+};
+
+#define __qdf_print               printk
+#define __qdf_vprint              vprintk
+#define __qdf_snprint             snprintf
+#define __qdf_vsnprint            vsnprintf
+
+#define __QDF_DMA_BIDIRECTIONAL  DMA_BIDIRECTIONAL
+#define __QDF_DMA_TO_DEVICE      DMA_TO_DEVICE
+#define __QDF_DMA_FROM_DEVICE    DMA_FROM_DEVICE
+#define __qdf_inline             inline
+
+/*
+ * 1. GNU C/C++ Compiler
+ *
+ * How to detect gcc : __GNUC__
+ * How to detect gcc version :
+ *   major version : __GNUC__ (2 = 2.x, 3 = 3.x, 4 = 4.x)
+ *   minor version : __GNUC_MINOR__
+ *
+ * 2. Microsoft C/C++ Compiler
+ *
+ * How to detect msc : _MSC_VER
+ * How to detect msc version :
+ *   _MSC_VER (1200 = MSVC 6.0, 1300 = MSVC 7.0, ...)
+ *
+ */
+
+/*
+ * MACROs to help with compiler and OS specifics. May need to get a little
+ * more sophisticated than this and define these to specific 'VERSIONS' of
+ * the compiler and OS.  Until we have a need for that, lets go with this
+ */
+#if defined(_MSC_VER)
+
+#define QDF_COMPILER_MSC
+/* assuming that if we build with MSC, OS is WinMobile */
+#define QDF_OS_WINMOBILE
+
+#elif defined(__GNUC__)
+
+#define QDF_COMPILER_GNUC
+#define QDF_OS_LINUX /* assuming if building with GNUC, OS is Linux */
+
+#endif
+
+#if defined(QDF_COMPILER_MSC)
+
+
+/*
+ * Does nothing on Windows.  packing individual structs is not
+ * supported on the Windows compiler
+ */
+#define QDF_PACK_STRUCT_1
+#define QDF_PACK_STRUCT_2
+#define QDF_PACK_STRUCT_4
+#define QDF_PACK_STRUCT_8
+#define QDF_PACK_STRUCT_16
+
+#elif defined(QDF_COMPILER_GNUC)
+
+#else
+#error "Compiling with an unknown compiler!!"
+#endif
+
+#endif /* __I_QDF_TYPES_H */
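As a rough illustration of how the DMA-context helpers above are meant to be used, here is a minimal caller sketch; the ring structure and field names are hypothetical, and qdf_dma_context_t is assumed to be the public alias implied by the cast in qdf_get_dma_mem_context():

    /* Sketch only: hypothetical descriptor ring embedding a DMA context
     * through the qdf_dma_mem_context() helper. */
    struct ring_state {
        void *vaddr;                     /* CPU-visible base address */
        qdf_dma_mem_context(memctx);     /* expands to: dma_addr_t memctx; */
    };

    static qdf_dma_context_t ring_dma_ctx(struct ring_state *ring)
    {
        /* read back the stored context as a qdf_dma_context_t */
        return qdf_get_dma_mem_context(ring, memctx);
    }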

+ 239 - 0
qdf/linux/src/i_qdf_util.h

@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: i_qdf_util.h
+ * This file provides OS dependent API's.
+ */
+
+#ifndef _I_QDF_UTIL_H
+#define _I_QDF_UTIL_H
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#include <linux/random.h>
+
+#include <qdf_types.h>
+#include <qdf_status.h>
+#include <asm/byteorder.h>
+
+#if LINUX_VERSION_CODE  <= KERNEL_VERSION(3, 3, 8)
+#include <asm/system.h>
+#else
+#if defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__)
+#include <asm/dec/system.h>
+#else
+#endif
+#endif
+
+#include <qdf_types.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#ifdef QCA_PARTNER_PLATFORM
+#include "ath_carr_pltfrm.h"
+#else
+#include <linux/byteorder/generic.h>
+#endif
+
+/*
+ * Generic compiler-dependent macros if defined by the OS
+ */
+#define __qdf_unlikely(_expr)   unlikely(_expr)
+#define __qdf_likely(_expr)     likely(_expr)
+
+/**
+ * __qdf_status_to_os_return() - translates qdf_status types to linux return types
+ * @status: status to translate
+ *
+ * Translates error types that linux may want to handle specially.
+ *
+ * Return: 0 or the Linux error code that most closely matches the QDF_STATUS;
+ * defaults to -EPERM (-1) for unrecognized values
+ */
+static inline int __qdf_status_to_os_return(QDF_STATUS status)
+{
+	switch (status) {
+	case QDF_STATUS_SUCCESS:
+		return 0;
+	case QDF_STATUS_E_RESOURCES:
+		return -EBUSY;
+	case QDF_STATUS_E_NOMEM:
+		return -ENOMEM;
+	case QDF_STATUS_E_AGAIN:
+		return -EAGAIN;
+	case QDF_STATUS_E_INVAL:
+		return -EINVAL;
+	case QDF_STATUS_E_FAULT:
+		return -EFAULT;
+	case QDF_STATUS_E_ALREADY:
+		return -EALREADY;
+	case QDF_STATUS_E_BADMSG:
+		return -EBADMSG;
+	case QDF_STATUS_E_BUSY:
+		return -EBUSY;
+	case QDF_STATUS_E_CANCELED:
+		return -ECANCELED;
+	case QDF_STATUS_E_ABORTED:
+		return -ECONNABORTED;
+	case QDF_STATUS_E_PERM:
+		return -EPERM;
+	case QDF_STATUS_E_EXISTS:
+		return -EEXIST;
+	case QDF_STATUS_E_NOENT:
+		return -ENOENT;
+	case QDF_STATUS_E_E2BIG:
+		return -E2BIG;
+	case QDF_STATUS_E_NOSPC:
+		return -ENOSPC;
+	case QDF_STATUS_E_ADDRNOTAVAIL:
+		return -EADDRNOTAVAIL;
+	case QDF_STATUS_E_ENXIO:
+		return -ENXIO;
+	case QDF_STATUS_E_NETDOWN:
+		return -ENETDOWN;
+	case QDF_STATUS_E_IO:
+		return -EIO;
+	case QDF_STATUS_E_NETRESET:
+		return -ENETRESET;
+	default:
+		return -EPERM;
+	}
+}
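For example, a driver entry point that must return a Linux errno can funnel its internal QDF status through this helper (hypothetical caller; do_configure() and struct wlan_ctx are placeholders, not part of this change):

    static int wlan_dev_configure(struct wlan_ctx *ctx)     /* hypothetical */
    {
        QDF_STATUS status = do_configure(ctx);   /* placeholder internal call */

        /* QDF_STATUS_E_NOMEM -> -ENOMEM, QDF_STATUS_E_INVAL -> -EINVAL, ... */
        return __qdf_status_to_os_return(status);
    }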
+
+
+/**
+ * __qdf_set_macaddr_broadcast() - set a QDF MacAddress to the broadcast address
+ * @mac_addr: pointer to the qdf MacAddress to set to broadcast
+ *
+ * This function sets a QDF MacAddress to the 'broadcast' MacAddress. Broadcast
+ * MacAddress contains all 0xFF bytes.
+ *
+ * Return: none
+ */
+static inline void __qdf_set_macaddr_broadcast(struct qdf_mac_addr *mac_addr)
+{
+	memset(mac_addr, 0xff, QDF_MAC_ADDR_SIZE);
+}
+
+/**
+ * __qdf_zero_macaddr() - zero out a MacAddress
+ * @mac_addr: pointer to the struct qdf_mac_addr to zero.
+ *
+ * This function zeros out a QDF MacAddress type.
+ *
+ * Return: none
+ */
+static inline void __qdf_zero_macaddr(struct qdf_mac_addr *mac_addr)
+{
+	memset(mac_addr, 0, QDF_MAC_ADDR_SIZE);
+}
+
+/**
+ * __qdf_is_macaddr_equal() - compare two QDF MacAddress
+ * @mac_addr1: Pointer to one qdf MacAddress to compare
+ * @mac_addr2: Pointer to the other qdf MacAddress to compare
+ *
+ * This function returns a bool that tells if two QDF MacAddresses
+ * are equivalent.
+ *
+ * Return: true if the MacAddresses are equal
+ *      false if the MacAddresses are not equal
+ */
+static inline bool __qdf_is_macaddr_equal(struct qdf_mac_addr *mac_addr1,
+					  struct qdf_mac_addr *mac_addr2)
+{
+	return 0 == memcmp(mac_addr1, mac_addr2, QDF_MAC_ADDR_SIZE);
+}
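A short usage sketch of the MAC-address helpers above (the caller and its purpose are hypothetical):

    static bool is_bcast_peer(struct qdf_mac_addr *peer)
    {
        struct qdf_mac_addr bcast;

        __qdf_set_macaddr_broadcast(&bcast);        /* ff:ff:ff:ff:ff:ff */
        return __qdf_is_macaddr_equal(peer, &bcast);
    }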
+
+/**
+ * qdf_in_interrupt - returns true if in interrupt context
+ */
+#define qdf_in_interrupt          in_interrupt
+
+/**
+ * @brief min/max helpers.
+ */
+#define __qdf_min(_a, _b)         ((_a) < (_b) ? _a : _b)
+#define __qdf_max(_a, _b)         ((_a) > (_b) ? _a : _b)
+
+/**
+ * @brief Assert
+ */
+#define __qdf_assert(expr)  do { \
+		if (unlikely(!(expr))) { \
+			pr_err("Assertion failed! %s:%s %s:%d\n", \
+			       # expr, __func__, __FILE__, __LINE__); \
+			dump_stack(); \
+			BUG_ON(1); \
+		} \
+} while (0)
+
+/**
+ * @brief Assert
+ */
+#define __qdf_target_assert(expr)  do {    \
+	if (unlikely(!(expr))) {                                 \
+		qdf_print("Assertion failed! %s:%s %s:%d\n",   \
+		#expr, __FUNCTION__, __FILE__, __LINE__);      \
+		dump_stack();                                      \
+		panic("Take care of the TARGET ASSERT first\n");          \
+	}     \
+} while (0)
+
+#define __qdf_container_of(ptr, type, member) container_of(ptr, type, member)
+
+#define __qdf_ntohs                      ntohs
+#define __qdf_ntohl                      ntohl
+
+#define __qdf_htons                      htons
+#define __qdf_htonl                      htonl
+
+#define __qdf_cpu_to_le16                cpu_to_le16
+#define __qdf_cpu_to_le32                cpu_to_le32
+#define __qdf_cpu_to_le64                cpu_to_le64
+
+#define __qdf_le16_to_cpu                le16_to_cpu
+#define __qdf_le32_to_cpu                le32_to_cpu
+
+#define __qdf_be32_to_cpu                be32_to_cpu
+#define __qdf_be64_to_cpu                be64_to_cpu
+#define __qdf_le64_to_cpu                le64_to_cpu
+
+/**
+ * @brief memory barriers.
+ */
+#define __qdf_wmb()                wmb()
+#define __qdf_rmb()                rmb()
+#define __qdf_mb()                 mb()
+
+#endif /*_I_QDF_UTIL_H*/
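The byte-order and barrier wrappers map one-to-one onto the kernel helpers; a minimal sketch of their intended use follows (struct tx_desc and its fields are hypothetical):

    /* Illustrative only: tx_desc, ether_type and frame_len are placeholders. */
    static void fill_desc(struct tx_desc *desc, uint16_t ether_type,
                          uint32_t frame_len)
    {
        desc->type = __qdf_htons(ether_type);        /* host -> network order */
        desc->len  = __qdf_cpu_to_le32(frame_len);   /* host -> little endian */
        __qdf_wmb();       /* order descriptor writes before ringing doorbell */
    }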

+ 84 - 0
qdf/linux/src/qdf_defer.c

@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_defer.c
+ * This file provides OS dependent deferred API's.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+
+#include "i_qdf_defer.h"
+
+/**
+ * __qdf_defer_func() - defer work handler
+ * @work: Pointer to defer work
+ *
+ * Return: none
+ */
+void __qdf_defer_func(struct work_struct *work)
+{
+	__qdf_work_t *ctx = container_of(work, __qdf_work_t, work);
+	if (ctx->fn == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "No callback registered !!");
+		return;
+	}
+	ctx->fn(ctx->arg);
+}
+EXPORT_SYMBOL(__qdf_defer_func);
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 19)
+/**
+ * __qdf_defer_delayed_func() - defer work handler
+ * @dwork: Pointer to defer work
+ *
+ * Return: none
+ */
+void
+__qdf_defer_delayed_func(struct work_struct *dwork)
+{
+	return;
+}
+#else
+void
+__qdf_defer_delayed_func(struct work_struct *dwork)
+{
+	__qdf_delayed_work_t  *ctx = container_of(dwork, __qdf_delayed_work_t,
+		 dwork.work);
+	if (ctx->fn == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "BugCheck: Callback is not initialized while creating delayed work queue");
+		return;
+	}
+	ctx->fn(ctx->arg);
+}
+#endif
+EXPORT_SYMBOL(__qdf_defer_delayed_func);

+ 82 - 92
qdf/src/qdf_event.c → qdf/linux/src/qdf_event.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -26,231 +26,220 @@
  */
 
 /**
- * DOC: cdf_event.c
+ * DOC: qdf_event.c
  *
- * This source file contains linux specific definitions for CDF event APIs
+ * This source file contains linux specific definitions for QDF event APIs
  * The APIs mentioned in this file are used for initializing, setting,
 * resetting, destroying an event and waiting on an occurrence of an event
  * among multiple events.
  */
 
 /* Include Files */
-#include "cdf_event.h"
-#include "cdf_trace.h"
-
-/* Preprocessor Definitions and Constants */
-
-/* Type Declarations */
-
-/* Global Data Definitions */
-
-/* Static Variable Definitions */
+#include "qdf_event.h"
 
 /* Function Definitions and Documentation */
 
 /**
- * cdf_event_init() - initializes a CDF event
+ * qdf_event_create() - initializes a QDF event
  * @event: Pointer to the opaque event object to initialize
  *
- * The cdf_event_init() function initializes the specified event. Upon
+ * The qdf_event_create() function initializes the specified event. Upon
  * successful initialization, the state of the event becomes initialized
- * and not signaled.
+ * and not signalled.
  *
  * An event must be initialized before it may be used in any other event
  * functions.
- *
  * Attempting to initialize an already initialized event results in
  * a failure.
  *
- * Return: CDF status
+ * Return: QDF status
  */
-CDF_STATUS cdf_event_init(cdf_event_t *event)
+QDF_STATUS qdf_event_create(qdf_event_t *event)
 {
 	/* check for null pointer */
 	if (NULL == event) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			  "NULL event passed into %s", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAULT;
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
 	}
 
 	/* check for 'already initialized' event */
 	if (LINUX_EVENT_COOKIE == event->cookie) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			  "Initialized event passed into %s", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_BUSY;
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_BUSY;
 	}
 
 	/* initialize new event */
 	init_completion(&event->complete);
 	event->cookie = LINUX_EVENT_COOKIE;
 
-	return CDF_STATUS_SUCCESS;
+	return QDF_STATUS_SUCCESS;
 }
+EXPORT_SYMBOL(qdf_event_create);
 
 /**
- * cdf_event_set() - sets a CDF event
+ * qdf_event_set() - sets a QDF event
  * @event: The event to set to the signalled state
  *
  * The state of the specified event is set to signalled by calling
- * cdf_event_set().
+ * qdf_event_set().
  *
- * Any threads waiting on the event as a result of a cdf_event_wait() will
+ * Any threads waiting on the event as a result of a qdf_event_wait() will
  * be unblocked and available to be scheduled for execution when the event
- * is signaled by a call to cdf_event_set().
+ * is signaled by a call to qdf_event_set().
  *
- *
- * Return: CDF status
+ * Return: QDF status
  */
-
-CDF_STATUS cdf_event_set(cdf_event_t *event)
+QDF_STATUS qdf_event_set(qdf_event_t *event)
 {
 	/* check for null pointer */
 	if (NULL == event) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			  "NULL event passed into %s", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAULT;
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
 	}
 
 	/* check if event refers to an initialized object */
 	if (LINUX_EVENT_COOKIE != event->cookie) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			  "Uninitialized event passed into %s", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_INVAL;
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
 	}
 
 	complete(&event->complete);
 
-	return CDF_STATUS_SUCCESS;
+	return QDF_STATUS_SUCCESS;
 }
+EXPORT_SYMBOL(qdf_event_set);
 
 /**
- * cdf_event_reset() - resets a CDF event
+ * qdf_event_reset() - resets a QDF event
  * @event: The event to set to the NOT signalled state
  *
  * This function isn't required for Linux. Therefore, it doesn't do much.
  *
  * The state of the specified event is set to 'NOT signalled' by calling
- * cdf_event_reset().  The state of the event remains NOT signalled until an
- * explicit call to cdf_event_set().
+ * qdf_event_reset().  The state of the event remains NOT signalled until an
+ * explicit call to qdf_event_set().
  *
  * This function sets the event to a NOT signalled state even if the event was
  * signalled multiple times before being signaled.
  *
- *
- * Return: CDF status
+ * Return: QDF status
  */
-CDF_STATUS cdf_event_reset(cdf_event_t *event)
+QDF_STATUS qdf_event_reset(qdf_event_t *event)
 {
 	/* check for null pointer */
 	if (NULL == event) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			  "NULL event passed into %s", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAULT;
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
 	}
 
 	/* check to make sure it is an 'already initialized' event */
 	if (LINUX_EVENT_COOKIE != event->cookie) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			  "Uninitialized event passed into %s", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_INVAL;
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
 	}
 
 	/* (re)initialize event */
 	INIT_COMPLETION(event->complete);
-	return CDF_STATUS_SUCCESS;
+	return QDF_STATUS_SUCCESS;
 }
+EXPORT_SYMBOL(qdf_event_reset);
 
 /**
- * cdf_event_destroy() - Destroys a CDF event
+ * qdf_event_destroy() - Destroys a QDF event
  * @event: The event object to be destroyed.
  *
  * This function doesn't do much in Linux. There is no need for the caller
  * to explicitly destroy an event after use.
  *
  * The os_event_destroy() function shall destroy the event object
- * referenced by event.  After a successful return from cdf_event_destroy()
+ * referenced by event.  After a successful return from qdf_event_destroy()
  * the event object becomes, in effect, uninitialized.
  *
- * A destroyed event object can be reinitialized using cdf_event_init();
+ * A destroyed event object can be reinitialized using qdf_event_create();
  * the results of otherwise referencing the object after it has been destroyed
- * are undefined.  Calls to CDF event functions to manipulate the lock such
- * as cdf_event_set() will fail if the event is destroyed.  Therefore,
+ * are undefined.  Calls to QDF event functions to manipulate the lock such
+ * as qdf_event_set() will fail if the event is destroyed.  Therefore,
  * don't use the event after it has been destroyed until it has
  * been re-initialized.
  *
- * Return: CDF status
+ * Return: QDF status
  */
-
-CDF_STATUS cdf_event_destroy(cdf_event_t *event)
+QDF_STATUS qdf_event_destroy(qdf_event_t *event)
 {
 	/* check for null pointer */
 	if (NULL == event) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			  "NULL event passed into %s", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAULT;
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
 	}
 
 	/* check to make sure it is an 'already initialized' event */
 	if (LINUX_EVENT_COOKIE != event->cookie) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			  "Uninitialized event passed into %s", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_INVAL;
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
 	}
 
 	/* make sure nobody is waiting on the event */
 	complete_all(&event->complete);
 
 	/* destroy the event */
-	memset(event, 0, sizeof(cdf_event_t));
+	memset(event, 0, sizeof(qdf_event_t));
 
-	return CDF_STATUS_SUCCESS;
+	return QDF_STATUS_SUCCESS;
 }
+EXPORT_SYMBOL(qdf_event_destroy);
 
 /**
- * cdf_wait_single_event() - Waits for a single event to be set.
- *
+ * qdf_wait_single_event() - Waits for a single event to be set.
  * This API waits for the event to be set.
  *
- * @pEvent: Pointer to an event to wait on.
+ * @event: Pointer to an event to wait on.
  * @timeout: Timeout value (in milliseconds).  This function returns
- *	if this interval elapses, regardless if any of the events have
- *	been set.  An input value of 0 for this timeout parameter means
- *	to wait infinitely, meaning a timeout will never occur.
+ * if this interval elapses, regardless if any of the events have
+ * been set.  An input value of 0 for this timeout parameter means
+ * to wait infinitely, meaning a timeout will never occur.
  *
- * Return: CDF status
+ * Return: QDF status
  */
-CDF_STATUS cdf_wait_single_event(cdf_event_t *event, uint32_t timeout)
+QDF_STATUS qdf_wait_single_event(qdf_event_t *event, uint32_t timeout)
 {
 	if (in_interrupt()) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			  "%s cannot be called from interrupt context!!!",
 			  __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAULT;
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
 	}
 
 	/* check for null pointer */
 	if (NULL == event) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			  "NULL event passed into %s", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAULT;
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
 	}
 
 	/* check if cookie is same as that of initialized event */
 	if (LINUX_EVENT_COOKIE != event->cookie) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			  "Uninitialized event passed into %s", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_INVAL;
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
 	}
 
 	if (timeout) {
@@ -258,13 +247,14 @@ CDF_STATUS cdf_wait_single_event(cdf_event_t *event, uint32_t timeout)
 		ret = wait_for_completion_timeout(&event->complete,
 						  msecs_to_jiffies(timeout));
 		if (0 >= ret)
-			return CDF_STATUS_E_TIMEOUT;
+			return QDF_STATUS_E_TIMEOUT;
 	} else {
-		CDF_ASSERT(0);
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			  "Zero timeout value passed into %s", __func__);
-		return CDF_STATUS_E_FAULT;
+		return QDF_STATUS_E_FAULT;
 	}
 
-	return CDF_STATUS_SUCCESS;
+	return QDF_STATUS_SUCCESS;
 }
+EXPORT_SYMBOL(qdf_wait_single_event);
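Typical request/response usage of the event APIs above (sketch only; send_request() and the 1000 ms timeout are placeholders):

    static qdf_event_t fw_ready;

    static QDF_STATUS wait_for_fw(void)
    {
        QDF_STATUS status;

        qdf_event_create(&fw_ready);
        send_request();                        /* placeholder for the real request */
        status = qdf_wait_single_event(&fw_ready, 1000);   /* 1000 ms timeout */
        qdf_event_destroy(&fw_ready);
        return status;
    }

    static void fw_ready_handler(void)         /* runs in the response path */
    {
        qdf_event_set(&fw_ready);
    }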

+ 240 - 0
qdf/linux/src/qdf_list.c

@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_list.c
+ *
+ * QCA driver framework list manipulation APIs. QDF linked list
+ * APIs are NOT thread safe so make sure to use appropriate locking mechanisms
+ * to assure operations on the list are thread safe.
+ */
+
+/* Include files */
+#include <qdf_list.h>
+
+/* Function declarations and documentation */
+
+/**
+ * qdf_list_insert_front() - insert input node at front of the list
+ * @list: Pointer to list
+ * @node: Pointer to input node
+ *
+ * Return: QDF status
+ */
+QDF_STATUS qdf_list_insert_front(qdf_list_t *list, qdf_list_node_t *node)
+{
+	list_add(node, &list->anchor);
+	list->count++;
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_list_insert_front);
+
+/**
+ * qdf_list_insert_back() - insert input node at back of the list
+ * @list: Pointer to list
+ * @node: Pointer to input node
+ *
+ * Return: QDF status
+ */
+QDF_STATUS qdf_list_insert_back(qdf_list_t *list, qdf_list_node_t *node)
+{
+	list_add_tail(node, &list->anchor);
+	list->count++;
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_list_insert_back);
+
+/**
+ * qdf_list_insert_back_size() - insert input node at back of list and save
+ * list size
+ * @list: Pointer to list
+ * @node: Pointer to input node
+ * @p_size: Pointer to store list size
+ *
+ * Return: QDF status
+ */
+QDF_STATUS qdf_list_insert_back_size(qdf_list_t *list,
+				     qdf_list_node_t *node, uint32_t *p_size)
+{
+	list_add_tail(node, &list->anchor);
+	list->count++;
+	*p_size = list->count;
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_list_insert_back_size);
+
+/**
+ * qdf_list_remove_front() - remove node from front of the list
+ * @list: Pointer to list
+ * @node2: Double pointer to store the node which is removed from list
+ *
+ * Return: QDF status
+ */
+QDF_STATUS qdf_list_remove_front(qdf_list_t *list, qdf_list_node_t **node2)
+{
+	struct list_head *listptr;
+
+	if (list_empty(&list->anchor))
+		return QDF_STATUS_E_EMPTY;
+
+	listptr = list->anchor.next;
+	*node2 = listptr;
+	list_del(list->anchor.next);
+	list->count--;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_list_remove_front);
+
+/**
+ * qdf_list_remove_back() - remove node from end of the list
+ * @list: Pointer to list
+ * @node2: Double pointer to store node which is removed from list
+ *
+ * Return: QDF status
+ */
+QDF_STATUS qdf_list_remove_back(qdf_list_t *list, qdf_list_node_t **node2)
+{
+	struct list_head *listptr;
+
+	if (list_empty(&list->anchor))
+		return QDF_STATUS_E_EMPTY;
+
+	listptr = list->anchor.prev;
+	*node2 = listptr;
+	list_del(list->anchor.prev);
+	list->count--;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_list_remove_back);
+
+/**
+ * qdf_list_remove_node() - remove input node from list
+ * @list: Pointer to list
+ * @node_to_remove: Pointer to node which needs to be removed
+ *
+ * Return: QDF status
+ */
+QDF_STATUS qdf_list_remove_node(qdf_list_t *list,
+				qdf_list_node_t *node_to_remove)
+{
+	qdf_list_node_t *tmp;
+	int found = 0;
+
+	if (list_empty(&list->anchor))
+		return QDF_STATUS_E_EMPTY;
+
+	/* verify that node_to_remove is indeed part of the list */
+	list_for_each(tmp, &list->anchor) {
+		if (tmp == node_to_remove) {
+			found = 1;
+			break;
+		}
+	}
+	if (found == 0)
+		return QDF_STATUS_E_INVAL;
+
+	list_del(node_to_remove);
+	list->count--;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_list_remove_node);
+
+/**
+ * qdf_list_peek_front() - peek front node from list
+ * @list: Pointer to list
+ * @node2: Double pointer to store peeked node pointer
+ *
+ * Return: QDF status
+ */
+QDF_STATUS qdf_list_peek_front(qdf_list_t *list, qdf_list_node_t **node2)
+{
+	struct list_head *listptr;
+	if (list_empty(&list->anchor))
+		return QDF_STATUS_E_EMPTY;
+
+	listptr = list->anchor.next;
+	*node2 = listptr;
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_list_peek_front);
+
+/**
+ * qdf_list_peek_next() - peek next node of input node in the list
+ * @list: Pointer to list
+ * @node: Pointer to input node
+ * @node2: Double pointer to store peeked node pointer
+ *
+ * Return: QDF status
+ */
+QDF_STATUS qdf_list_peek_next(qdf_list_t *list, qdf_list_node_t *node,
+			      qdf_list_node_t **node2)
+{
+	struct list_head *listptr;
+	int found = 0;
+	qdf_list_node_t *tmp;
+
+	if ((list == NULL) || (node == NULL) || (node2 == NULL))
+		return QDF_STATUS_E_FAULT;
+
+	if (list_empty(&list->anchor))
+		return QDF_STATUS_E_EMPTY;
+
+	/* verify that node is indeed part of the list */
+	list_for_each(tmp, &list->anchor) {
+		if (tmp == node) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (found == 0)
+		return QDF_STATUS_E_INVAL;
+
+	listptr = node->next;
+	if (listptr == &list->anchor)
+		return QDF_STATUS_E_EMPTY;
+
+	*node2 = listptr;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_list_peek_next);
+
+/**
+ * qdf_list_empty() - check if the list is empty
+ * @list: pointer to the list
+ *
+ * Return: true if the list is empty and false otherwise.
+ */
+bool qdf_list_empty(qdf_list_t *list)
+{
+	return list_empty(&list->anchor);
+}
+EXPORT_SYMBOL(qdf_list_empty);
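Since these list APIs are not thread safe, a caller embeds a qdf_list_node_t in its own record and serializes access itself. A minimal sketch (pending_req, the 128-entry limit and the lock name are hypothetical; qdf_list_create() and the irqsave spinlock wrappers are used the same way as in qdf_mc_timer.c below):

    struct pending_req {
        qdf_list_node_t node;    /* must stay the first member for the cast below */
        int id;
    };

    static qdf_list_t req_list;
    static qdf_spinlock_t req_lock;

    static void req_init(void)
    {
        qdf_list_create(&req_list, 128);       /* at most 128 entries */
        qdf_spinlock_create(&req_lock);
    }

    static void req_add(struct pending_req *req)
    {
        qdf_spin_lock_irqsave(&req_lock);
        qdf_list_insert_back(&req_list, &req->node);
        qdf_spin_unlock_irqrestore(&req_lock);
    }

    static struct pending_req *req_take(void)
    {
        qdf_list_node_t *node = NULL;
        QDF_STATUS status;

        qdf_spin_lock_irqsave(&req_lock);
        status = qdf_list_remove_front(&req_list, &node);
        qdf_spin_unlock_irqrestore(&req_lock);

        return (status == QDF_STATUS_SUCCESS) ?
            (struct pending_req *)node : NULL;
    }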

+ 660 - 0
qdf/linux/src/qdf_lock.c

@@ -0,0 +1,660 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include <linux/module.h>
+#include <qdf_lock.h>
+#include <qdf_trace.h>
+
+#include <qdf_types.h>
+#ifdef CONFIG_CNSS
+#include <net/cnss.h>
+#endif
+#ifdef CONFIG_MCL
+#include <i_host_diag_core_event.h>
+#include <cds_api.h>
+#endif
+#include <ani_global.h>
+#include <i_qdf_lock.h>
+#include <hif.h>
+
+/* Function declarations and documentation */
+typedef __qdf_mutex_t qdf_mutex_t;
+
+/**
+ * qdf_mutex_create() - Initialize a mutex
+ * @lock: mutex to initialize
+ *
+ * Return: QDF_STATUS
+ * =0 success
+ * else fail status
+ */
+QDF_STATUS qdf_mutex_create(qdf_mutex_t *lock)
+{
+	/* check for invalid pointer */
+	if (lock == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed in", __func__);
+		return QDF_STATUS_E_FAULT;
+	}
+	/* check for 'already initialized' lock */
+	if (LINUX_LOCK_COOKIE == lock->cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: already initialized lock", __func__);
+		return QDF_STATUS_E_BUSY;
+	}
+
+	if (in_interrupt()) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* initialize new lock */
+	mutex_init(&lock->m_lock);
+	lock->cookie = LINUX_LOCK_COOKIE;
+	lock->state = LOCK_RELEASED;
+	lock->process_id = 0;
+	lock->refcount = 0;
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_mutex_create);
+
+/**
+ * qdf_mutex_acquire() - acquire a QDF lock
+ * @lock: Pointer to the opaque lock object to acquire
+ *
+ * A lock object is acquired by calling qdf_mutex_acquire().  If the lock
+ * is already locked, the calling thread shall block until the lock becomes
+ * available. This operation shall return with the lock object referenced by
+ * lock in the locked state with the calling thread as its owner.
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS: lock was successfully acquired
+ * QDF failure reason codes: lock is not initialized and can't be used
+ */
+QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *lock)
+{
+	int rc;
+	/* check for invalid pointer */
+	if (lock == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed in", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+	/* check if lock refers to an initialized object */
+	if (LINUX_LOCK_COOKIE != lock->cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: uninitialized lock", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (in_interrupt()) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+	if ((lock->process_id == current->pid) &&
+		(lock->state == LOCK_ACQUIRED)) {
+		lock->refcount++;
+#ifdef QDF_NESTED_LOCK_DEBUG
+			pe_err("%s: %x %d %d", __func__, lock, current->pid,
+			  lock->refcount);
+#endif
+		return QDF_STATUS_SUCCESS;
+	}
+	/* acquire a Lock */
+	mutex_lock(&lock->m_lock);
+	rc = mutex_is_locked(&lock->m_lock);
+	if (rc == 0) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: unable to lock mutex (rc = %d)", __func__, rc);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAILURE;
+	}
+#ifdef QDF_NESTED_LOCK_DEBUG
+		pe_err("%s: %x %d", __func__, lock, current->pid);
+#endif
+	if (LOCK_DESTROYED != lock->state) {
+		lock->process_id = current->pid;
+		lock->refcount++;
+		lock->state = LOCK_ACQUIRED;
+		return QDF_STATUS_SUCCESS;
+	} else {
+		/* lock is already destroyed */
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Lock is already destroyed", __func__);
+		mutex_unlock(&lock->m_lock);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAILURE;
+	}
+}
+EXPORT_SYMBOL(qdf_mutex_acquire);
+
+/**
+ * qdf_mutex_release() - release a QDF lock
+ * @lock: Pointer to the opaque lock object to be released
+ *
+ * qdf_mutex_release() function shall release the lock object
+ * referenced by 'lock'.
+ *
+ * If a thread attempts to release a lock that it has not acquired or that is
+ * not initialized, an error is returned.
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS: lock was successfully released
+ * QDF failure reason codes: lock is not initialized and can't be used
+ */
+QDF_STATUS qdf_mutex_release(qdf_mutex_t *lock)
+{
+	/* check for invalid pointer */
+	if (lock == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed in", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* check if lock refers to an uninitialized object */
+	if (LINUX_LOCK_COOKIE != lock->cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: uninitialized lock", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (in_interrupt()) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* current_thread = get_current_thread_id();
+	 * Check thread ID of caller against thread ID
+	 * of the thread which acquired the lock
+	 */
+	if (lock->process_id != current->pid) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: current task pid does not match original task pid!!",
+			  __func__);
+#ifdef QDF_NESTED_LOCK_DEBUG
+		pe_err("%s: Lock held by=%d being released by=%d",
+			  __func__, lock->process_id, current->pid);
+#endif
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_PERM;
+	}
+	if ((lock->process_id == current->pid) &&
+		(lock->state == LOCK_ACQUIRED)) {
+		if (lock->refcount > 0)
+			lock->refcount--;
+	}
+#ifdef QDF_NESTED_LOCK_DEBUG
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR, "%s: %x %d %d", __func__, lock, lock->process_id,
+		  lock->refcount);
+#endif
+	if (lock->refcount)
+		return QDF_STATUS_SUCCESS;
+
+	lock->process_id = 0;
+	lock->refcount = 0;
+	lock->state = LOCK_RELEASED;
+	/* release a Lock */
+	mutex_unlock(&lock->m_lock);
+#ifdef QDF_NESTED_LOCK_DEBUG
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+		  "%s: Freeing lock %p %d %d", __func__, lock,
+		  lock->process_id, lock->refcount);
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_mutex_release);
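From a caller's point of view the mutex lifecycle, including the re-entrant same-thread path handled above, looks roughly like this (sketch; cfg_lock and cfg_update are hypothetical):

    static qdf_mutex_t cfg_lock;    /* qdf_mutex_create(&cfg_lock) at init time,
                                     * qdf_mutex_destroy(&cfg_lock) at teardown */

    static void cfg_update(void)
    {
        if (qdf_mutex_acquire(&cfg_lock) != QDF_STATUS_SUCCESS)
            return;
        qdf_mutex_acquire(&cfg_lock);    /* same thread: refcount bumps to 2 */
        /* ... critical section ... */
        qdf_mutex_release(&cfg_lock);    /* refcount back to 1, still held */
        qdf_mutex_release(&cfg_lock);    /* refcount 0, mutex actually unlocked */
    }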
+
+/**
+ * qdf_wake_lock_name() - This function returns the name of the wakelock
+ * @lock: Pointer to the wakelock
+ *
+ * This function returns the name of the wakelock
+ *
+ * Return: Pointer to the name if it is valid or a default string
+ */
+static const char *qdf_wake_lock_name(qdf_wake_lock_t *lock)
+{
+#if defined CONFIG_CNSS
+	if (lock->name)
+		return lock->name;
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	if (lock->ws.name)
+		return lock->ws.name;
+#endif
+	return "UNNAMED_WAKELOCK";
+}
+EXPORT_SYMBOL(qdf_wake_lock_name);
+
+/**
+ * qdf_wake_lock_create() - initializes a wake lock
+ * @lock: The wake lock to initialize
+ * @name: Name of wake lock
+ *
+ * Return:
+ * QDF status success: if wake lock is initialized
+ * QDF status failure: if wake lock was not initialized
+ */
+QDF_STATUS qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name)
+{
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock_init(lock, name);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_lock_init(lock, WAKE_LOCK_SUSPEND, name);
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_wake_lock_create);
+
+/**
+ * qdf_wake_lock_acquire() - acquires a wake lock
+ * @lock: The wake lock to acquire
+ * @reason: Reason for wakelock
+ *
+ * Return:
+ * QDF status success: if wake lock is acquired
+ * QDF status failure: if wake lock was not acquired
+ */
+QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason)
+{
+#ifdef CONFIG_MCL
+	host_diag_log_wlock(reason, qdf_wake_lock_name(lock),
+			WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
+			WIFI_POWER_EVENT_WAKELOCK_TAKEN);
+#endif
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock(lock);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_lock(lock);
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_wake_lock_acquire);
+
+/**
+ * qdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout
+ * @lock: The wake lock to acquire
+ * @msec: Timeout in milliseconds after which the wake lock is released
+ * @reason: Reason for wakelock
+ *
+ * Return:
+ * QDF status success: if wake lock is acquired
+ * QDF status failure: if wake lock was not acquired
+ */
+QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec,
+					 uint32_t reason)
+{
+	/* Wakelock for Rx is frequent.
+	 * It is reported only during active debug
+	 */
+#ifdef CONFIG_MCL
+	if (((cds_get_ring_log_level(RING_ID_WAKELOCK) >= WLAN_LOG_LEVEL_ACTIVE)
+			&& (WIFI_POWER_EVENT_WAKELOCK_HOLD_RX == reason)) ||
+			(WIFI_POWER_EVENT_WAKELOCK_HOLD_RX != reason)) {
+		host_diag_log_wlock(reason, qdf_wake_lock_name(lock), msec,
+				WIFI_POWER_EVENT_WAKELOCK_TAKEN);
+	}
+#endif
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock_timeout(lock, msec);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_lock_timeout(lock, msecs_to_jiffies(msec));
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_wake_lock_timeout_acquire);
+
+/**
+ * qdf_wake_lock_release() - releases a wake lock
+ * @lock: the wake lock to release
+ * @reason: Reason for wakelock
+ *
+ * Return:
+ * QDF status success: if wake lock is released
+ * QDF status failure: if wake lock was not released
+ */
+QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason)
+{
+#ifdef CONFIG_MCL
+	host_diag_log_wlock(reason, qdf_wake_lock_name(lock),
+			WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
+			WIFI_POWER_EVENT_WAKELOCK_RELEASED);
+#endif
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock_release(lock);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_unlock(lock);
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_wake_lock_release);
+
+/**
+ * qdf_wake_lock_destroy() - destroys a wake lock
+ * @lock: The wake lock to destroy
+ *
+ * Return:
+ * QDF status success: if wake lock is destroyed
+ * QDF status failure: if wake lock was not destroyed
+ */
+QDF_STATUS qdf_wake_lock_destroy(qdf_wake_lock_t *lock)
+{
+#if defined CONFIG_CNSS
+	cnss_pm_wake_lock_destroy(lock);
+#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
+	wake_lock_destroy(lock);
+#endif
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_wake_lock_destroy);
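Wake-lock lifecycle from a caller's perspective (sketch; the lock name "qdf_rx", the 50 ms timeout and the reason value 0 are placeholders for the MCL WIFI_POWER_EVENT_* codes used above):

    static qdf_wake_lock_t rx_wake_lock;

    static void rx_wake_init(void)
    {
        qdf_wake_lock_create(&rx_wake_lock, "qdf_rx");
    }

    static void rx_hold_briefly(void)
    {
        /* keep the SoC awake for up to 50 ms while RX drains */
        qdf_wake_lock_timeout_acquire(&rx_wake_lock, 50, 0 /* reason */);
    }

    static void rx_wake_deinit(void)
    {
        qdf_wake_lock_release(&rx_wake_lock, 0 /* reason */);
        qdf_wake_lock_destroy(&rx_wake_lock);
    }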
+
+/**
+ * qdf_runtime_pm_get() - do a get operation on the device
+ *
+ * A get operation will prevent a runtime suspend until a
+ * corresponding put is done.  This API should be used when sending
+ * data.
+ *
+ * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
+ * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
+ *
+ * Return: success if the bus is up and a get has been issued,
+ *   otherwise an error code.
+ */
+QDF_STATUS qdf_runtime_pm_get(void)
+{
+	void *ol_sc;
+	int ret;
+
+	ol_sc = cds_get_context(QDF_MODULE_ID_HIF);
+
+	if (ol_sc == NULL) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: HIF context is null!", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	ret = hif_pm_runtime_get(ol_sc);
+
+	if (ret)
+		return QDF_STATUS_E_FAILURE;
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_runtime_pm_get);
+
+/**
+ * qdf_runtime_pm_put() - do a put operation on the device
+ *
+ * A put operation will allow a runtime suspend after a corresponding
+ * get was done.  This API should be used when sending data.
+ *
+ * This API will return a failure if the hif module hasn't been
+ * initialized.
+ *
+ * Return: QDF_STATUS_SUCCESS if the put is performed
+ */
+QDF_STATUS qdf_runtime_pm_put(void)
+{
+	void *ol_sc;
+	int ret;
+
+	ol_sc = cds_get_context(QDF_MODULE_ID_HIF);
+
+	if (ol_sc == NULL) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: HIF context is null!", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	ret = hif_pm_runtime_put(ol_sc);
+
+	if (ret)
+		return QDF_STATUS_E_FAILURE;
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_runtime_pm_put);
+
+/**
+ * qdf_runtime_pm_prevent_suspend() - prevent a runtime bus suspend
+ * @lock: an opaque context for tracking
+ *
+ * The lock can only be acquired once per lock context and is tracked.
+ *
+ * Return: QDF_STATUS_SUCCESS or failure code.
+ */
+QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t lock)
+{
+	void *ol_sc;
+	int ret;
+
+	ol_sc = cds_get_context(QDF_MODULE_ID_HIF);
+
+	if (ol_sc == NULL) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: HIF context is null!", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	ret = hif_pm_runtime_prevent_suspend(ol_sc, lock);
+
+	if (ret)
+		return QDF_STATUS_E_FAILURE;
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_runtime_pm_prevent_suspend);
+
+/**
+ * qdf_runtime_pm_allow_suspend() - allow a previously prevented runtime bus suspend
+ * @lock: the opaque context used when the suspend was prevented
+ *
+ * Releases the per-context prevention taken by qdf_runtime_pm_prevent_suspend().
+ *
+ * Return: QDF_STATUS_SUCCESS or failure code.
+ */
+QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t lock)
+{
+	void *ol_sc;
+	int ret;
+	ol_sc = cds_get_context(QDF_MODULE_ID_HIF);
+
+	if (ol_sc == NULL) {
+		QDF_ASSERT(0);
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+		"%s: HIF context is null!", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	ret = hif_pm_runtime_allow_suspend(ol_sc, lock);
+	if (ret)
+		return QDF_STATUS_E_FAILURE;
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_runtime_pm_allow_suspend);
+
+/**
+ * qdf_runtime_lock_init() - initialize runtime lock
+ * @name: name of the runtime lock
+ *
+ * Initialize a runtime pm lock.  This lock can be used
+ * to prevent the runtime pm system from putting the bus
+ * to sleep.
+ *
+ * Return: qdf_runtime_lock_t
+ */
+qdf_runtime_lock_t qdf_runtime_lock_init(const char *name)
+{
+	return hif_runtime_lock_init(name);
+}
+EXPORT_SYMBOL(qdf_runtime_lock_init);
+
+/**
+ * qdf_runtime_lock_deinit() - deinitialize runtime pm lock
+ * @lock: the lock to deinitialize
+ *
+ * Ensures the lock is released. Frees the runtime lock.
+ *
+ * Return: void
+ */
+void qdf_runtime_lock_deinit(qdf_runtime_lock_t lock)
+{
+	void *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
+	hif_runtime_lock_deinit(hif_ctx, lock);
+}
+EXPORT_SYMBOL(qdf_runtime_lock_deinit);
+
+/**
+ * qdf_spinlock_acquire() - acquires a spin lock
+ * @lock: Spin lock to acquire
+ *
+ * Return:
+ * QDF status success: if spinlock is acquired
+ */
+QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock)
+{
+	spin_lock(&lock->spinlock);
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_spinlock_acquire);
+
+
+/**
+ * qdf_spinlock_release() - release a spin lock
+ * @lock: Spin lock to release
+ *
+ * Return:
+ * QDF status success: if spinlock is released
+ */
+QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock)
+{
+	spin_unlock(&lock->spinlock);
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_spinlock_release);
+
+/**
+ * qdf_mutex_destroy() - destroy a QDF lock
+ * @lock: Pointer to the opaque lock object to be destroyed
+ *
+ * function shall destroy the lock object referenced by lock. After a
+ * successful return from qdf_mutex_destroy()
+ * the lock object becomes, in effect, uninitialized.
+ *
+ * A destroyed lock object can be reinitialized using qdf_mutex_create();
+ * the results of otherwise referencing the object after it has been destroyed
+ * are undefined.  Calls to QDF lock functions to manipulate the lock such
+ * as qdf_mutex_acquire() will fail if the lock is destroyed.  Therefore,
+ * don't use the lock after it has been destroyed until it has
+ * been re-initialized.
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS: lock was successfully destroyed
+ * QDF failure reason codes: lock is not initialized and can't be used
+ */
+QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock)
+{
+	/* check for invalid pointer */
+	if (NULL == lock) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: NULL pointer passed in", __func__);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	if (LINUX_LOCK_COOKIE != lock->cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: uninitialized lock", __func__);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	if (in_interrupt()) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s cannot be called from interrupt context!!!",
+			  __func__);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* check if lock is released */
+	if (!mutex_trylock(&lock->m_lock)) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: lock is not released", __func__);
+		return QDF_STATUS_E_BUSY;
+	}
+	lock->cookie = 0;
+	lock->state = LOCK_DESTROYED;
+	lock->process_id = 0;
+	lock->refcount = 0;
+
+	mutex_unlock(&lock->m_lock);
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_mutex_destroy);
+
+/**
+ * qdf_spin_trylock_bh_outline() - spin trylock bottomhalf
+ * @lock: spinlock object
+ * Return: int
+ */
+int qdf_spin_trylock_bh_outline(qdf_spinlock_t *lock)
+{
+	return qdf_spin_trylock_bh(lock);
+}
+EXPORT_SYMBOL(qdf_spin_trylock_bh_outline);
+
+/**
+ * qdf_spin_lock_bh_outline() - locks the spinlock in soft irq context
+ * @lock: spinlock object pointer
+ * Return: none
+ */
+void qdf_spin_lock_bh_outline(qdf_spinlock_t *lock)
+{
+	qdf_spin_lock_bh(lock);
+}
+EXPORT_SYMBOL(qdf_spin_lock_bh_outline);
+
+/**
+ * qdf_spin_unlock_bh_outline() - unlocks spinlock in soft irq context
+ * @lock: spinlock object pointer
+ * Return: none
+ */
+void qdf_spin_unlock_bh_outline(qdf_spinlock_t *lock)
+{
+	qdf_spin_unlock_bh(lock);
+}
+EXPORT_SYMBOL(qdf_spin_unlock_bh_outline);
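The runtime-PM lock APIs above pair up as follows from a caller's side (sketch; the "qdf_tx" name and the tx_start/tx_stop helpers are hypothetical):

    static qdf_runtime_lock_t tx_rtpm_lock;

    static void tx_start(void)
    {
        tx_rtpm_lock = qdf_runtime_lock_init("qdf_tx");
        qdf_runtime_pm_prevent_suspend(tx_rtpm_lock);   /* bus must stay up */
    }

    static void tx_stop(void)
    {
        qdf_runtime_pm_allow_suspend(tx_rtpm_lock);
        qdf_runtime_lock_deinit(tx_rtpm_lock);
    }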

+ 702 - 0
qdf/linux/src/qdf_mc_timer.c

@@ -0,0 +1,702 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_mc_timer
+ * QCA driver framework timer APIs serialized to MC thread
+ */
+
+/* Include Files */
+#include <qdf_mc_timer.h>
+#include <qdf_lock.h>
+#include "qdf_lock.h"
+#include "qdf_list.h"
+#include "qdf_mem.h"
+#ifdef CONFIG_MCL
+#include <cds_mc_timer.h>
+#endif
+/* Preprocessor definitions and constants */
+
+#define LINUX_TIMER_COOKIE 0x12341234
+#define LINUX_INVALID_TIMER_COOKIE 0xfeedface
+#define TMR_INVALID_ID (0)
+
+/* Type declarations */
+
+/* Static Variable Definitions */
+static unsigned int persistent_timer_count;
+static qdf_mutex_t persistent_timer_count_lock;
+
+/* Function declarations and documentation */
+
+/**
+ * qdf_try_allowing_sleep() - clean up timer states after a timer has been deactivated
+ * @type: timer type
+ *
+ * Clean up timer state after a timer has been deactivated; check and try to
+ * allow sleep after a timer has been stopped or expired.
+ *
+ * Return: none
+ */
+void qdf_try_allowing_sleep(QDF_TIMER_TYPE type)
+{
+	if (QDF_TIMER_TYPE_WAKE_APPS == type) {
+
+		persistent_timer_count--;
+		if (0 == persistent_timer_count) {
+			/* since the number of persistent timers has
+			   decreased from 1 to 0, the timer should allow
+			   sleep
+			  */
+		}
+	}
+}
+EXPORT_SYMBOL(qdf_try_allowing_sleep);
+
+/**
+ * qdf_mc_timer_get_current_state() - get the current state of the timer
+ * @timer: Pointer to timer object
+ *
+ * Return:
+ * QDF_TIMER_STATE - qdf timer state
+ */
+QDF_TIMER_STATE qdf_mc_timer_get_current_state(qdf_mc_timer_t *timer)
+{
+	if (NULL == timer) {
+		QDF_ASSERT(0);
+		return QDF_TIMER_STATE_UNUSED;
+	}
+
+	switch (timer->state) {
+	case QDF_TIMER_STATE_STOPPED:
+	case QDF_TIMER_STATE_STARTING:
+	case QDF_TIMER_STATE_RUNNING:
+	case QDF_TIMER_STATE_UNUSED:
+		return timer->state;
+	default:
+		QDF_ASSERT(0);
+		return QDF_TIMER_STATE_UNUSED;
+	}
+}
+EXPORT_SYMBOL(qdf_mc_timer_get_current_state);
+
+/**
+ * qdf_timer_module_init() - initializes a QDF timer module.
+ *
+ * This API initializes the QDF timer module. This needs to be called
+ * exactly once prior to using any QDF timers.
+ *
+ * Return: none
+ */
+void qdf_timer_module_init(void)
+{
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
+		  "Initializing the QDF MC timer module");
+	qdf_mutex_create(&persistent_timer_count_lock);
+}
+EXPORT_SYMBOL(qdf_timer_module_init);
+
+#ifdef TIMER_MANAGER
+
+qdf_list_t qdf_timer_list;
+qdf_spinlock_t qdf_timer_list_lock;
+
+static void qdf_timer_clean(void);
+
+/**
+ * qdf_mc_timer_manager_init() - initialize QDF debug timer manager
+ *
+ * This API initializes QDF timer debug functionality.
+ *
+ * Return: none
+ */
+void qdf_mc_timer_manager_init(void)
+{
+	qdf_list_create(&qdf_timer_list, 1000);
+	qdf_spinlock_create(&qdf_timer_list_lock);
+	return;
+}
+EXPORT_SYMBOL(qdf_mc_timer_manager_init);
+
+/**
+ * qdf_timer_clean() - clean up QDF timer debug functionality
+ *
+ * This API cleans up QDF timer debug functionality and prints which QDF timers
+ * are leaked. This is called during driver unload.
+ *
+ * Return: none
+ */
+static void qdf_timer_clean(void)
+{
+	uint32_t list_size;
+	qdf_list_node_t *node;
+	QDF_STATUS qdf_status;
+
+	qdf_mc_timer_node_t *timer_node;
+
+	list_size = qdf_list_size(&qdf_timer_list);
+
+	if (!list_size)
+		return;
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: List is not Empty. list_size %d ",
+		  __func__, (int)list_size);
+
+	do {
+		qdf_spin_lock_irqsave(&qdf_timer_list_lock);
+		qdf_status = qdf_list_remove_front(&qdf_timer_list, &node);
+		qdf_spin_unlock_irqrestore(&qdf_timer_list_lock);
+		if (QDF_STATUS_SUCCESS == qdf_status) {
+			timer_node = (qdf_mc_timer_node_t *) node;
+			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
+			"timer Leak@ File %s, @Line %d",
+				  timer_node->file_name,
+				  (int)timer_node->line_num);
+			qdf_mem_free(timer_node);
+		}
+	} while (qdf_status == QDF_STATUS_SUCCESS);
+}
+EXPORT_SYMBOL(qdf_timer_clean);
+
+/**
+ * qdf_mc_timer_manager_exit() - exit QDF timer debug functionality
+ *
+ * This API exits QDF timer debug functionality.
+ *
+ * Return: none
+ */
+void qdf_mc_timer_manager_exit(void)
+{
+	qdf_timer_clean();
+	qdf_list_destroy(&qdf_timer_list);
+}
+EXPORT_SYMBOL(qdf_mc_timer_manager_exit);
+#endif
+
+/**
+ * qdf_mc_timer_init() - initialize a QDF timer
+ * @timer: Pointer to timer object
+ * @timer_type: Type of timer
+ * @callback: Callback to be called after timer expiry
+ * @user_data: User data which will be passed to callback function
+ *
+ * This API initializes a QDF timer object.
+ *
+ * qdf_mc_timer_init() initializes a QDF timer object. A timer must be
+ * initialized by calling qdf_mc_timer_initialize() before it may be used in
+ * any other timer functions.
+ *
+ * Attempting to initialize timer that is already initialized results in
+ * a failure. A destroyed timer object can be re-initialized with a call to
+ * qdf_mc_timer_init(). The results of otherwise referencing the object
+ * after it has been destroyed are undefined.
+ *
+ *  Calls to QDF timer functions to manipulate the timer such
+ *  as qdf_mc_timer_set() will fail if the timer is not initialized or has
+ *  been destroyed. Therefore, don't use the timer after it has been
+ *  destroyed until it has been re-initialized.
+ *
+ *  All callback will be executed within the CDS main thread unless it is
+ *  initialized from the Tx thread flow, in which case it will be executed
+ *  within the tx thread flow.
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS: timer is initialized successfully
+ * QDF failure status: timer initialization failed
+ */
+#ifdef TIMER_MANAGER
+QDF_STATUS qdf_mc_timer_init_debug(qdf_mc_timer_t *timer,
+				   QDF_TIMER_TYPE timer_type,
+				   qdf_mc_timer_callback_t callback,
+				   void *user_data, char *file_name,
+				   uint32_t line_num)
+{
+	QDF_STATUS qdf_status;
+
+	/* check for invalid pointer */
+	if ((timer == NULL) || (callback == NULL)) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params being passed", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	timer->timer_node = qdf_mem_malloc(sizeof(qdf_mc_timer_node_t));
+
+	if (timer->timer_node == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Not able to allocate memory for time_node",
+			  __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	qdf_mem_set(timer->timer_node, sizeof(qdf_mc_timer_node_t), 0);
+
+	timer->timer_node->file_name = file_name;
+	timer->timer_node->line_num = line_num;
+	timer->timer_node->qdf_timer = timer;
+
+	qdf_spin_lock_irqsave(&qdf_timer_list_lock);
+	qdf_status = qdf_list_insert_front(&qdf_timer_list,
+					   &timer->timer_node->node);
+	qdf_spin_unlock_irqrestore(&qdf_timer_list_lock);
+	if (QDF_STATUS_SUCCESS != qdf_status) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Unable to insert node into List qdf_status %d",
+			  __func__, qdf_status);
+	}
+
+	/* set the various members of the timer structure
+	 * with arguments passed or with default values
+	 */
+	spin_lock_init(&timer->platform_info.spinlock);
+	if (QDF_TIMER_TYPE_SW == timer_type)
+		init_timer_deferrable(&(timer->platform_info.timer));
+	else
+		init_timer(&(timer->platform_info.timer));
+#ifdef CONFIG_MCL
+	timer->platform_info.timer.function = cds_linux_timer_callback;
+#else
+	timer->platform_info.timer.function = NULL;
+#endif
+	timer->platform_info.timer.data = (unsigned long)timer;
+	timer->callback = callback;
+	timer->user_data = user_data;
+	timer->type = timer_type;
+	timer->platform_info.cookie = LINUX_TIMER_COOKIE;
+	timer->platform_info.thread_id = 0;
+	timer->state = QDF_TIMER_STATE_STOPPED;
+
+	return QDF_STATUS_SUCCESS;
+}
+#else
+QDF_STATUS qdf_mc_timer_init(qdf_mc_timer_t *timer, QDF_TIMER_TYPE timer_type,
+			     qdf_mc_timer_callback_t callback,
+			     void *user_data)
+{
+	/* check for invalid pointer */
+	if ((timer == NULL) || (callback == NULL)) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null params being passed", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* set the various members of the timer structure
+	 * with arguments passed or with default values
+	 */
+	spin_lock_init(&timer->platform_info.spinlock);
+	if (QDF_TIMER_TYPE_SW == timer_type)
+		init_timer_deferrable(&(timer->platform_info.timer));
+	else
+		init_timer(&(timer->platform_info.timer));
+#ifdef CONFIG_MCL
+	timer->platform_info.timer.function = cds_linux_timer_callback;
+#else
+	timer->platform_info.timer.function = NULL;
+#endif
+	timer->platform_info.timer.data = (unsigned long)timer;
+	timer->callback = callback;
+	timer->user_data = user_data;
+	timer->type = timer_type;
+	timer->platform_info.cookie = LINUX_TIMER_COOKIE;
+	timer->platform_info.thread_id = 0;
+	timer->state = QDF_TIMER_STATE_STOPPED;
+
+	return QDF_STATUS_SUCCESS;
+}
+#endif
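A minimal timer setup built on the init path above (sketch; the callback, the 5000 ms period and qdf_mc_timer_start() are assumptions, since the start API is not part of this hunk):

    static qdf_mc_timer_t scan_timer;

    static void scan_timeout_cb(void *ctx)      /* runs serialized to the MC thread */
    {
        /* handle scan timeout */
    }

    static void scan_arm(void)
    {
        qdf_mc_timer_init(&scan_timer, QDF_TIMER_TYPE_SW,
                          scan_timeout_cb, NULL);
        qdf_mc_timer_start(&scan_timer, 5000);   /* assumed API, 5000 ms */
    }

    static void scan_disarm(void)
    {
        qdf_mc_timer_destroy(&scan_timer);
    }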
+
+/**
+ * qdf_mc_timer_destroy() - destroy QDF timer
+ * @timer: Pointer to timer object
+ *
+ * qdf_mc_timer_destroy() function shall destroy the timer object.
+ * After a successful return from qdf_mc_timer_destroy() the timer
+ * object becomes, in effect, uninitialized.
+ *
+ * A destroyed timer object can be re-initialized by calling
+ * qdf_mc_timer_init().  The results of otherwise referencing the object
+ * after it has been destroyed are undefined.
+ *
+ * Calls to QDF timer functions to manipulate the timer, such
+ * as qdf_mc_timer_set() will fail if the lock is destroyed.  Therefore,
+ * don't use the timer after it has been destroyed until it has
+ * been re-initialized.
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS - timer is destroyed successfully
+ * QDF failure status - timer destroy failed
+ */
+#ifdef TIMER_MANAGER
+QDF_STATUS qdf_mc_timer_destroy(qdf_mc_timer_t *timer)
+{
+	QDF_STATUS v_status = QDF_STATUS_SUCCESS;
+	unsigned long flags;
+
+	/* check for invalid pointer */
+	if (NULL == timer) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null timer pointer being passed", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* Check if timer refers to an uninitialized object */
+	if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot destroy uninitialized timer", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	qdf_spin_lock_irqsave(&qdf_timer_list_lock);
+	v_status = qdf_list_remove_node(&qdf_timer_list,
+					&timer->timer_node->node);
+	qdf_spin_unlock_irqrestore(&qdf_timer_list_lock);
+	if (v_status != QDF_STATUS_SUCCESS) {
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+	qdf_mem_free(timer->timer_node);
+
+	spin_lock_irqsave(&timer->platform_info.spinlock, flags);
+
+	switch (timer->state) {
+
+	case QDF_TIMER_STATE_STARTING:
+		v_status = QDF_STATUS_E_BUSY;
+		break;
+
+	case QDF_TIMER_STATE_RUNNING:
+		/* Stop the timer first */
+		del_timer(&(timer->platform_info.timer));
+		v_status = QDF_STATUS_SUCCESS;
+		break;
+	case QDF_TIMER_STATE_STOPPED:
+		v_status = QDF_STATUS_SUCCESS;
+		break;
+
+	case QDF_TIMER_STATE_UNUSED:
+		v_status = QDF_STATUS_E_ALREADY;
+		break;
+
+	default:
+		v_status = QDF_STATUS_E_FAULT;
+		break;
+	}
+
+	if (QDF_STATUS_SUCCESS == v_status) {
+		timer->platform_info.cookie = LINUX_INVALID_TIMER_COOKIE;
+		timer->state = QDF_TIMER_STATE_UNUSED;
+		spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+		return v_status;
+	}
+
+	spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: Cannot destroy timer in state = %d", __func__,
+		  timer->state);
+	QDF_ASSERT(0);
+
+	return v_status;
+}
+EXPORT_SYMBOL(qdf_mc_timer_destroy);
+
+#else
+
+/**
+ * qdf_mc_timer_destroy() - destroy QDF timer
+ * @timer: Pointer to timer object
+ *
+ * qdf_mc_timer_destroy() function shall destroy the timer object.
+ * After a successful return from \a qdf_mc_timer_destroy() the timer
+ * object becomes, in effect, uninitialized.
+ *
+ * A destroyed timer object can be re-initialized by calling
+ * qdf_mc_timer_init(). The results of otherwise referencing the object
+ * after it has been destroyed are undefined.
+ *
+ * Calls to QDF timer functions to manipulate the timer, such
+ * as qdf_mc_timer_set(), will fail once the timer is destroyed. Therefore,
+ * don't use the timer after it has been destroyed until it has
+ * been re-initialized.
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS - timer is destroyed successfully
+ * QDF failure status - timer destroy failed
+ */
+QDF_STATUS qdf_mc_timer_destroy(qdf_mc_timer_t *timer)
+{
+	QDF_STATUS v_status = QDF_STATUS_SUCCESS;
+	unsigned long flags;
+
+	/* check for invalid pointer */
+	if (NULL == timer) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Null timer pointer being passed", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_FAULT;
+	}
+
+	/* check if timer refers to an uninitialized object */
+	if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot destroy uninitialized timer", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+	spin_lock_irqsave(&timer->platform_info.spinlock, flags);
+
+	switch (timer->state) {
+
+	case QDF_TIMER_STATE_STARTING:
+		v_status = QDF_STATUS_E_BUSY;
+		break;
+
+	case QDF_TIMER_STATE_RUNNING:
+		/* Stop the timer first */
+		del_timer(&(timer->platform_info.timer));
+		v_status = QDF_STATUS_SUCCESS;
+		break;
+
+	case QDF_TIMER_STATE_STOPPED:
+		v_status = QDF_STATUS_SUCCESS;
+		break;
+
+	case QDF_TIMER_STATE_UNUSED:
+		v_status = QDF_STATUS_E_ALREADY;
+		break;
+
+	default:
+		v_status = QDF_STATUS_E_FAULT;
+		break;
+	}
+
+	if (QDF_STATUS_SUCCESS == v_status) {
+		timer->platform_info.cookie = LINUX_INVALID_TIMER_COOKIE;
+		timer->state = QDF_TIMER_STATE_UNUSED;
+		spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+		return v_status;
+	}
+
+	spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: Cannot destroy timer in state = %d", __func__,
+		  timer->state);
+	QDF_ASSERT(0);
+
+	return v_status;
+}
+EXPORT_SYMBOL(qdf_mc_timer_destroy);
+#endif
+
+/**
+ * qdf_mc_timer_start() - start a QDF timer object
+ * @timer: Pointer to timer object
+ * @expiration_time: Time to expire
+ *
+ * qdf_mc_timer_start() function starts a timer to expire after the
+ * specified interval, thus running the timer callback function when
+ * the interval expires.
+ *
+ * A timer only runs once (a one-shot timer). To re-start the
+ * timer, qdf_mc_timer_start() has to be called after the timer runs
+ * or has been cancelled.
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS: timer is started successfully
+ * QDF failure status: timer start failed
+ */
+QDF_STATUS qdf_mc_timer_start(qdf_mc_timer_t *timer, uint32_t expiration_time)
+{
+	unsigned long flags;
+
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
+		  "timer Addr inside qdf_mc_timer_start : 0x%p ", timer);
+
+	/* check for invalid pointer */
+	if (NULL == timer) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s Null timer pointer being passed", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* check if timer refers to an uninitialized object */
+	if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot start uninitialized timer", __func__);
+		QDF_ASSERT(0);
+
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* check if timer has expiration time less than 10 ms */
+	if (expiration_time < 10) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot start a timer with expiration less than 10 ms",
+			  __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* make sure the remainder of the logic isn't interrupted */
+	spin_lock_irqsave(&timer->platform_info.spinlock, flags);
+
+	/* check whether the timer can be started */
+	if (QDF_TIMER_STATE_STOPPED != timer->state) {
+		spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot start timer in state = %d ", __func__,
+			  timer->state);
+		return QDF_STATUS_E_ALREADY;
+	}
+
+	/* start the timer */
+	mod_timer(&(timer->platform_info.timer),
+		  jiffies + msecs_to_jiffies(expiration_time));
+
+	timer->state = QDF_TIMER_STATE_RUNNING;
+
+	/* get the thread ID on which the timer is being started */
+	timer->platform_info.thread_id = current->pid;
+
+	if (QDF_TIMER_TYPE_WAKE_APPS == timer->type) {
+		persistent_timer_count++;
+		if (1 == persistent_timer_count) {
+			/* since we now have one persistent timer,
+			 * we need to disallow sleep
+			 * sleep_negate_okts(sleep_client_handle);
+			 */
+		}
+	}
+
+	spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_mc_timer_start);
+
+/**
+ * qdf_mc_timer_stop() - stop a QDF timer
+ * @timer: Pointer to timer object
+ *
+ * qdf_mc_timer_stop() function stops a timer that has been started but
+ * has not expired, essentially cancelling the 'start' request.
+ *
+ * After a timer is stopped, it goes back to the state it was in after it
+ * was created and can be started again via a call to qdf_mc_timer_start().
+ *
+ * Return:
+ * QDF_STATUS_SUCCESS: timer is stopped successfully
+ * QDF failure status: timer stop failed
+ */
+QDF_STATUS qdf_mc_timer_stop(qdf_mc_timer_t *timer)
+{
+	unsigned long flags;
+
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
+		  "%s: timer Addr inside qdf_mc_timer_stop : 0x%p",
+		 __func__, timer);
+
+	/* check for invalid pointer */
+	if (NULL == timer) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s Null timer pointer being passed", __func__);
+		QDF_ASSERT(0);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* check if timer refers to an uninitialized object */
+	if (LINUX_TIMER_COOKIE != timer->platform_info.cookie) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: Cannot stop uninitialized timer", __func__);
+		QDF_ASSERT(0);
+
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* ensure the timer state is correct */
+	spin_lock_irqsave(&timer->platform_info.spinlock, flags);
+
+	if (QDF_TIMER_STATE_RUNNING != timer->state) {
+		spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
+			  "%s: Cannot stop timer in state = %d",
+			  __func__, timer->state);
+		return QDF_STATUS_SUCCESS;
+	}
+
+	timer->state = QDF_TIMER_STATE_STOPPED;
+
+	del_timer(&(timer->platform_info.timer));
+
+	spin_unlock_irqrestore(&timer->platform_info.spinlock, flags);
+
+	qdf_try_allowing_sleep(timer->type);
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_mc_timer_stop);
+
+/**
+ * qdf_mc_timer_get_system_ticks() - get the system time in 10ms ticks
+ *
+ * qdf_mc_timer_get_system_ticks() function returns the current number
+ * of timer ticks in 10msec intervals. This function is suitable for
+ * timestamping and for calculating time intervals as the difference
+ * between two timestamps.
+ *
+ * Return:
+ * The current system tick count (in 10msec intervals).  This
+ * function cannot fail.
+ */
+unsigned long qdf_mc_timer_get_system_ticks(void)
+{
+	return jiffies_to_msecs(jiffies) / 10;
+}
+EXPORT_SYMBOL(qdf_mc_timer_get_system_ticks);
+
+/**
+ * qdf_mc_timer_get_system_time() - Get the system time in milliseconds
+ *
+ * qdf_mc_timer_get_system_time() function returns the number of milliseconds
+ * that have elapsed since the system was started
+ *
+ * Return:
+ * The current system time in milliseconds
+ */
+unsigned long qdf_mc_timer_get_system_time(void)
+{
+	struct timeval tv;
+	do_gettimeofday(&tv);
+	return tv.tv_sec * 1000 + tv.tv_usec / 1000;
+}
+EXPORT_SYMBOL(qdf_mc_timer_get_system_time);
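For reference, a minimal caller-side sketch (not part of this change) of the lifecycle these timer APIs expect; struct my_ctx, my_timeout_cb and the my_*_watchdog helpers are hypothetical names, and the callback signature void (*)(void *) is assumed from the init/start code above:

struct my_ctx {
	qdf_mc_timer_t watchdog;
};

/* Hypothetical callback; qdf_mc_timer_init() passes user_data back here */
static void my_timeout_cb(void *user_data)
{
	struct my_ctx *ctx = user_data;

	(void)ctx;	/* handle the timeout */
}

static QDF_STATUS my_start_watchdog(struct my_ctx *ctx)
{
	QDF_STATUS status;

	status = qdf_mc_timer_init(&ctx->watchdog, QDF_TIMER_TYPE_SW,
				   my_timeout_cb, ctx);
	if (QDF_STATUS_SUCCESS != status)
		return status;

	/* expiration must be at least 10 ms, see qdf_mc_timer_start() */
	return qdf_mc_timer_start(&ctx->watchdog, 50);
}

static void my_stop_watchdog(struct my_ctx *ctx)
{
	qdf_mc_timer_stop(&ctx->watchdog);
	qdf_mc_timer_destroy(&ctx->watchdog);
}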

+ 951 - 0
qdf/linux/src/qdf_mem.c

@@ -0,0 +1,951 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_mem
+ * This file provides OS dependent memory management APIs
+ */
+
+#include "qdf_mem.h"
+#include "qdf_nbuf.h"
+#include "qdf_lock.h"
+#include "qdf_mc_timer.h"
+#include "qdf_module.h"
+
+#if defined(CONFIG_CNSS)
+#include <net/cnss.h>
+#endif
+
+#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
+#include <net/cnss_prealloc.h>
+#endif
+
+#ifdef MEMORY_DEBUG
+#include <qdf_list.h>
+qdf_list_t qdf_mem_list;
+qdf_spinlock_t qdf_mem_list_lock;
+
+static uint8_t WLAN_MEM_HEADER[] = { 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
+					0x67, 0x68 };
+static uint8_t WLAN_MEM_TAIL[] = { 0x80, 0x81, 0x82, 0x83, 0x84, 0x85,
+					0x86, 0x87 };
+
+/**
+ * struct s_qdf_mem_struct - memory object to debug
+ * @node: list node
+ * @file_name: name of the file from which the allocation was made
+ * @line_num: line number of the allocation call
+ * @size: size of the allocation
+ * @header: array that contains the header pattern
+ */
+struct s_qdf_mem_struct {
+	qdf_list_node_t node;
+	char *file_name;
+	unsigned int line_num;
+	unsigned int size;
+	uint8_t header[8];
+};
+#endif
+
+/* Preprocessor Definitions and Constants */
+#define QDF_GET_MEMORY_TIME_THRESHOLD 3000
+
+int qdf_dbg_mask;
+qdf_declare_param(qdf_dbg_mask, int);
+
+u_int8_t prealloc_disabled = 1;
+qdf_declare_param(prealloc_disabled, byte);
+
+/**
+ * __qdf_mempool_init() - Create and initialize memory pool
+ *
+ * @osdev: platform device object
+ * @pool_addr: address of the pool created
+ * @elem_cnt: no. of elements in pool
+ * @elem_size: size of each pool element in bytes
+ * @flags: flags
+ *
+ * Return: 0 on success, or an error code if the pool could not be created
+ */
+int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
+		       int elem_cnt, size_t elem_size, u_int32_t flags)
+{
+	__qdf_mempool_ctxt_t *new_pool = NULL;
+	u_int32_t align = L1_CACHE_BYTES;
+	unsigned long aligned_pool_mem;
+	int pool_id;
+	int i;
+
+	if (prealloc_disabled) {
+		/* TBD: We can maintain a list of pools in qdf_device_t
+		 * to help debugging
+		 * when pre-allocation is not enabled
+		 */
+		new_pool = (__qdf_mempool_ctxt_t *)
+			kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
+		if (new_pool == NULL)
+			return -ENOMEM;
+
+		memset(new_pool, 0, sizeof(*new_pool));
+		/* TBD: define flags for zeroing buffers etc */
+		new_pool->flags = flags;
+		new_pool->elem_size = elem_size;
+		new_pool->max_elem = elem_cnt;
+		*pool_addr = new_pool;
+		return 0;
+	}
+
+	for (pool_id = 0; pool_id < MAX_MEM_POOLS; pool_id++) {
+		if (osdev->mem_pool[pool_id] == NULL)
+			break;
+	}
+
+	if (pool_id == MAX_MEM_POOLS)
+		return -ENOMEM;
+
+	new_pool = osdev->mem_pool[pool_id] = (__qdf_mempool_ctxt_t *)
+		kmalloc(sizeof(__qdf_mempool_ctxt_t), GFP_KERNEL);
+	if (new_pool == NULL)
+		return -ENOMEM;
+
+	memset(new_pool, 0, sizeof(*new_pool));
+	/* TBD: define flags for zeroing buffers etc */
+	new_pool->flags = flags;
+	new_pool->pool_id = pool_id;
+
+	/* Round up the element size to cacheline */
+	new_pool->elem_size = roundup(elem_size, L1_CACHE_BYTES);
+	new_pool->mem_size = elem_cnt * new_pool->elem_size +
+				((align)?(align - 1):0);
+
+	new_pool->pool_mem = kzalloc(new_pool->mem_size, GFP_KERNEL);
+	if (new_pool->pool_mem == NULL) {
+			/* TBD: Check if we need get_free_pages above */
+		kfree(new_pool);
+		osdev->mem_pool[pool_id] = NULL;
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&new_pool->lock);
+
+	/* Initialize free list */
+	aligned_pool_mem = (unsigned long)(new_pool->pool_mem) +
+			((align) ? (unsigned long)(new_pool->pool_mem)%align:0);
+	STAILQ_INIT(&new_pool->free_list);
+
+	for (i = 0; i < elem_cnt; i++)
+		STAILQ_INSERT_TAIL(&(new_pool->free_list),
+			(mempool_elem_t *)(aligned_pool_mem +
+			(new_pool->elem_size * i)), mempool_entry);
+
+
+	new_pool->free_cnt = elem_cnt;
+	*pool_addr = new_pool;
+	return 0;
+}
+EXPORT_SYMBOL(__qdf_mempool_init);
+
+/**
+ * __qdf_mempool_destroy() - Destroy memory pool
+ * @osdev: platform device object
+ * @pool: Handle to memory pool
+ *
+ * Return: none
+ */
+void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool)
+{
+	int pool_id = 0;
+
+	if (!pool)
+		return;
+
+	if (prealloc_disabled) {
+		kfree(pool);
+		return;
+	}
+
+	pool_id = pool->pool_id;
+
+	/* TBD: Check if free count matches elem_cnt if debug is enabled */
+	kfree(pool->pool_mem);
+	kfree(pool);
+	osdev->mem_pool[pool_id] = NULL;
+}
+EXPORT_SYMBOL(__qdf_mempool_destroy);
+
+/**
+ * __qdf_mempool_alloc() - Allocate an element from the memory pool
+ *
+ * @osdev: platform device object
+ * @pool: Handle to memory pool
+ *
+ * Return: Pointer to the allocated element or NULL if the pool is empty
+ */
+void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool)
+{
+	void *buf = NULL;
+
+	if (!pool)
+		return NULL;
+
+	if (prealloc_disabled)
+		return  qdf_mem_malloc(pool->elem_size);
+
+	spin_lock_bh(&pool->lock);
+
+	buf = STAILQ_FIRST(&pool->free_list);
+	if (buf != NULL) {
+		STAILQ_REMOVE_HEAD(&pool->free_list, mempool_entry);
+		pool->free_cnt--;
+	}
+
+	/* TBD: Update free count if debug is enabled */
+	spin_unlock_bh(&pool->lock);
+
+	return buf;
+}
+EXPORT_SYMBOL(__qdf_mempool_alloc);
+
+/**
+ * __qdf_mempool_free() - Free a memory pool element
+ * @osdev: Platform device object
+ * @pool: Handle to memory pool
+ * @buf: Element to be freed
+ *
+ * Return: none
+ */
+void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf)
+{
+	if (!pool)
+		return;
+
+
+	if (prealloc_disabled)
+		return qdf_mem_free(buf);
+
+	spin_lock_bh(&pool->lock);
+	pool->free_cnt++;
+
+	STAILQ_INSERT_TAIL
+		(&pool->free_list, (mempool_elem_t *)buf, mempool_entry);
+	spin_unlock_bh(&pool->lock);
+}
+EXPORT_SYMBOL(__qdf_mempool_free);
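A hedged usage sketch of the pool API above (callers would normally go through the qdf_mempool_* wrappers; struct my_desc, the element count and the zero flags value are illustrative only, not part of this commit):

struct my_desc {
	uint32_t id;
	void *payload;
};

static int my_pool_demo(qdf_device_t osdev)
{
	__qdf_mempool_t pool;
	struct my_desc *desc;

	/* 64 fixed-size elements; flags are unused here */
	if (__qdf_mempool_init(osdev, &pool, 64, sizeof(struct my_desc), 0))
		return -ENOMEM;

	desc = __qdf_mempool_alloc(osdev, pool);
	if (desc) {
		desc->id = 1;
		__qdf_mempool_free(osdev, pool, desc);
	}

	__qdf_mempool_destroy(osdev, pool);
	return 0;
}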
+
+/**
+ * qdf_mem_alloc_outline() - allocate QDF memory
+ * @osdev: platform device object
+ * @size: Number of bytes of memory to allocate.
+ *
+ * This function will dynamically allocate the specified number of bytes of
+ * memory.
+ *
+ * Return:
+ * Upon successful allocation, returns a non-NULL pointer to the allocated
+ * memory.  If this function is unable to allocate the amount of memory
+ * specified (for any reason) it returns NULL.
+ */
+void *
+qdf_mem_alloc_outline(qdf_device_t osdev, size_t size)
+{
+	return qdf_mem_malloc(size);
+}
+EXPORT_SYMBOL(qdf_mem_alloc_outline);
+
+/**
+ * qdf_mem_free_outline() - QDF memory free API
+ * @buf: Pointer to the starting address of the memory to be freed.
+ *
+ * This function will free the memory pointed to by 'buf'. It also checks
+ * whether the memory is corrupted or being double freed and panics.
+ *
+ * Return: none
+ */
+void
+qdf_mem_free_outline(void *buf)
+{
+	qdf_mem_free(buf);
+}
+EXPORT_SYMBOL(qdf_mem_free_outline);
+
+/**
+ * qdf_mem_zero_outline() - zero out memory
+ * @buf: pointer to memory that will be set to zero
+ * @size: number of bytes to zero
+ *
+ * This function sets the memory location to all zeros, essentially clearing
+ * the memory.
+ *
+ * Return: none
+ */
+void
+qdf_mem_zero_outline(void *buf, qdf_size_t size)
+{
+	qdf_mem_zero(buf, size);
+}
+EXPORT_SYMBOL(qdf_mem_zero_outline);
+
+/* External Function implementation */
+#ifdef MEMORY_DEBUG
+
+/**
+ * qdf_mem_init() - initialize qdf memory debug functionality
+ *
+ * Return: none
+ */
+void qdf_mem_init(void)
+{
+	/* Initializing the list with a maximum size of 60000 */
+	qdf_list_create(&qdf_mem_list, 60000);
+	qdf_spinlock_create(&qdf_mem_list_lock);
+	qdf_net_buf_debug_init();
+	return;
+}
+EXPORT_SYMBOL(qdf_mem_init);
+
+/**
+ * qdf_mem_clean() - display memory leak debug info and free leaked pointers
+ *
+ * Return: none
+ */
+void qdf_mem_clean(void)
+{
+	uint32_t list_size;
+	list_size = qdf_list_size(&qdf_mem_list);
+	qdf_net_buf_debug_clean();
+	if (list_size) {
+		qdf_list_node_t *node;
+		QDF_STATUS qdf_status;
+
+		struct s_qdf_mem_struct *mem_struct;
+		char *prev_mleak_file = "";
+		unsigned int prev_mleak_line_num = 0;
+		unsigned int prev_mleak_sz = 0;
+		unsigned int mleak_cnt = 0;
+
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
+			  "%s: List is not Empty. list_size %d ",
+			  __func__, (int)list_size);
+
+		do {
+			qdf_spin_lock(&qdf_mem_list_lock);
+			qdf_status =
+				qdf_list_remove_front(&qdf_mem_list, &node);
+			qdf_spin_unlock(&qdf_mem_list_lock);
+			if (QDF_STATUS_SUCCESS == qdf_status) {
+				mem_struct = (struct s_qdf_mem_struct *)node;
+				/* Take care to log multiple memory leaks
+				 * from the same place only once
+				 */
+				if (strcmp(prev_mleak_file,
+					mem_struct->file_name)
+				    || (prev_mleak_line_num !=
+					mem_struct->line_num)
+				    || (prev_mleak_sz != mem_struct->size)) {
+					if (mleak_cnt != 0) {
+						QDF_TRACE(QDF_MODULE_ID_QDF,
+							  QDF_TRACE_LEVEL_FATAL,
+							  "%d Time Memory Leak@ File %s, @Line %d, size %d",
+							  mleak_cnt,
+							  prev_mleak_file,
+							  prev_mleak_line_num,
+							  prev_mleak_sz);
+					}
+					prev_mleak_file = mem_struct->file_name;
+					prev_mleak_line_num =
+						 mem_struct->line_num;
+					prev_mleak_sz = mem_struct->size;
+					mleak_cnt = 0;
+				}
+				mleak_cnt++;
+				kfree((void *)mem_struct);
+			}
+		} while (qdf_status == QDF_STATUS_SUCCESS);
+
+		/* Print last memory leak from the module */
+		if (mleak_cnt) {
+			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
+				  "%d Time memory Leak@ File %s, @Line %d, size %d",
+				  mleak_cnt, prev_mleak_file,
+				  prev_mleak_line_num, prev_mleak_sz);
+		}
+#ifdef CONFIG_HALT_KMEMLEAK
+		BUG_ON(0);
+#endif
+	}
+}
+EXPORT_SYMBOL(qdf_mem_clean);
+
+/**
+ * qdf_mem_exit() - exit qdf memory debug functionality
+ *
+ * Return: none
+ */
+void qdf_mem_exit(void)
+{
+	qdf_net_buf_debug_exit();
+	qdf_mem_clean();
+	qdf_list_destroy(&qdf_mem_list);
+}
+EXPORT_SYMBOL(qdf_mem_exit);
+
+/**
+ * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
+ * @size: Number of bytes of memory to allocate.
+ * @file_name: File name from which memory allocation is called
+ * @line_num: Line number from which memory allocation is called
+ *
+ * This function will dynamically allocate the specified number of bytes of
+ * memory and add it to the qdf tracking list to check for memory leaks and
+ * corruption.
+ *
+ * Return:
+ * Upon successful allocation, returns a non-NULL pointer to the allocated
+ * memory.  If this function is unable to allocate the amount of memory
+ * specified (for any reason) it returns %NULL.
+ */
+void *qdf_mem_malloc_debug(size_t size,
+			char *file_name, uint32_t line_num)
+{
+	struct s_qdf_mem_struct *mem_struct;
+	void *mem_ptr = NULL;
+	uint32_t new_size;
+	int flags = GFP_KERNEL;
+	unsigned long  time_before_kzalloc;
+
+	if (size > (1024 * 1024) || size == 0) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s: called with invalid arg; passed in %zu !!!",
+			  __func__, size);
+		return NULL;
+	}
+
+#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
+	if (size > WCNSS_PRE_ALLOC_GET_THRESHOLD) {
+		void *pmem;
+		pmem = wcnss_prealloc_get(size);
+		if (NULL != pmem) {
+			memset(pmem, 0, size);
+			return pmem;
+		}
+	}
+#endif
+
+	if (in_interrupt() || irqs_disabled() || in_atomic())
+		flags = GFP_ATOMIC;
+
+	new_size = size + sizeof(struct s_qdf_mem_struct) + 8; /* 8 bytes for WLAN_MEM_TAIL */
+	time_before_kzalloc = qdf_mc_timer_get_system_time();
+	mem_struct = (struct s_qdf_mem_struct *)kzalloc(new_size, flags);
+	/*
+	 * Log if the time taken by kzalloc is greater than
+	 * QDF_GET_MEMORY_TIME_THRESHOLD msec
+	 */
+	if (qdf_mc_timer_get_system_time() - time_before_kzalloc >=
+					  QDF_GET_MEMORY_TIME_THRESHOLD)
+			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+				  "%s: kzalloc took %lu msec for size %zu called from %pS at line %d",
+			 __func__,
+			 qdf_mc_timer_get_system_time() - time_before_kzalloc,
+			 size, (void *)_RET_IP_, line_num);
+
+	if (mem_struct != NULL) {
+		QDF_STATUS qdf_status;
+
+		mem_struct->file_name = file_name;
+		mem_struct->line_num = line_num;
+		mem_struct->size = size;
+
+		qdf_mem_copy(&mem_struct->header[0],
+			     &WLAN_MEM_HEADER[0], sizeof(WLAN_MEM_HEADER));
+
+		qdf_mem_copy((uint8_t *) (mem_struct + 1) + size,
+			     &WLAN_MEM_TAIL[0], sizeof(WLAN_MEM_TAIL));
+
+		qdf_spin_lock_irqsave(&qdf_mem_list_lock);
+		qdf_status = qdf_list_insert_front(&qdf_mem_list,
+						   &mem_struct->node);
+		qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
+		if (QDF_STATUS_SUCCESS != qdf_status) {
+			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+				  "%s: Unable to insert node into List qdf_status %d",
+				  __func__, qdf_status);
+		}
+
+		mem_ptr = (void *)(mem_struct + 1);
+	}
+	return mem_ptr;
+}
+EXPORT_SYMBOL(qdf_mem_malloc_debug);
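Callers are expected to reach qdf_mem_malloc_debug() through a wrapper that records the call site; a plausible sketch of such a macro follows. The real definition lives in qdf_mem.h, which is not shown in this hunk, so treat this as an assumption rather than the actual header contents:

/* Sketch only: route qdf_mem_malloc() to the debug allocator when
 * MEMORY_DEBUG is enabled, recording the caller's file and line.
 */
#ifdef MEMORY_DEBUG
#define qdf_mem_malloc(size) \
	qdf_mem_malloc_debug(size, (char *)__FILE__, __LINE__)
#endif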
+
+/**
+ * qdf_mem_free() - QDF memory free API
+ * @ptr: Pointer to the starting address of the memory to be freed.
+ *
+ * This function will free the memory pointed to by 'ptr'. It also checks
+ * whether the memory is corrupted or being double freed and panics.
+ *
+ * Return: none
+ */
+void qdf_mem_free(void *ptr)
+{
+	if (ptr != NULL) {
+		QDF_STATUS qdf_status;
+		struct s_qdf_mem_struct *mem_struct =
+			((struct s_qdf_mem_struct *)ptr) - 1;
+
+#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
+		if (wcnss_prealloc_put(ptr))
+			return;
+#endif
+
+		qdf_spin_lock_irqsave(&qdf_mem_list_lock);
+		qdf_status =
+			qdf_list_remove_node(&qdf_mem_list, &mem_struct->node);
+		qdf_spin_unlock_irqrestore(&qdf_mem_list_lock);
+
+		if (QDF_STATUS_SUCCESS == qdf_status) {
+			if (qdf_mem_cmp(mem_struct->header,
+						 &WLAN_MEM_HEADER[0],
+						 sizeof(WLAN_MEM_HEADER))) {
+				QDF_TRACE(QDF_MODULE_ID_QDF,
+					  QDF_TRACE_LEVEL_FATAL,
+					  "Memory Header is corrupted. mem_info: Filename %s, line_num %d",
+					  mem_struct->file_name,
+					  (int)mem_struct->line_num);
+				QDF_BUG(0);
+			}
+			if (qdf_mem_cmp((uint8_t *) ptr + mem_struct->size,
+					    &WLAN_MEM_TAIL[0],
+					    sizeof(WLAN_MEM_TAIL))) {
+				QDF_TRACE(QDF_MODULE_ID_QDF,
+					  QDF_TRACE_LEVEL_FATAL,
+					  "Memory Trailer is corrupted. mem_info: Filename %s, line_num %d",
+					  mem_struct->file_name,
+					  (int)mem_struct->line_num);
+				QDF_BUG(0);
+			}
+			kfree((void *)mem_struct);
+		} else {
+			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
+				  "%s: Unallocated memory (double free?)",
+				  __func__);
+			QDF_BUG(0);
+		}
+	}
+}
+EXPORT_SYMBOL(qdf_mem_free);
+#else
+
+/**
+ * qdf_mem_malloc() - allocate QDF memory
+ * @size: Number of bytes of memory to allocate.
+ *
+ * This function will dynamically allocate the specified number of bytes of
+ * memory.
+ *
+ * Return:
+ * Upon successful allocation, returns a non-NULL pointer to the allocated
+ * memory.  If this function is unable to allocate the amount of memory
+ * specified (for any reason) it returns NULL.
+ */
+void *qdf_mem_malloc(size_t size)
+{
+	int flags = GFP_KERNEL;
+
+	if (in_interrupt() || irqs_disabled())
+		flags = GFP_ATOMIC;
+
+	return kzalloc(size, flags);
+}
+EXPORT_SYMBOL(qdf_mem_malloc);
+
+/**
+ * qdf_mem_free() - free QDF memory
+ * @ptr: Pointer to the starting address of the memory to be freed.
+ *
+ * This function will free the memory pointed to by 'ptr'.
+ *
+ * Return: None
+ */
+void qdf_mem_free(void *ptr)
+{
+	if (ptr == NULL)
+		return;
+#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
+	if (wcnss_prealloc_put(ptr))
+		return;
+#endif
+	kfree(ptr);
+}
+EXPORT_SYMBOL(qdf_mem_free);
+#endif
+
+/**
+ * qdf_mem_multi_pages_alloc() - allocate large size of kernel memory
+ * @osdev: OS device handle pointer
+ * @pages: Multi page information storage
+ * @element_size: Each element size
+ * @element_num: Total number of elements to be allocated
+ * @memctxt: Memory context
+ * @cacheable: Coherent memory or cacheable memory
+ *
+ * This function will allocate a large amount of memory over multiple pages.
+ * Large contiguous memory allocations fail frequently, so instead of
+ * allocating the whole block in one shot, the memory is allocated as
+ * multiple non-contiguous pages which are combined at the point of use.
+ *
+ * Return: None
+ */
+void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
+			       struct qdf_mem_multi_page_t *pages,
+			       size_t element_size, uint16_t element_num,
+			       qdf_dma_context_t memctxt, bool cacheable)
+{
+	uint16_t page_idx;
+	struct qdf_mem_dma_page_t *dma_pages;
+	void **cacheable_pages = NULL;
+	uint16_t i;
+
+	pages->num_element_per_page = PAGE_SIZE / element_size;
+	if (!pages->num_element_per_page) {
+		qdf_print("Invalid page %d or element size %d",
+			  (int)PAGE_SIZE, (int)element_size);
+		goto out_fail;
+	}
+
+	pages->num_pages = element_num / pages->num_element_per_page;
+	if (element_num % pages->num_element_per_page)
+		pages->num_pages++;
+
+	if (cacheable) {
+		/* Pages information storage */
+		pages->cacheable_pages = qdf_mem_malloc(
+			pages->num_pages * sizeof(pages->cacheable_pages));
+		if (!pages->cacheable_pages) {
+			qdf_print("Cacheable page storage alloc fail");
+			goto out_fail;
+		}
+
+		cacheable_pages = pages->cacheable_pages;
+		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
+			cacheable_pages[page_idx] = qdf_mem_malloc(PAGE_SIZE);
+			if (!cacheable_pages[page_idx]) {
+				qdf_print("cacheable page alloc fail, pi %d",
+					  page_idx);
+				goto page_alloc_fail;
+			}
+		}
+		pages->dma_pages = NULL;
+	} else {
+		pages->dma_pages = qdf_mem_malloc(
+			pages->num_pages * sizeof(struct qdf_mem_dma_page_t));
+		if (!pages->dma_pages) {
+			qdf_print("dmaable page storage alloc fail");
+			goto out_fail;
+		}
+
+		dma_pages = pages->dma_pages;
+		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
+			dma_pages->page_v_addr_start =
+				qdf_mem_alloc_consistent(osdev, osdev->dev,
+					 PAGE_SIZE,
+					&dma_pages->page_p_addr);
+			if (!dma_pages->page_v_addr_start) {
+				qdf_print("dmaable page alloc fail pi %d",
+					page_idx);
+				goto page_alloc_fail;
+			}
+			dma_pages->page_v_addr_end =
+				dma_pages->page_v_addr_start + PAGE_SIZE;
+			dma_pages++;
+		}
+		pages->cacheable_pages = NULL;
+	}
+	return;
+
+page_alloc_fail:
+	if (cacheable) {
+		for (i = 0; i < page_idx; i++)
+			qdf_mem_free(pages->cacheable_pages[i]);
+		qdf_mem_free(pages->cacheable_pages);
+	} else {
+		dma_pages = pages->dma_pages;
+		for (i = 0; i < page_idx; i++) {
+			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
+				dma_pages->page_v_addr_start,
+				dma_pages->page_p_addr, memctxt);
+			dma_pages++;
+		}
+		qdf_mem_free(pages->dma_pages);
+	}
+
+out_fail:
+	pages->cacheable_pages = NULL;
+	pages->dma_pages = NULL;
+	pages->num_pages = 0;
+	return;
+}
+EXPORT_SYMBOL(qdf_mem_multi_pages_alloc);
+
+/**
+ * qdf_mem_multi_pages_free() - free large size of kernel memory
+ * @osdev: OS device handle pointer
+ * @pages: Multi page information storage
+ * @memctxt: Memory context
+ * @cacheable: Coherent memory or cacheable memory
+ *
+ * This function will free memory that was allocated over multiple pages.
+ *
+ * Return: None
+ */
+void qdf_mem_multi_pages_free(qdf_device_t osdev,
+			      struct qdf_mem_multi_page_t *pages,
+			      qdf_dma_context_t memctxt, bool cacheable)
+{
+	unsigned int page_idx;
+	struct qdf_mem_dma_page_t *dma_pages;
+
+	if (cacheable) {
+		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
+			qdf_mem_free(pages->cacheable_pages[page_idx]);
+		qdf_mem_free(pages->cacheable_pages);
+	} else {
+		dma_pages = pages->dma_pages;
+		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
+			qdf_mem_free_consistent(osdev, osdev->dev, PAGE_SIZE,
+				dma_pages->page_v_addr_start,
+				dma_pages->page_p_addr, memctxt);
+			dma_pages++;
+		}
+		qdf_mem_free(pages->dma_pages);
+	}
+
+	pages->cacheable_pages = NULL;
+	pages->dma_pages = NULL;
+	pages->num_pages = 0;
+	return;
+}
+EXPORT_SYMBOL(qdf_mem_multi_pages_free);
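A hedged example of how a caller might use the multi-page helpers above for a cacheable descriptor ring; the element size, element count and the zero memory context passed here are arbitrary illustrative values, not taken from this commit:

static void my_multi_page_demo(qdf_device_t osdev)
{
	struct qdf_mem_multi_page_t pages;
	uint16_t i;

	/* 1024 elements of 64 bytes, allocated as cacheable pages */
	qdf_mem_multi_pages_alloc(osdev, &pages, 64, 1024, 0, true);
	if (!pages.num_pages)
		return;		/* allocation failed */

	for (i = 0; i < pages.num_pages; i++) {
		void *page = pages.cacheable_pages[i];

		/* pages.num_element_per_page elements start at "page" */
		(void)page;
	}

	qdf_mem_multi_pages_free(osdev, &pages, 0, true);
}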
+
+/**
+ * qdf_mem_copy() - copy memory
+ * @dst_addr: Pointer to destination memory location (to copy to)
+ * @src_addr: Pointer to source memory location (to copy from)
+ * @num_bytes: Number of bytes to copy.
+ *
+ * Copy host memory from one location to another, similar to memcpy in
+ * standard C.  Note this function does not specifically handle overlapping
+ * source and destination memory locations.  Calling this function with
+ * overlapping source and destination memory locations will result in
+ * unpredictable results.  Use qdf_mem_move() if the memory locations
+ * for the source and destination are overlapping (or could be overlapping!)
+ *
+ * Return: none
+ */
+void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes)
+{
+	if (0 == num_bytes) {
+		/* special case where dst_addr or src_addr can be NULL */
+		return;
+	}
+
+	if ((dst_addr == NULL) || (src_addr == NULL)) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s called with NULL parameter, source:%p destination:%p",
+			  __func__, src_addr, dst_addr);
+		QDF_ASSERT(0);
+		return;
+	}
+	memcpy(dst_addr, src_addr, num_bytes);
+}
+EXPORT_SYMBOL(qdf_mem_copy);
+
+/**
+ * qdf_mem_zero() - zero out memory
+ * @ptr: pointer to memory that will be set to zero
+ * @num_bytes: number of bytes zero
+ *
+ * This function sets the memory location to all zeros, essentially clearing
+ * the memory.
+ *
+ * Return: None
+ */
+void qdf_mem_zero(void *ptr, uint32_t num_bytes)
+{
+	if (0 == num_bytes) {
+		/* special case where ptr can be NULL */
+		return;
+	}
+
+	if (ptr == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s called with NULL parameter ptr", __func__);
+		return;
+	}
+	memset(ptr, 0, num_bytes);
+}
+EXPORT_SYMBOL(qdf_mem_zero);
+
+/**
+ * qdf_mem_set() - set (fill) memory with a specified byte value.
+ * @ptr: Pointer to memory that will be set
+ * @num_bytes: Number of bytes to be set
+ * @value: Byte value to fill the memory with
+ *
+ * Return: None
+ */
+void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value)
+{
+	if (ptr == NULL) {
+		qdf_print("%s called with NULL parameter ptr", __func__);
+		return;
+	}
+	memset(ptr, value, num_bytes);
+}
+EXPORT_SYMBOL(qdf_mem_set);
+
+/**
+ * qdf_mem_move() - move memory
+ * @dst_addr: pointer to destination memory location (to move to)
+ * @src_addr: pointer to source memory location (to move from)
+ * @num_bytes: number of bytes to move.
+ *
+ * Move host memory from one location to another, similar to memmove in
+ * standard C.  Note this function *does* handle overlapping
+ * source and destination memory locations.
+ *
+ * Return: None
+ */
+void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes)
+{
+	if (0 == num_bytes) {
+		/* special case where dst_addr or src_addr can be NULL */
+		return;
+	}
+
+	if ((dst_addr == NULL) || (src_addr == NULL)) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s called with NULL parameter, source:%p destination:%p",
+			  __func__, src_addr, dst_addr);
+		QDF_ASSERT(0);
+		return;
+	}
+	memmove(dst_addr, src_addr, num_bytes);
+}
+EXPORT_SYMBOL(qdf_mem_move);
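A small illustration (not from the source) of when qdf_mem_move() is required instead of qdf_mem_copy(); my_shift_demo is a hypothetical caller:

static void my_shift_demo(void)
{
	uint8_t buf[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

	/* source and destination overlap, so qdf_mem_move() must be used;
	 * qdf_mem_copy() here would give unpredictable results
	 */
	qdf_mem_move(&buf[0], &buf[2], 6);
}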
+
+/**
+ * qdf_mem_alloc_consistent() - allocates consistent qdf memory
+ * @osdev: OS device handle
+ * @dev: Pointer to device handle
+ * @size: Size to be allocated
+ * @phy_addr: Physical address
+ *
+ * Return: pointer of allocated memory or null if memory alloc fails
+ */
+void *qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev, qdf_size_t size,
+			       qdf_dma_addr_t *phy_addr)
+{
+#if defined(A_SIMOS_DEVHOST)
+	static int first = 1;
+	void *vaddr;
+
+	if (first) {
+		first = 0;
+		qdf_print("Warning: bypassing %s\n", __func__);
+	}
+
+	vaddr = qdf_mem_malloc(size);
+	*phy_addr = ((qdf_dma_addr_t) vaddr);
+	return vaddr;
+#else
+	int flags = GFP_KERNEL;
+	void *alloc_mem = NULL;
+
+	if (in_interrupt() || irqs_disabled() || in_atomic())
+		flags = GFP_ATOMIC;
+
+	alloc_mem = dma_alloc_coherent(dev, size, phy_addr, flags);
+	if (alloc_mem == NULL)
+		qdf_print("%s Warning: unable to alloc consistent memory of size %zu!\n",
+			__func__, size);
+	return alloc_mem;
+#endif
+}
+EXPORT_SYMBOL(qdf_mem_alloc_consistent);
+
+/**
+ * qdf_mem_free_consistent() - free consistent qdf memory
+ * @osdev: OS device handle
+ * @dev: Pointer to device handle
+ * @size: Size of the memory block to free
+ * @vaddr: virtual address
+ * @phy_addr: Physical address
+ * @memctx: Pointer to DMA context
+ *
+ * Return: none
+ */
+inline void qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
+				    qdf_size_t size, void *vaddr,
+				    qdf_dma_addr_t phy_addr,
+				    qdf_dma_context_t memctx)
+{
+#if defined(A_SIMOS_DEVHOST)
+	static int first = 1;
+
+	if (first) {
+		first = 0;
+		qdf_print("Warning: bypassing %s\n", __func__);
+	}
+	qdf_mem_free(vaddr);
+	return;
+#else
+	dma_free_coherent(dev, size, vaddr, phy_addr);
+#endif
+}
+EXPORT_SYMBOL(qdf_mem_free_consistent);
+
+/**
+ * qdf_mem_dma_sync_single_for_device() - assign memory to device
+ * @osdev: OS device handle
+ * @bus_addr: dma address to give to the device
+ * @size: Size of the memory block
+ * @direction: direction data will be dma'ed
+ *
+ * Assign memory to the remote device.
+ * The cache lines are flushed to ram or invalidated as needed.
+ *
+ * Return: none
+ */
+inline void
+qdf_mem_dma_sync_single_for_device(qdf_device_t osdev, qdf_dma_addr_t bus_addr,
+				   qdf_size_t size,
+				   enum dma_data_direction direction)
+{
+	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
+}
+EXPORT_SYMBOL(qdf_mem_dma_sync_single_for_device);
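A hedged caller-side sketch pairing the coherent alloc/free helpers above; the 4096-byte ring size and the zero memory context are arbitrary, and the my_ring_* helpers are hypothetical:

static void *my_ring_alloc(qdf_device_t osdev, qdf_dma_addr_t *paddr)
{
	/* coherent block suitable for a small DMA descriptor ring */
	return qdf_mem_alloc_consistent(osdev, osdev->dev, 4096, paddr);
}

static void my_ring_free(qdf_device_t osdev, void *vaddr, qdf_dma_addr_t paddr)
{
	qdf_mem_free_consistent(osdev, osdev->dev, 4096, vaddr, paddr, 0);
}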
+

+ 34 - 16
qdf/src/qdf_defer.c → qdf/linux/src/qdf_module.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -25,26 +25,44 @@
  * to the Linux Foundation.
  */
 
-#include <linux/kernel.h>
-#include <linux/version.h>
+/**
+ * DOC: qdf_module.c
+ * Linux-specific definitions for QDF module APIs
+ */
+
 #include <linux/module.h>
-#include <linux/workqueue.h>
+#include <qdf_perf.h>
 
-#include "i_cdf_defer.h"
+MODULE_AUTHOR("Qualcomm Atheros Inc.");
+MODULE_DESCRIPTION("Qualcomm Atheros Device Framework Module");
+MODULE_LICENSE("Dual BSD/GPL");
+
+#ifndef EXPORT_SYMTAB
+#define EXPORT_SYMTAB
+#endif
 
 /**
- * __cdf_defer_func() - defer work handler
- * @work: Pointer to defer work
+ * qdf_mod_init() - module initialization
  *
- * Return: none
+ * Return: int
  */
-void __cdf_defer_func(struct work_struct *work)
+static int __init
+qdf_mod_init(void)
 {
-	__cdf_work_t *ctx = container_of(work, __cdf_work_t, work);
-	if (ctx->fn == NULL) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			"No callback registered !!");
-		return;
-	}
-	ctx->fn(ctx->arg);
+	qdf_perfmod_init();
+	return 0;
 }
+module_init(qdf_mod_init);
+
+/**
+ * qdf_mod_exit() - module exit
+ *
+ * Return: none
+ */
+static void __exit
+qdf_mod_exit(void)
+{
+	qdf_perfmod_exit();
+}
+module_exit(qdf_mod_exit);
+

+ 1536 - 0
qdf/linux/src/qdf_nbuf.c

@@ -0,0 +1,1536 @@
+/*
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_nbuf.c
+ * QCA driver framework (QDF) network buffer management APIs
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <qdf_types.h>
+#include <qdf_nbuf.h>
+#include <qdf_mem.h>
+#include <qdf_status.h>
+#include <qdf_lock.h>
+#include <qdf_trace.h>
+
+#if defined(FEATURE_TSO)
+#include <net/ipv6.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#endif /* FEATURE_TSO */
+
+/* Packet Counter */
+static uint32_t nbuf_tx_mgmt[QDF_NBUF_TX_PKT_STATE_MAX];
+static uint32_t nbuf_tx_data[QDF_NBUF_TX_PKT_STATE_MAX];
+
+/**
+ * qdf_nbuf_tx_desc_count_display() - Displays the packet counter
+ *
+ * Return: none
+ */
+void qdf_nbuf_tx_desc_count_display(void)
+{
+	qdf_print("Current Snapshot of the Driver:\n");
+	qdf_print("Data Packets:\n");
+	qdf_print("HDD %d TXRX_Q %d TXRX %d HTT %d",
+		nbuf_tx_data[QDF_NBUF_TX_PKT_HDD] -
+		(nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] +
+		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
+		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE]),
+		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
+		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
+		nbuf_tx_data[QDF_NBUF_TX_PKT_TXRX] -
+			 nbuf_tx_data[QDF_NBUF_TX_PKT_HTT],
+		nbuf_tx_data[QDF_NBUF_TX_PKT_HTT]  -
+			 nbuf_tx_data[QDF_NBUF_TX_PKT_HTC]);
+	qdf_print(" HTC %d  HIF %d CE %d TX_COMP %d\n",
+		nbuf_tx_data[QDF_NBUF_TX_PKT_HTC] -
+			nbuf_tx_data[QDF_NBUF_TX_PKT_HIF],
+		nbuf_tx_data[QDF_NBUF_TX_PKT_HIF] -
+			 nbuf_tx_data[QDF_NBUF_TX_PKT_CE],
+		nbuf_tx_data[QDF_NBUF_TX_PKT_CE] -
+			 nbuf_tx_data[QDF_NBUF_TX_PKT_FREE],
+		nbuf_tx_data[QDF_NBUF_TX_PKT_FREE]);
+	qdf_print("Mgmt Packets:\n");
+	qdf_print("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d\n",
+		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_ENQUEUE] -
+		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX_DEQUEUE],
+		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_TXRX] -
+			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT],
+		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTT] -
+			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC],
+		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HTC] -
+			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF],
+		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_HIF] -
+			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE],
+		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_CE] -
+			 nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE],
+		nbuf_tx_mgmt[QDF_NBUF_TX_PKT_FREE]);
+}
+EXPORT_SYMBOL(qdf_nbuf_tx_desc_count_display);
+
+/**
+ * qdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
+ * @packet_type: packet type, either mgmt or data
+ * @current_state: layer at which the packet is currently present
+ *
+ * Return: none
+ */
+static inline void qdf_nbuf_tx_desc_count_update(uint8_t packet_type,
+			uint8_t current_state)
+{
+	switch (packet_type) {
+	case QDF_NBUF_TX_PKT_MGMT_TRACK:
+		nbuf_tx_mgmt[current_state]++;
+		break;
+	case QDF_NBUF_TX_PKT_DATA_TRACK:
+		nbuf_tx_data[current_state]++;
+		break;
+	default:
+		break;
+	}
+}
+EXPORT_SYMBOL(qdf_nbuf_tx_desc_count_update);
+
+/**
+ * qdf_nbuf_tx_desc_count_clear() - Clears packet counter for both data, mgmt
+ *
+ * Return: none
+ */
+void qdf_nbuf_tx_desc_count_clear(void)
+{
+	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
+	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
+}
+EXPORT_SYMBOL(qdf_nbuf_tx_desc_count_clear);
+
+/**
+ * qdf_nbuf_set_state() - Updates the packet state
+ * @nbuf:            network buffer
+ * @current_state :  layer at which the packet currently is
+ *
+ * This function updates the packet state to the layer at which the packet
+ * currently is
+ *
+ * Return: none
+ */
+void qdf_nbuf_set_state(qdf_nbuf_t nbuf, uint8_t current_state)
+{
+	/*
+	 * Only Mgmt, Data Packets are tracked. WMI messages
+	 * such as scan commands are not tracked
+	 */
+	uint8_t packet_type;
+	packet_type = QDF_NBUF_CB_TX_PACKET_TRACK(nbuf);
+
+	if ((packet_type != QDF_NBUF_TX_PKT_DATA_TRACK) &&
+		(packet_type != QDF_NBUF_TX_PKT_MGMT_TRACK)) {
+		return;
+	}
+	QDF_NBUF_CB_TX_PACKET_STATE(nbuf) = current_state;
+	qdf_nbuf_tx_desc_count_update(packet_type,
+					current_state);
+}
+EXPORT_SYMBOL(qdf_nbuf_set_state);
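An illustrative, hypothetical transmit-path hook showing how a layer reports a packet's position so the counters displayed above stay balanced; the QDF_NBUF_TX_PKT_TXRX_ENQUEUE state is one of the states already referenced in this file:

static void my_txrx_enqueue(qdf_nbuf_t nbuf)
{
	/* mark the packet as sitting in the TXRX queue */
	qdf_nbuf_set_state(nbuf, QDF_NBUF_TX_PKT_TXRX_ENQUEUE);

	/* ... actually enqueue the buffer ... */
}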
+
+/* globals do not need to be initialized to NULL/0 */
+qdf_nbuf_trace_update_t qdf_trace_update_cb;
+
+/**
+ * __qdf_nbuf_alloc() - Allocate nbuf
+ * @osdev: Device handle
+ * @size: Netbuf requested size
+ * @reserve: headroom to start with
+ * @align: Align
+ * @prio: Priority
+ *
+ * This allocates an nbuf, aligns it if needed, and reserves some space in the
+ * front. Since the reserve is done after alignment, an unaligned reserve value
+ * will result in an unaligned data address.
+ *
+ * Return: nbuf or %NULL if no memory
+ */
+struct sk_buff *__qdf_nbuf_alloc(qdf_device_t osdev, size_t size, int reserve,
+			 int align, int prio)
+{
+	struct sk_buff *skb;
+	unsigned long offset;
+
+	if (align)
+		size += (align - 1);
+
+	skb = dev_alloc_skb(size);
+
+	if (!skb) {
+		pr_err("ERROR:NBUF alloc failed\n");
+		return NULL;
+	}
+	memset(skb->cb, 0x0, sizeof(skb->cb));
+
+	/*
+	 * The default is for netbuf fragments to be interpreted
+	 * as wordstreams rather than bytestreams.
+	 */
+	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
+	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
+
+	/*
+	 * XXX:how about we reserve first then align
+	 * Align & make sure that the tail & data are adjusted properly
+	 */
+
+	if (align) {
+		offset = ((unsigned long)skb->data) % align;
+		if (offset)
+			skb_reserve(skb, align - offset);
+	}
+
+	/*
+	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
+	 * pointer
+	 */
+	skb_reserve(skb, reserve);
+
+	return skb;
+}
+EXPORT_SYMBOL(__qdf_nbuf_alloc);
+
+/**
+ * __qdf_nbuf_free() - free the nbuf (interrupt safe)
+ * @skb: Pointer to network buffer
+ *
+ * Return: none
+ */
+void __qdf_nbuf_free(struct sk_buff *skb)
+{
+	if (qdf_nbuf_ipa_owned_get(skb))
+		/* IPA cleanup function will need to be called here */
+		QDF_BUG(1);
+	else
+		dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(__qdf_nbuf_free);
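A hedged sketch of allocating and releasing an nbuf with the helpers above; the size, reserve and alignment values are arbitrary, and real callers use the qdf_nbuf_alloc()/qdf_nbuf_free() wrappers rather than the __qdf_ variants directly:

static void my_nbuf_demo(qdf_device_t osdev)
{
	qdf_nbuf_t nbuf;

	/* 1500-byte buffer, 26 bytes of headroom, 4-byte alignment */
	nbuf = __qdf_nbuf_alloc(osdev, 1500, 26, 4, 0);
	if (!nbuf)
		return;

	/* ... fill and hand off the buffer ... */

	__qdf_nbuf_free(nbuf);
}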
+
+/**
+ * __qdf_nbuf_map() - map a buffer to local bus address space
+ * @osdev: OS device
+ * @skb: Pointer to network buffer
+ * @dir: Direction
+ *
+ * Return: QDF_STATUS
+ */
+#ifdef QDF_OS_DEBUG
+QDF_STATUS
+__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
+{
+	struct skb_shared_info *sh = skb_shinfo(skb);
+	qdf_assert((dir == QDF_DMA_TO_DEVICE)
+			|| (dir == QDF_DMA_FROM_DEVICE));
+
+	/*
+	 * Assume there's only a single fragment.
+	 * To support multiple fragments, it would be necessary to change
+	 * qdf_nbuf_t to be a separate object that stores meta-info
+	 * (including the bus address for each fragment) and a pointer
+	 * to the underlying sk_buff.
+	 */
+	qdf_assert(sh->nr_frags == 0);
+
+	return __qdf_nbuf_map_single(osdev, skb, dir);
+}
+EXPORT_SYMBOL(__qdf_nbuf_map);
+
+#else
+QDF_STATUS
+__qdf_nbuf_map(qdf_device_t osdev, struct sk_buff *skb, qdf_dma_dir_t dir)
+{
+	return __qdf_nbuf_map_single(osdev, skb, dir);
+}
+EXPORT_SYMBOL(__qdf_nbuf_map);
+#endif
+/**
+ * __qdf_nbuf_unmap() - to unmap a previously mapped buf
+ * @osdev: OS device
+ * @skb: Pointer to network buffer
+ * @dir: dma direction
+ *
+ * Return: none
+ */
+void
+__qdf_nbuf_unmap(qdf_device_t osdev, struct sk_buff *skb,
+			qdf_dma_dir_t dir)
+{
+	qdf_assert((dir == QDF_DMA_TO_DEVICE)
+		   || (dir == QDF_DMA_FROM_DEVICE));
+
+	/*
+	 * Assume there's a single fragment.
+	 * If this is not true, the assertion in __qdf_nbuf_map will catch it.
+	 */
+	__qdf_nbuf_unmap_single(osdev, skb, dir);
+}
+EXPORT_SYMBOL(__qdf_nbuf_unmap);
+
+/**
+ * __qdf_nbuf_map_single() - map a single buffer to local bus address space
+ * @osdev: OS device
+ * @skb: Pointer to network buffer
+ * @dir: Direction
+ *
+ * Return: QDF_STATUS
+ */
+#ifdef A_SIMOS_DEVHOST
+QDF_STATUS
+__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
+{
+	qdf_dma_addr_t paddr;
+
+	QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_map_single);
+#else
+QDF_STATUS
+__qdf_nbuf_map_single(qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
+{
+	qdf_dma_addr_t paddr;
+
+	/* assume that the OS only provides a single fragment */
+	QDF_NBUF_CB_PADDR(buf) = paddr =
+		dma_map_single(osdev->dev, buf->data,
+				skb_end_pointer(buf) - buf->data, dir);
+	return dma_mapping_error(osdev->dev, paddr)
+		? QDF_STATUS_E_FAILURE
+		: QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_map_single);
+#endif
+/**
+ * __qdf_nbuf_unmap_single() -  unmap a previously mapped buf
+ * @osdev: OS device
+ * @skb: Pointer to network buffer
+ * @dir: Direction
+ *
+ * Return: none
+ */
+#if defined(A_SIMOS_DEVHOST)
+void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
+				qdf_dma_dir_t dir)
+{
+	return;
+}
+#else
+void __qdf_nbuf_unmap_single(qdf_device_t osdev, qdf_nbuf_t buf,
+					qdf_dma_dir_t dir)
+{
+	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
+			 skb_end_pointer(buf) - buf->data, dir);
+}
+#endif
+EXPORT_SYMBOL(__qdf_nbuf_unmap_single);
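A hedged sketch of the map/unmap pairing a transmit path would use around DMA; the my_tx_map_demo wrapper is hypothetical and the hardware hand-off is elided:

static QDF_STATUS my_tx_map_demo(qdf_device_t osdev, qdf_nbuf_t nbuf)
{
	QDF_STATUS status;

	status = __qdf_nbuf_map_single(osdev, nbuf, QDF_DMA_TO_DEVICE);
	if (QDF_STATUS_SUCCESS != status)
		return status;

	/* ... hand QDF_NBUF_CB_PADDR(nbuf) to the hardware ... */

	__qdf_nbuf_unmap_single(osdev, nbuf, QDF_DMA_TO_DEVICE);
	return QDF_STATUS_SUCCESS;
}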
+
+/**
+ * __qdf_nbuf_set_rx_cksum() - set rx checksum
+ * @skb: Pointer to network buffer
+ * @cksum: Pointer to checksum value
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS
+__qdf_nbuf_set_rx_cksum(struct sk_buff *skb, qdf_nbuf_rx_cksum_t *cksum)
+{
+	switch (cksum->l4_result) {
+	case QDF_NBUF_RX_CKSUM_NONE:
+		skb->ip_summed = CHECKSUM_NONE;
+		break;
+	case QDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		break;
+	case QDF_NBUF_RX_CKSUM_TCP_UDP_HW:
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		skb->csum = cksum->val;
+		break;
+	default:
+		pr_err("Unknown checksum type\n");
+		qdf_assert(0);
+		return QDF_STATUS_E_NOSUPPORT;
+	}
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_set_rx_cksum);
+
+/**
+ * __qdf_nbuf_get_tx_cksum() - get tx checksum
+ * @skb: Pointer to network buffer
+ *
+ * Return: TX checksum value
+ */
+qdf_nbuf_tx_cksum_t __qdf_nbuf_get_tx_cksum(struct sk_buff *skb)
+{
+	switch (skb->ip_summed) {
+	case CHECKSUM_NONE:
+		return QDF_NBUF_TX_CKSUM_NONE;
+	case CHECKSUM_PARTIAL:
+		/* XXX ADF and Linux checksum types don't map 1-to-1. This is
+		 * not 100% correct */
+		return QDF_NBUF_TX_CKSUM_TCP_UDP;
+	case CHECKSUM_COMPLETE:
+		return QDF_NBUF_TX_CKSUM_TCP_UDP_IP;
+	default:
+		return QDF_NBUF_TX_CKSUM_NONE;
+	}
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_tx_cksum);
+
+/**
+ * __qdf_nbuf_get_tid() - get tid
+ * @skb: Pointer to network buffer
+ *
+ * Return: tid
+ */
+uint8_t __qdf_nbuf_get_tid(struct sk_buff *skb)
+{
+	return skb->priority;
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_tid);
+
+/**
+ * __qdf_nbuf_set_tid() - set tid
+ * @skb: Pointer to network buffer
+ * @tid: TID value to set
+ *
+ * Return: none
+ */
+void __qdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
+{
+	skb->priority = tid;
+}
+EXPORT_SYMBOL(__qdf_nbuf_set_tid);
+
+/**
+ * __qdf_nbuf_get_exemption_type() - get exemption type
+ * @skb: Pointer to network buffer
+ *
+ * Return: exemption type
+ */
+uint8_t __qdf_nbuf_get_exemption_type(struct sk_buff *skb)
+{
+	return QDF_NBUF_EXEMPT_NO_EXEMPTION;
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_exemption_type);
+
+/**
+ * __qdf_nbuf_reg_trace_cb() - register trace callback
+ * @cb_func_ptr: Pointer to trace callback function
+ *
+ * Return: none
+ */
+void __qdf_nbuf_reg_trace_cb(qdf_nbuf_trace_update_t cb_func_ptr)
+{
+	qdf_trace_update_cb = cb_func_ptr;
+	return;
+}
+EXPORT_SYMBOL(__qdf_nbuf_reg_trace_cb);
+
+#ifdef QCA_PKT_PROTO_TRACE
+/**
+ * __qdf_nbuf_trace_update() - update trace event
+ * @buf: Pointer to network buffer
+ * @event_string: String describing the event to trace
+ *
+ * Return: none
+ */
+void __qdf_nbuf_trace_update(struct sk_buff *buf, char *event_string)
+{
+	char string_buf[QDF_NBUF_PKT_TRAC_MAX_STRING];
+
+	if ((!qdf_trace_update_cb) || (!event_string))
+		return;
+
+	if (!qdf_nbuf_trace_get_proto_type(buf))
+		return;
+
+	/* Buffer overflow */
+	if (QDF_NBUF_PKT_TRAC_MAX_STRING <=
+	    (qdf_str_len(event_string) + QDF_NBUF_PKT_TRAC_PROTO_STRING)) {
+		return;
+	}
+
+	qdf_mem_zero(string_buf, QDF_NBUF_PKT_TRAC_MAX_STRING);
+	qdf_mem_copy(string_buf, event_string, qdf_str_len(event_string));
+	if (QDF_NBUF_PKT_TRAC_TYPE_EAPOL & qdf_nbuf_trace_get_proto_type(buf)) {
+		qdf_mem_copy(string_buf + qdf_str_len(event_string),
+			     "EPL", QDF_NBUF_PKT_TRAC_PROTO_STRING);
+	} else if (QDF_NBUF_PKT_TRAC_TYPE_DHCP &
+		 qdf_nbuf_trace_get_proto_type(buf)) {
+		qdf_mem_copy(string_buf + qdf_str_len(event_string),
+			     "DHC", QDF_NBUF_PKT_TRAC_PROTO_STRING);
+	} else if (QDF_NBUF_PKT_TRAC_TYPE_MGMT_ACTION &
+		   qdf_nbuf_trace_get_proto_type(buf)) {
+		qdf_mem_copy(string_buf + qdf_str_len(event_string),
+			     "MACT", QDF_NBUF_PKT_TRAC_PROTO_STRING);
+	}
+
+	qdf_trace_update_cb(string_buf);
+	return;
+}
+EXPORT_SYMBOL(__qdf_nbuf_trace_update);
+#endif /* QCA_PKT_PROTO_TRACE */
+
+#ifdef MEMORY_DEBUG
+#define QDF_NET_BUF_TRACK_MAX_SIZE    (1024)
+
+/**
+ * struct qdf_nbuf_track_t - Network buffer track structure
+ *
+ * @p_next: Pointer to next
+ * @net_buf: Pointer to network buffer
+ * @file_name: File name
+ * @line_num: Line number
+ * @size: Size
+ */
+struct qdf_nbuf_track_t {
+	struct qdf_nbuf_track_t *p_next;
+	qdf_nbuf_t net_buf;
+	uint8_t *file_name;
+	uint32_t line_num;
+	size_t size;
+};
+
+spinlock_t g_qdf_net_buf_track_lock;
+typedef struct qdf_nbuf_track_t QDF_NBUF_TRACK;
+
+QDF_NBUF_TRACK *gp_qdf_net_buf_track_tbl[QDF_NET_BUF_TRACK_MAX_SIZE];
+
+/**
+ * qdf_net_buf_debug_init() - initialize network buffer debug functionality
+ *
+ * QDF network buffer debug feature tracks all SKBs allocated by WLAN driver
+ * in a hash table and when driver is unloaded it reports about leaked SKBs.
+ * Any WLAN driver module whose allocated SKB is freed by the network stack
+ * is supposed to call qdf_net_buf_debug_release_skb() so that the SKB is not
+ * reported as a memory leak.
+ *
+ * Return: none
+ */
+void qdf_net_buf_debug_init(void)
+{
+	uint32_t i;
+	unsigned long irq_flag;
+
+	spin_lock_init(&g_qdf_net_buf_track_lock);
+
+	spin_lock_irqsave(&g_qdf_net_buf_track_lock, irq_flag);
+
+	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++)
+		gp_qdf_net_buf_track_tbl[i] = NULL;
+
+	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock, irq_flag);
+
+	return;
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_init);
+
+/**
+ * qdf_net_buf_debug_exit() - exit network buffer debug functionality
+ *
+ * Exit network buffer tracking debug functionality and log SKB memory leaks
+ *
+ * Return: none
+ */
+void qdf_net_buf_debug_exit(void)
+{
+	uint32_t i;
+	unsigned long irq_flag;
+	QDF_NBUF_TRACK *p_node;
+	QDF_NBUF_TRACK *p_prev;
+
+	spin_lock_irqsave(&g_qdf_net_buf_track_lock, irq_flag);
+
+	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
+		p_node = gp_qdf_net_buf_track_tbl[i];
+		while (p_node) {
+			p_prev = p_node;
+			p_node = p_node->p_next;
+			qdf_print(
+				  "SKB buf memory Leak@ File %s, @Line %d, size %zu\n",
+				  p_prev->file_name, p_prev->line_num,
+				  p_prev->size);
+		}
+	}
+
+	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock, irq_flag);
+
+	return;
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_exit);
+
+/**
+ * qdf_net_buf_debug_clean() - clean up network buffer debug functionality
+ *
+ * Return: none
+ */
+void qdf_net_buf_debug_clean(void)
+{
+	uint32_t i;
+	unsigned long irq_flag;
+	QDF_NBUF_TRACK *p_node;
+	QDF_NBUF_TRACK *p_prev;
+
+	spin_lock_irqsave(&g_qdf_net_buf_track_lock, irq_flag);
+
+	for (i = 0; i < QDF_NET_BUF_TRACK_MAX_SIZE; i++) {
+		p_node = gp_qdf_net_buf_track_tbl[i];
+		while (p_node) {
+			p_prev = p_node;
+			p_node = p_node->p_next;
+			qdf_mem_free(p_prev);
+		}
+	}
+
+	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock, irq_flag);
+
+	return;
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_clean);
+
+/**
+ * qdf_net_buf_debug_hash() - hash network buffer pointer
+ * @net_buf: network buffer to be hashed
+ *
+ * Return: hash value
+ */
+uint32_t qdf_net_buf_debug_hash(qdf_nbuf_t net_buf)
+{
+	uint32_t i;
+
+	i = (uint32_t) ((uintptr_t) net_buf & (QDF_NET_BUF_TRACK_MAX_SIZE - 1));
+
+	return i;
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_hash);
+
+/**
+ * qdf_net_buf_debug_look_up() - look up network buffer in debug hash table
+ * @net_buf: network buffer to look up
+ *
+ * Return: If skb is found in hash table then return pointer to network buffer
+ *	else return %NULL
+ */
+QDF_NBUF_TRACK *qdf_net_buf_debug_look_up(qdf_nbuf_t net_buf)
+{
+	uint32_t i;
+	QDF_NBUF_TRACK *p_node;
+
+	i = qdf_net_buf_debug_hash(net_buf);
+	p_node = gp_qdf_net_buf_track_tbl[i];
+
+	while (p_node) {
+		if (p_node->net_buf == net_buf)
+			return p_node;
+		p_node = p_node->p_next;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_look_up);
+
+/**
+ * qdf_net_buf_debug_add_node() - store skb in debug hash table
+ * @net_buf: network buffer to track
+ * @size: size of the allocation
+ * @file_name: file from which the allocation was made
+ * @line_num: line number of the allocation call
+ *
+ * Return: none
+ */
+void qdf_net_buf_debug_add_node(qdf_nbuf_t net_buf, size_t size,
+				uint8_t *file_name, uint32_t line_num)
+{
+	uint32_t i;
+	unsigned long irq_flag;
+	QDF_NBUF_TRACK *p_node;
+
+	spin_lock_irqsave(&g_qdf_net_buf_track_lock, irq_flag);
+
+	i = qdf_net_buf_debug_hash(net_buf);
+	p_node = qdf_net_buf_debug_look_up(net_buf);
+
+	if (p_node) {
+		qdf_print(
+			  "Double allocation of skb ! Already allocated from %s %d",
+			  p_node->file_name, p_node->line_num);
+		QDF_ASSERT(0);
+		goto done;
+	} else {
+		p_node = (QDF_NBUF_TRACK *)
+			 qdf_mem_malloc(sizeof(*p_node));
+		if (p_node) {
+			p_node->net_buf = net_buf;
+			p_node->file_name = file_name;
+			p_node->line_num = line_num;
+			p_node->size = size;
+			p_node->p_next = gp_qdf_net_buf_track_tbl[i];
+			gp_qdf_net_buf_track_tbl[i] = p_node;
+		} else {
+			qdf_print(
+				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
+				  file_name, line_num, size);
+			QDF_ASSERT(0);
+		}
+	}
+
+done:
+	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock, irq_flag);
+
+	return;
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_add_node);
+
+/**
+ * qdf_net_buf_debug_delete_node() - remove skb from debug hash table
+ * @net_buf: network buffer to stop tracking
+ *
+ * Return: none
+ */
+void qdf_net_buf_debug_delete_node(qdf_nbuf_t net_buf)
+{
+	uint32_t i;
+	bool found = false;
+	QDF_NBUF_TRACK *p_head;
+	QDF_NBUF_TRACK *p_node;
+	unsigned long irq_flag;
+	QDF_NBUF_TRACK *p_prev;
+
+	spin_lock_irqsave(&g_qdf_net_buf_track_lock, irq_flag);
+
+	i = qdf_net_buf_debug_hash(net_buf);
+	p_head = gp_qdf_net_buf_track_tbl[i];
+
+	/* Unallocated SKB */
+	if (!p_head)
+		goto done;
+
+	p_node = p_head;
+	/* Found at head of the table */
+	if (p_head->net_buf == net_buf) {
+		gp_qdf_net_buf_track_tbl[i] = p_node->p_next;
+		qdf_mem_free((void *)p_node);
+		found = true;
+		goto done;
+	}
+
+	/* Search in collision list */
+	while (p_node) {
+		p_prev = p_node;
+		p_node = p_node->p_next;
+		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
+			p_prev->p_next = p_node->p_next;
+			qdf_mem_free((void *)p_node);
+			found = true;
+			break;
+		}
+	}
+
+done:
+	if (!found) {
+		qdf_print(
+			  "Unallocated buffer ! Double free of net_buf %p ?",
+			  net_buf);
+		QDF_ASSERT(0);
+	}
+
+	spin_unlock_irqrestore(&g_qdf_net_buf_track_lock, irq_flag);
+
+	return;
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_delete_node);
+
+/**
+ * qdf_net_buf_debug_release_skb() - release skb to avoid memory leak
+ * @net_buf: network buffer being returned to the network stack
+ *
+ * WLAN driver modules whose allocated SKBs are freed by the network stack
+ * are supposed to call this API before returning the SKB to the network
+ * stack, so that the SKB is not reported as a memory leak.
+ *
+ * Return: none
+ */
+void qdf_net_buf_debug_release_skb(qdf_nbuf_t net_buf)
+{
+	qdf_net_buf_debug_delete_node(net_buf);
+}
+EXPORT_SYMBOL(qdf_net_buf_debug_release_skb);
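+
+/*
+ * Usage sketch (hypothetical wrappers, for illustration only): callers
+ * pair the tracking calls with their allocation and free paths roughly
+ * as below, so that every live SKB has a node in the hash table.
+ */
+#if 0
+static qdf_nbuf_t example_alloc_tracked(unsigned int size)
+{
+	qdf_nbuf_t buf = dev_alloc_skb(size);
+
+	if (buf)
+		qdf_net_buf_debug_add_node(buf, size,
+					   (uint8_t *)__FILE__, __LINE__);
+	return buf;
+}
+
+static void example_free_tracked(qdf_nbuf_t buf)
+{
+	/* remove from the hash table before freeing the skb */
+	qdf_net_buf_debug_delete_node(buf);
+	dev_kfree_skb(buf);
+}
+#endif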
+
+#endif /*MEMORY_DEBUG */
+#if defined(FEATURE_TSO)
+
+struct qdf_tso_cmn_seg_info_t {
+	uint16_t ethproto;
+	uint16_t ip_tcp_hdr_len;
+	uint16_t l2_len;
+	unsigned char *eit_hdr;
+	unsigned int eit_hdr_len;
+	struct tcphdr *tcphdr;
+	uint16_t ipv4_csum_en;
+	uint16_t tcp_ipv4_csum_en;
+	uint16_t tcp_ipv6_csum_en;
+	uint16_t ip_id;
+	uint32_t tcp_seq_num;
+};
+
+/**
+ * __qdf_nbuf_get_tso_cmn_seg_info() - get TSO common information
+ * @skb: network buffer
+ * @tso_info: filled with the TSO information common to all segments
+ *
+ * Get the TSO information that is common across all the TCP
+ * segments of the jumbo packet
+ *
+ * Return: 0 - success 1 - failure
+ */
+uint8_t __qdf_nbuf_get_tso_cmn_seg_info(struct sk_buff *skb,
+	struct qdf_tso_cmn_seg_info_t *tso_info)
+{
+	/* Get ethernet type and ethernet header length */
+	tso_info->ethproto = vlan_get_protocol(skb);
+
+	/* Determine whether this is an IPv4 or IPv6 packet */
+	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
+		/* for IPv4, get the IP ID and enable TCP and IP csum */
+		struct iphdr *ipv4_hdr = ip_hdr(skb);
+		tso_info->ip_id = ntohs(ipv4_hdr->id);
+		tso_info->ipv4_csum_en = 1;
+		tso_info->tcp_ipv4_csum_en = 1;
+		if (qdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
+			qdf_print("TSO IPV4 proto 0x%x not TCP\n",
+				 ipv4_hdr->protocol);
+			return 1;
+		}
+	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
+		/* for IPv6, enable TCP csum. No IP ID or IP csum */
+		tso_info->tcp_ipv6_csum_en = 1;
+	} else {
+		qdf_print("TSO: ethertype 0x%x is not supported!\n",
+			 tso_info->ethproto);
+		return 1;
+	}
+
+	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
+	tso_info->tcphdr = tcp_hdr(skb);
+	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
+	/* get pointer to the ethernet + IP + TCP header and their length */
+	tso_info->eit_hdr = skb->data;
+	tso_info->eit_hdr_len = (skb_transport_header(skb)
+		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
+	tso_info->ip_tcp_hdr_len = tso_info->eit_hdr_len - tso_info->l2_len;
+	return 0;
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_tso_cmn_seg_info);
+
+
+/**
+ * qdf_dmaaddr_to_32s() - return high and low parts of dma_addr
+ * @dmaaddr: DMA address
+ * @lo: filled with the low 32 bits
+ * @hi: filled with the high 32 bits
+ *
+ * Returns the high and low 32-bits of the DMA addr in the provided ptrs
+ *
+ * Return: N/A
+ */
+static inline void qdf_dmaaddr_to_32s(qdf_dma_addr_t dmaaddr,
+				      uint32_t *lo, uint32_t *hi)
+{
+	if (sizeof(dmaaddr) > sizeof(uint32_t)) {
+		*lo = (uint32_t) (dmaaddr & 0x0ffffffff);
+		*hi = (uint32_t) (dmaaddr >> 32);
+	} else {
+		*lo = dmaaddr;
+		*hi = 0;
+	}
+}
+EXPORT_SYMBOL(qdf_dmaaddr_to_32s);
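+
+/*
+ * Illustration (hypothetical values): on a 64-bit DMA configuration,
+ * 0x0000001234abcd00 splits into hi = 0x00000012 and lo = 0x34abcd00;
+ * on a 32-bit build hi is always 0.
+ */
+#if 0
+static void example_split_dma_addr(qdf_dma_addr_t addr)
+{
+	uint32_t lo, hi;
+
+	qdf_dmaaddr_to_32s(addr, &lo, &hi);
+	pr_info("dma addr split: hi=0x%08x lo=0x%08x\n", hi, lo);
+}
+#endif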
+
+
+/**
+ * __qdf_nbuf_get_tso_info() - function to divide a TSO nbuf
+ * into segments
+ * @osdev: qdf device handle
+ * @skb: network buffer to be segmented
+ * @tso_info: This is the output. The information about the
+ *           TSO segments will be populated within this.
+ *
+ * This function fragments a TCP jumbo packet into smaller
+ * segments to be transmitted by the driver. It chains the TSO
+ * segments created into a list.
+ *
+ * Return: number of TSO segments
+ */
+uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
+		struct qdf_tso_info_t *tso_info)
+{
+	/* common across all segments */
+	struct qdf_tso_cmn_seg_info_t tso_cmn_info;
+
+	/* segment specific */
+	char *tso_frag_vaddr;
+	qdf_dma_addr_t tso_frag_paddr = 0;
+	uint32_t       tso_frag_paddr_lo, tso_frag_paddr_hi;
+	uint32_t num_seg = 0;
+	struct qdf_tso_seg_elem_t *curr_seg;
+	const struct skb_frag_struct *frag = NULL;
+	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
+	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory)*/
+	uint32_t foffset = 0; /* offset into the skb's fragment */
+	uint32_t skb_proc = 0; /* bytes of the skb that have been processed*/
+	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
+
+	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
+
+	if (qdf_unlikely(__qdf_nbuf_get_tso_cmn_seg_info(skb, &tso_cmn_info))) {
+		qdf_print("TSO: error getting common segment info\n");
+		return 0;
+	}
+	curr_seg = tso_info->tso_seg_list;
+
+	/* length of the first chunk of data in the skb */
+	skb_proc = skb_frag_len = skb_headlen(skb);
+
+	/* the 0th tso segment's 0th fragment always contains the EIT header */
+	/* update the remaining skb fragment length and TSO segment length */
+	skb_frag_len -= tso_cmn_info.eit_hdr_len;
+	skb_proc -= tso_cmn_info.eit_hdr_len;
+
+	/* get the address to the next tso fragment */
+	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
+	/* get the length of the next tso fragment */
+	tso_frag_len = min(skb_frag_len, tso_seg_size);
+	tso_frag_paddr = dma_map_single(osdev->dev,
+		 tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
+	qdf_dmaaddr_to_32s(tso_frag_paddr, &tso_frag_paddr_lo,
+						 &tso_frag_paddr_hi);
+
+	num_seg = tso_info->num_segs;
+	tso_info->num_segs = 0;
+	tso_info->is_tso = 1;
+
+	while (num_seg && curr_seg) {
+		int i = 1; /* tso fragment index */
+		int j = 0; /* skb fragment index */
+		uint8_t more_tso_frags = 1;
+		uint8_t from_frag_table = 0;
+
+		/* Initialize the flags to 0 */
+		memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
+		tso_info->num_segs++;
+
+		/* The following fields remain the same across all segments of
+		 a jumbo packet */
+		curr_seg->seg.tso_flags.tso_enable = 1;
+		curr_seg->seg.tso_flags.partial_checksum_en = 0;
+		curr_seg->seg.tso_flags.ipv4_checksum_en =
+			tso_cmn_info.ipv4_csum_en;
+		curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
+			tso_cmn_info.tcp_ipv6_csum_en;
+		curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
+			tso_cmn_info.tcp_ipv4_csum_en;
+		curr_seg->seg.tso_flags.l2_len = 0;
+		curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
+		curr_seg->seg.num_frags = 0;
+
+		/* The following fields change for the segments */
+		curr_seg->seg.tso_flags.ip_id = tso_cmn_info.ip_id;
+		tso_cmn_info.ip_id++;
+
+		curr_seg->seg.tso_flags.syn = tso_cmn_info.tcphdr->syn;
+		curr_seg->seg.tso_flags.rst = tso_cmn_info.tcphdr->rst;
+		curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
+		curr_seg->seg.tso_flags.ack = tso_cmn_info.tcphdr->ack;
+		curr_seg->seg.tso_flags.urg = tso_cmn_info.tcphdr->urg;
+		curr_seg->seg.tso_flags.ece = tso_cmn_info.tcphdr->ece;
+		curr_seg->seg.tso_flags.cwr = tso_cmn_info.tcphdr->cwr;
+
+		curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info.tcp_seq_num;
+
+		/* First fragment for each segment always contains the ethernet,
+		IP and TCP header */
+		curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info.eit_hdr;
+		curr_seg->seg.tso_frags[0].length = tso_cmn_info.eit_hdr_len;
+		tso_info->total_len = curr_seg->seg.tso_frags[0].length;
+		{
+			qdf_dma_addr_t mapped;
+			uint32_t       lo, hi;
+
+			mapped = dma_map_single(osdev->dev,
+				tso_cmn_info.eit_hdr,
+				tso_cmn_info.eit_hdr_len, DMA_TO_DEVICE);
+			qdf_dmaaddr_to_32s(mapped, &lo, &hi);
+			curr_seg->seg.tso_frags[0].paddr_low_32 = lo;
+			curr_seg->seg.tso_frags[0].paddr_upper_16 =
+							 (hi & 0xffff);
+		}
+		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
+		curr_seg->seg.num_frags++;
+
+		while (more_tso_frags) {
+			curr_seg->seg.tso_frags[i].vaddr = tso_frag_vaddr;
+			curr_seg->seg.tso_frags[i].length = tso_frag_len;
+			tso_info->total_len +=
+				 curr_seg->seg.tso_frags[i].length;
+			curr_seg->seg.tso_flags.ip_len +=
+				 curr_seg->seg.tso_frags[i].length;
+			curr_seg->seg.num_frags++;
+			skb_proc = skb_proc - curr_seg->seg.tso_frags[i].length;
+
+			/* increment the TCP sequence number */
+			tso_cmn_info.tcp_seq_num += tso_frag_len;
+			curr_seg->seg.tso_frags[i].paddr_upper_16 =
+				(tso_frag_paddr_hi & 0xffff);
+			curr_seg->seg.tso_frags[i].paddr_low_32 =
+				 tso_frag_paddr_lo;
+
+			/* if there is no more data left in the skb */
+			if (!skb_proc)
+				return tso_info->num_segs;
+
+			/* get the next payload fragment information */
+			/* check if there are more fragments in this segment */
+			if ((tso_seg_size - tso_frag_len)) {
+				more_tso_frags = 1;
+				i++;
+			} else {
+				more_tso_frags = 0;
+				/* reset i and the tso payload size */
+				i = 1;
+				tso_seg_size = skb_shinfo(skb)->gso_size;
+			}
+
+			/* if the next fragment is contiguous */
+			if (tso_frag_len < skb_frag_len) {
+				skb_frag_len = skb_frag_len - tso_frag_len;
+				tso_frag_len = min(skb_frag_len, tso_seg_size);
+				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
+				if (from_frag_table) {
+					tso_frag_paddr =
+						 skb_frag_dma_map(osdev->dev,
+							 frag, foffset,
+							 tso_frag_len,
+							 DMA_TO_DEVICE);
+					qdf_dmaaddr_to_32s(tso_frag_paddr,
+							&tso_frag_paddr_lo,
+							&tso_frag_paddr_hi);
+				} else {
+					tso_frag_paddr =
+						 dma_map_single(osdev->dev,
+							 tso_frag_vaddr,
+							 tso_frag_len,
+							 DMA_TO_DEVICE);
+					qdf_dmaaddr_to_32s(tso_frag_paddr,
+							&tso_frag_paddr_lo,
+							&tso_frag_paddr_hi);
+				}
+			} else { /* the next fragment is not contiguous */
+				tso_frag_len = min(skb_frag_len, tso_seg_size);
+				frag = &skb_shinfo(skb)->frags[j];
+				skb_frag_len = skb_frag_size(frag);
+
+				tso_frag_vaddr = skb_frag_address(frag);
+				tso_frag_paddr = skb_frag_dma_map(osdev->dev,
+					 frag, 0, tso_frag_len,
+					 DMA_TO_DEVICE);
+				qdf_dmaaddr_to_32s(tso_frag_paddr,
+						 &tso_frag_paddr_lo,
+						 &tso_frag_paddr_hi);
+				foffset += tso_frag_len;
+				from_frag_table = 1;
+				j++;
+			}
+		}
+		num_seg--;
+		/* if TCP FIN flag was set, set it in the last segment */
+		if (!num_seg)
+			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
+
+		curr_seg = curr_seg->next;
+	}
+	return tso_info->num_segs;
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_tso_info);
+
+/**
+ * __qdf_nbuf_get_tso_num_seg() - compute the number of TSO segments
+ * @skb: network buffer to be segmented
+ *
+ * Walks the TCP payload of the jumbo packet and counts how many
+ * gso_size sized segments the driver will need to transmit it.
+ *
+ * Return: number of TSO segments
+ */
+uint32_t __qdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
+{
+	uint32_t gso_size, tmp_len, num_segs = 0;
+
+	gso_size = skb_shinfo(skb)->gso_size;
+	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
+		+ tcp_hdrlen(skb));
+	while (tmp_len) {
+		num_segs++;
+		if (tmp_len > gso_size)
+			tmp_len -= gso_size;
+		else
+			break;
+	}
+	return num_segs;
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_tso_num_seg);
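+
+/*
+ * Illustration: the count above is effectively
+ * ceil(tcp_payload_len / gso_size). For example, a 9000 byte payload
+ * with gso_size 1460 yields 7 segments (6 full segments of 1460 bytes
+ * plus one final segment of 240 bytes).
+ */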
+
+struct sk_buff *__qdf_nbuf_inc_users(struct sk_buff *skb)
+{
+	atomic_inc(&skb->users);
+	return skb;
+}
+EXPORT_SYMBOL(__qdf_nbuf_inc_users);
+
+#endif /* FEATURE_TSO */
+
+
+/**
+ * __qdf_nbuf_ref() - Reference the nbuf so it can get held until the last free.
+ * @skb: sk_buff handle
+ *
+ * Return: none
+ */
+
+void __qdf_nbuf_ref(struct sk_buff *skb)
+{
+	skb_get(skb);
+}
+EXPORT_SYMBOL(__qdf_nbuf_ref);
+
+/**
+ * __qdf_nbuf_shared() - Check whether the buffer is shared
+ *  @skb: sk_buff buffer
+ *
+ *  Return: true if more than one person has a reference to this buffer.
+ */
+int __qdf_nbuf_shared(struct sk_buff *skb)
+{
+	return skb_shared(skb);
+}
+EXPORT_SYMBOL(__qdf_nbuf_shared);
+
+/**
+ * __qdf_nbuf_dmamap_create() - create a DMA map.
+ * @osdev: qdf device handle
+ * @dmap: dma map handle
+ *
+ * This can later be used to map networking buffers. Such maps:
+ * - need space in the driver's software descriptor
+ * - are typically created during driver attach
+ * - need to be created before any API (qdf_nbuf_map) that uses them
+ *
+ * Return: QDF STATUS
+ */
+QDF_STATUS
+__qdf_nbuf_dmamap_create(qdf_device_t osdev, __qdf_dma_map_t *dmap)
+{
+	QDF_STATUS error = QDF_STATUS_SUCCESS;
+	/*
+	 * The driver can advertise its SG capability; that must be handled
+	 * here, along with bounce buffers if they are present.
+	 */
+	(*dmap) = kzalloc(sizeof(struct __qdf_dma_map), GFP_KERNEL);
+	if (!(*dmap))
+		error = QDF_STATUS_E_NOMEM;
+
+	return error;
+}
+EXPORT_SYMBOL(__qdf_nbuf_dmamap_create);
+/**
+ * __qdf_nbuf_dmamap_destroy() - delete a dma map
+ * @osdev: qdf device handle
+ * @dmap: dma map handle
+ *
+ * Return: none
+ */
+void
+__qdf_nbuf_dmamap_destroy(qdf_device_t osdev, __qdf_dma_map_t dmap)
+{
+	kfree(dmap);
+}
+EXPORT_SYMBOL(__qdf_nbuf_dmamap_destroy);
+
+/**
+ * __qdf_nbuf_map_nbytes_single() - map nbytes
+ * @osdev: os device
+ * @buf: buffer
+ * @dir: direction
+ * @nbytes: number of bytes
+ *
+ * Return: QDF_STATUS
+ */
+#ifdef A_SIMOS_DEVHOST
+QDF_STATUS __qdf_nbuf_map_nbytes_single(
+		qdf_device_t osdev, struct sk_buff *buf,
+		 qdf_dma_dir_t dir, int nbytes)
+{
+	qdf_dma_addr_t paddr;
+
+	QDF_NBUF_CB_PADDR(buf) = paddr = (uint32_t) buf->data;
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_map_nbytes_single);
+#else
+QDF_STATUS __qdf_nbuf_map_nbytes_single(
+		qdf_device_t osdev, struct sk_buff *buf,
+		 qdf_dma_dir_t dir, int nbytes)
+{
+	qdf_dma_addr_t paddr;
+
+	/* assume that the OS only provides a single fragment */
+	QDF_NBUF_CB_PADDR(buf) = paddr =
+		dma_map_single(osdev->dev, buf->data,
+			nbytes, dir);
+	return dma_mapping_error(osdev->dev, paddr) ?
+		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_map_nbytes_single);
+#endif
+/**
+ * __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
+ * @osdev: os device
+ * @buf: buffer
+ * @dir: direction
+ * @nbytes: number of bytes
+ *
+ * Return: none
+ */
+#if defined(A_SIMOS_DEVHOST)
+void
+__qdf_nbuf_unmap_nbytes_single(
+	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
+{
+	return;
+}
+EXPORT_SYMBOL(__qdf_nbuf_unmap_nbytes_single);
+
+#else
+void
+__qdf_nbuf_unmap_nbytes_single(
+	qdf_device_t osdev, struct sk_buff *buf, qdf_dma_dir_t dir, int nbytes)
+{
+	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
+		qdf_print("ERROR: NBUF mapped physical address is NULL\n");
+		return;
+	}
+	dma_unmap_single(osdev->dev, QDF_NBUF_CB_PADDR(buf),
+			nbytes, dir);
+}
+EXPORT_SYMBOL(__qdf_nbuf_unmap_nbytes_single);
+#endif
+/**
+ * __qdf_nbuf_map_nbytes() - get the dma map of the nbuf
+ * @osdev: os device
+ * @skb: skb handle
+ * @dir: dma direction
+ * @nbytes: number of bytes to be mapped
+ *
+ * Return: QDF_STATUS
+ */
+#ifdef QDF_OS_DEBUG
+QDF_STATUS
+__qdf_nbuf_map_nbytes(
+	qdf_device_t osdev,
+	struct sk_buff *skb,
+	qdf_dma_dir_t dir,
+	int nbytes)
+{
+	struct skb_shared_info  *sh = skb_shinfo(skb);
+	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
+
+	/*
+	 * Assume there's only a single fragment.
+	 * To support multiple fragments, it would be necessary to change
+	 * qdf_nbuf_t to be a separate object that stores meta-info
+	 * (including the bus address for each fragment) and a pointer
+	 * to the underlying sk_buff.
+	 */
+	qdf_assert(sh->nr_frags == 0);
+
+	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
+}
+EXPORT_SYMBOL(__qdf_nbuf_map_nbytes);
+#else
+QDF_STATUS
+__qdf_nbuf_map_nbytes(
+	qdf_device_t osdev,
+	struct sk_buff *skb,
+	qdf_dma_dir_t dir,
+	int nbytes)
+{
+	return __qdf_nbuf_map_nbytes_single(osdev, skb, dir, nbytes);
+}
+EXPORT_SYMBOL(__qdf_nbuf_map_nbytes);
+#endif
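+
+/*
+ * Usage sketch (hypothetical transmit path, for illustration only):
+ * map before handing the buffer to hardware and unmap on completion.
+ */
+#if 0
+static void example_map_unmap(qdf_device_t osdev, struct sk_buff *skb)
+{
+	int nbytes = skb->len;
+
+	if (__qdf_nbuf_map_nbytes(osdev, skb, QDF_DMA_TO_DEVICE, nbytes) !=
+	    QDF_STATUS_SUCCESS)
+		return;
+
+	/* ... post QDF_NBUF_CB_PADDR(skb) to hardware, wait for completion ... */
+
+	__qdf_nbuf_unmap_nbytes(osdev, skb, QDF_DMA_TO_DEVICE, nbytes);
+}
+#endif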
+/**
+ * __qdf_nbuf_unmap_nbytes() - to unmap a previously mapped buf
+ * @osdev: OS device
+ * @skb: skb handle
+ * @dir: direction
+ * @nbytes: number of bytes
+ *
+ * Return: none
+ */
+void
+__qdf_nbuf_unmap_nbytes(
+	qdf_device_t osdev,
+	struct sk_buff *skb,
+	qdf_dma_dir_t dir,
+	int nbytes)
+{
+	qdf_assert((dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
+
+	/*
+	 * Assume there's a single fragment.
+	 * If this is not true, the assertion in __qdf_nbuf_map_nbytes will catch it.
+	 */
+	__qdf_nbuf_unmap_nbytes_single(osdev, skb, dir, nbytes);
+}
+EXPORT_SYMBOL(__qdf_nbuf_unmap_nbytes);
+
+/**
+ * __qdf_nbuf_dma_map_info() - return the dma map info
+ * @bmap: dma map
+ * @sg: dma map info
+ *
+ * Return: none
+ */
+void
+__qdf_nbuf_dma_map_info(__qdf_dma_map_t bmap, qdf_dmamap_info_t *sg)
+{
+	qdf_assert(bmap->mapped);
+	qdf_assert(bmap->nsegs <= QDF_MAX_SCATTER);
+
+	memcpy(sg->dma_segs, bmap->seg, bmap->nsegs *
+			sizeof(struct __qdf_segment));
+	sg->nsegs = bmap->nsegs;
+}
+EXPORT_SYMBOL(__qdf_nbuf_dma_map_info);
+/**
+ * __qdf_nbuf_frag_info() - return the frag data & len, where frag no. is
+ *			specified by the index
+ * @skb: sk buff
+ * @sg: scatter/gather list of all the frags
+ *
+ * Return: none
+ */
+#if defined(__QDF_SUPPORT_FRAG_MEM)
+void
+__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
+{
+	struct skb_shared_info *sh = skb_shinfo(skb);
+	int i;
+
+	qdf_assert(skb != NULL);
+	sg->sg_segs[0].vaddr = skb->data;
+	sg->sg_segs[0].len   = skb->len;
+	sg->nsegs            = 1;
+
+	for (i = 1; i <= sh->nr_frags; i++) {
+		skb_frag_t *f = &sh->frags[i - 1];
+
+		qdf_assert(i < QDF_MAX_SGLIST);
+		sg->sg_segs[i].vaddr = (uint8_t *)(page_address(f->page) +
+						   f->page_offset);
+		sg->sg_segs[i].len = f->size;
+	}
+	sg->nsegs += sh->nr_frags;
+
+}
+EXPORT_SYMBOL(__qdf_nbuf_frag_info);
+#else
+#ifdef QDF_OS_DEBUG
+void
+__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
+{
+
+	struct skb_shared_info  *sh = skb_shinfo(skb);
+
+	qdf_assert(skb != NULL);
+	sg->sg_segs[0].vaddr = skb->data;
+	sg->sg_segs[0].len   = skb->len;
+	sg->nsegs            = 1;
+
+	qdf_assert(sh->nr_frags == 0);
+}
+EXPORT_SYMBOL(__qdf_nbuf_frag_info);
+#else
+void
+__qdf_nbuf_frag_info(struct sk_buff *skb, qdf_sglist_t  *sg)
+{
+	sg->sg_segs[0].vaddr = skb->data;
+	sg->sg_segs[0].len   = skb->len;
+	sg->nsegs            = 1;
+}
+EXPORT_SYMBOL(__qdf_nbuf_frag_info);
+#endif
+#endif
+/**
+ * __qdf_nbuf_get_frag_size() - get frag size
+ * @nbuf: sk buffer
+ * @cur_frag: current frag
+ *
+ * Return: frag size
+ */
+uint32_t
+__qdf_nbuf_get_frag_size(__qdf_nbuf_t nbuf, uint32_t cur_frag)
+{
+	struct skb_shared_info  *sh = skb_shinfo(nbuf);
+	const skb_frag_t *frag = sh->frags + cur_frag;
+	return skb_frag_size(frag);
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_frag_size);
+
+/**
+ * __qdf_nbuf_frag_map() - dma map frag
+ * @osdev: os device
+ * @nbuf: sk buff
+ * @offset: offset
+ * @dir: direction
+ * @cur_frag: current fragment
+ *
+ * Return: QDF status
+ */
+#ifdef A_SIMOS_DEVHOST
+QDF_STATUS __qdf_nbuf_frag_map(
+	qdf_device_t osdev, __qdf_nbuf_t nbuf,
+	int offset, qdf_dma_dir_t dir, int cur_frag)
+{
+	int32_t paddr, frag_len;
+
+	QDF_NBUF_CB_PADDR(nbuf) = paddr = (int32_t) nbuf->data;
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_frag_map);
+#else
+QDF_STATUS __qdf_nbuf_frag_map(
+	qdf_device_t osdev, __qdf_nbuf_t nbuf,
+	int offset, qdf_dma_dir_t dir, int cur_frag)
+{
+	int32_t paddr, frag_len;
+
+	struct skb_shared_info *sh = skb_shinfo(nbuf);
+	const skb_frag_t *frag = sh->frags + cur_frag;
+	frag_len = skb_frag_size(frag);
+
+	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(nbuf) = paddr =
+		skb_frag_dma_map(osdev->dev, frag, offset, frag_len, dir);
+	return dma_mapping_error(osdev->dev, paddr) ?
+			QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_frag_map);
+#endif
+/**
+ * __qdf_nbuf_dmamap_set_cb() - setup the map callback for a dma map
+ * @dmap: dma map
+ * @cb: callback
+ * @arg: argument
+ *
+ * Return: none
+ */
+void
+__qdf_nbuf_dmamap_set_cb(__qdf_dma_map_t dmap, void *cb, void *arg)
+{
+	return;
+}
+EXPORT_SYMBOL(__qdf_nbuf_dmamap_set_cb);
+
+
+/**
+ * __qdf_nbuf_get_vlan_info() - get vlan info
+ * @hdl: net handle
+ * @skb: sk buff
+ * @vlan: vlan header
+ *
+ * Return: QDF status
+ */
+QDF_STATUS
+__qdf_nbuf_get_vlan_info(qdf_net_handle_t hdl, struct sk_buff *skb,
+			qdf_net_vlanhdr_t *vlan)
+{
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(__qdf_nbuf_get_vlan_info);
+
+#ifndef REMOVE_INIT_DEBUG_CODE
+/**
+ * __qdf_nbuf_sync_single_for_cpu() - nbuf sync
+ * @osdev: os device
+ * @buf: sk buff
+ * @dir: direction
+ *
+ * Return: none
+ */
+#if defined(A_SIMOS_DEVHOST)
+void __qdf_nbuf_sync_single_for_cpu(
+	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
+{
+	return;
+}
+EXPORT_SYMBOL(__qdf_nbuf_sync_single_for_cpu);
+#else
+void __qdf_nbuf_sync_single_for_cpu(
+	qdf_device_t osdev, qdf_nbuf_t buf, qdf_dma_dir_t dir)
+{
+	if (0 ==  QDF_NBUF_CB_PADDR(buf)) {
+		qdf_print("ERROR: NBUF mapped physical address is NULL\n");
+		return;
+	}
+/*    dma_sync_single_for_cpu(osdev->dev, QDF_NBUF_CB_PADDR(buf),
+      buf->end - buf->data, dir);    */
+}
+EXPORT_SYMBOL(__qdf_nbuf_sync_single_for_cpu);
+#endif
+/**
+ * __qdf_nbuf_sync_for_cpu() - nbuf sync
+ * @osdev: os device
+ * @skb: sk buff
+ * @dir: direction
+ *
+ * Return: none
+ */
+void
+__qdf_nbuf_sync_for_cpu(qdf_device_t osdev,
+	struct sk_buff *skb, qdf_dma_dir_t dir)
+{
+	qdf_assert(
+	(dir == QDF_DMA_TO_DEVICE) || (dir == QDF_DMA_FROM_DEVICE));
+
+	/*
+	 * Assume there's a single fragment.
+	 * If this is not true, the assertion in __qdf_nbuf_map_nbytes will catch it.
+	 */
+	__qdf_nbuf_sync_single_for_cpu(osdev, skb, dir);
+}
+EXPORT_SYMBOL(__qdf_nbuf_sync_for_cpu);
+#endif
+

+ 195 - 0
qdf/linux/src/qdf_perf.c

@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2012-2016 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: qdf_perf
+ * This file provides OS dependent perf API's.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <asm/uaccess.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include <qdf_perf.h>
+
+qdf_perf_entry_t     perf_root = {{0, 0} };
+
+/**
+ * qdf_perfmod_init() - Module init
+ *
+ * return: int
+ */
+int
+qdf_perfmod_init(void)
+{
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
+		  "Perf Debug Module Init");
+	INIT_LIST_HEAD(&perf_root.list);
+	INIT_LIST_HEAD(&perf_root.child);
+	perf_root.proc = proc_mkdir(PROCFS_PERF_DIRNAME, 0);
+	return 0;
+}
+EXPORT_SYMBOL(qdf_perfmod_init);
+
+/**
+ * qdf_perfmod_exit() - Module exit
+ *
+ * Return: none
+ */
+void
+qdf_perfmod_exit(void)
+{
+	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
+		  "Perf Debug Module Exit");
+	remove_proc_entry(PROCFS_PERF_DIRNAME, 0);
+}
+EXPORT_SYMBOL(qdf_perfmod_exit);
+
+/**
+ * __qdf_perf_init() - Create the perf entry
+ * @parent: parent perf id
+ * @id_name: name of perf id
+ * @type: type of perf counter
+ *
+ * return: perf id
+ */
+qdf_perf_id_t
+__qdf_perf_init(qdf_perf_id_t parent, uint8_t *id_name,
+		qdf_perf_cntr_t type)
+{
+	qdf_perf_entry_t    *entry  = NULL;
+	qdf_perf_entry_t    *pentry = PERF_ENTRY(parent);
+
+	if (type >= CNTR_LAST) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s:%s Invalid perf-type", __FILE__, __func__);
+		goto done;
+	}
+
+	if (!pentry)
+		pentry = &perf_root;
+	entry = kmalloc(sizeof(struct qdf_perf_entry), GFP_ATOMIC);
+
+	if (!entry) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  " Out of Memory,:%s", __func__);
+		return NULL;
+	}
+
+	memset(entry, 0, sizeof(struct qdf_perf_entry));
+
+	INIT_LIST_HEAD(&entry->list);
+	INIT_LIST_HEAD(&entry->child);
+
+	spin_lock_init(&entry->lock_irq);
+
+	list_add_tail(&entry->list, &pentry->child);
+
+	entry->name = id_name;
+	entry->type = type;
+
+	if (type == CNTR_GROUP) {
+		entry->proc = proc_mkdir(id_name, pentry->proc);
+		goto done;
+	}
+
+	entry->parent   = pentry;
+	entry->proc     = create_proc_entry(id_name, S_IFREG|S_IRUGO|S_IWUSR,
+					pentry->proc);
+	entry->proc->data       = entry;
+	entry->proc->read_proc  = api_tbl[type].proc_read;
+	entry->proc->write_proc = api_tbl[type].proc_write;
+
+	/*
+	 * Initialize the Event with default values
+	 */
+	api_tbl[type].init(entry, api_tbl[type].def_val);
+
+done:
+	return entry;
+}
+EXPORT_SYMBOL(__qdf_perf_init);
+
+/**
+ * __qdf_perf_destroy - Destroy the perf entry
+ * @id: pointer to qdf_perf_id_t
+ *
+ * @return: bool
+ */
+bool __qdf_perf_destroy(qdf_perf_id_t  id)
+{
+	qdf_perf_entry_t     *entry  = PERF_ENTRY(id),
+		*parent = entry->parent;
+
+	if (!list_empty(&entry->child)) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "Child's are alive, Can't delete");
+		return A_FALSE;
+	}
+
+	remove_proc_entry(entry->name, parent->proc);
+
+	list_del(&entry->list);
+
+	vfree(entry);
+
+	return true;
+}
+EXPORT_SYMBOL(__qdf_perf_destroy);
+
+/**
+ * __qdf_perf_start - Start the sampling
+ * @id: Instance of qdf_perf_id_t
+ *
+ * Returns: none
+ */
+void __qdf_perf_start(qdf_perf_id_t id)
+{
+	qdf_perf_entry_t *entry = PERF_ENTRY(id);
+
+	api_tbl[entry->type].sample(entry, 0);
+}
+EXPORT_SYMBOL(__qdf_perf_start);
+
+/**
+ * __qdf_perf_end - Stop sampling
+ * @id: Instance of qdf_perf_id_t
+ *
+ * Returns: none
+ */
+void __qdf_perf_end(qdf_perf_id_t id)
+{
+	qdf_perf_entry_t *entry = PERF_ENTRY(id);
+
+	api_tbl[entry->type].sample(entry, 1);
+}
+EXPORT_SYMBOL(__qdf_perf_end);

+ 26 - 28
qdf/src/qdf_threads.c → qdf/linux/src/qdf_threads.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -26,62 +26,58 @@
  */
 
 /**
- * DOC:  cdf_threads
- *
- * Connectivity driver framework (CDF) thread APIs
- *
+ * DOC: qdf_threads
+ * QCA driver framework (QDF) thread APIs
  */
 
 /* Include Files */
-#include <cdf_threads.h>
-#include <cdf_trace.h>
+#include <qdf_threads.h>
+#include <qdf_types.h>
+#include <qdf_trace.h>
 #include <linux/jiffies.h>
 #include <linux/sched.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 
-/* Preprocessor definitions and constants */
-
-/* Type declarations */
-
 /* Function declarations and documenation */
 
 /**
- *  cdf_sleep() - sleep
- *  @msInterval : Number of milliseconds to suspend the current thread.
+ *  qdf_sleep() - sleep
+ *  @ms_interval : Number of milliseconds to suspend the current thread.
  *  A value of 0 may or may not cause the current thread to yield.
  *
  *  This function suspends the execution of the current thread
  *  until the specified time out interval elapses.
  *
- *  Return: nothing
+ *  Return: none
  */
-void cdf_sleep(uint32_t msInterval)
+void qdf_sleep(uint32_t ms_interval)
 {
 	if (in_interrupt()) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			  "%s cannot be called from interrupt context!!!",
 			  __func__);
 		return;
 	}
-	msleep_interruptible(msInterval);
+	msleep_interruptible(ms_interval);
 }
+EXPORT_SYMBOL(qdf_sleep);
 
 /**
- *  cdf_sleep_us() - sleep
- *  @usInterval : Number of microseconds to suspend the current thread.
+ *  qdf_sleep_us() - sleep
+ *  @us_interval : Number of microseconds to suspend the current thread.
  *  A value of 0 may or may not cause the current thread to yield.
  *
  *  This function suspends the execution of the current thread
  *  until the specified time out interval elapses.
  *
- *  Return : nothing
+ *  Return : none
  */
-void cdf_sleep_us(uint32_t usInterval)
+void qdf_sleep_us(uint32_t us_interval)
 {
-	unsigned long timeout = usecs_to_jiffies(usInterval) + 1;
+	unsigned long timeout = usecs_to_jiffies(us_interval) + 1;
 	if (in_interrupt()) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
 			  "%s cannot be called from interrupt context!!!",
 			  __func__);
 		return;
@@ -90,18 +86,20 @@ void cdf_sleep_us(uint32_t usInterval)
 	while (timeout && !signal_pending(current))
 		timeout = schedule_timeout_interruptible(timeout);
 }
+EXPORT_SYMBOL(qdf_sleep_us);
 
 /**
- *  cdf_busy_wait() - busy wait
- *  @usInterval : Number of microseconds to busy wait.
+ *  qdf_busy_wait() - busy wait
+ *  @us_interval : Number of microseconds to busy wait.
  *
  *  This function places the current thread in busy wait until the specified
  *  time out interval elapses. If the interval is greater than 50us on WM, the
  *  behaviour is undefined.
  *
- *  Return : nothing
+ *  Return : none
  */
-void cdf_busy_wait(uint32_t usInterval)
+void qdf_busy_wait(uint32_t us_interval)
 {
-	udelay(usInterval);
+	udelay(us_interval);
 }
+EXPORT_SYMBOL(qdf_busy_wait);

+ 1054 - 0
qdf/linux/src/qdf_trace.c

@@ -0,0 +1,1054 @@
+/*
+ * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ *  DOC:  qdf_trace
+ *  QCA driver framework (QDF) trace APIs
+ *  Trace, logging, and debugging definitions and APIs
+ */
+
+/* Include Files */
+#include <qdf_trace.h>
+#include <ani_global.h>
+#include <wlan_logging_sock_svc.h>
+#include "qdf_time.h"
+/* Preprocessor definitions and constants */
+
+#define QDF_TRACE_BUFFER_SIZE (512)
+
+enum qdf_timestamp_unit qdf_log_timestamp_type = QDF_LOG_TIMESTAMP_UNIT;
+
+/* macro to map qdf trace levels into the bitmask */
+#define QDF_TRACE_LEVEL_TO_MODULE_BITMASK(_level) ((1 << (_level)))
+
+/**
+ * typedef struct module_trace_info - Trace level for a module, as a bitmask.
+ * The bits in this mask are ordered by QDF_TRACE_LEVEL.  For example,
+ * each bit represents one of the bits in QDF_TRACE_LEVEL that may be turned
+ * on to have traces at that level logged, i.e. if QDF_TRACE_LEVEL_ERROR is
+ * == 2, then if bit 2 (low order) is turned ON, then ERROR traces will be
+ * printed to the trace log. Note that all bits turned OFF means no traces
+ * @module_trace_level: trace level
+ * @module_name_str: 3 character string name for the module
+ */
+typedef struct {
+	uint16_t module_trace_level;
+	unsigned char module_name_str[4];
+} module_trace_info;
+
+#define QDF_DEFAULT_TRACE_LEVEL	\
+	((1 << QDF_TRACE_LEVEL_FATAL) | (1 << QDF_TRACE_LEVEL_ERROR))
+
+/* Array of static data that contains all of the per module trace
+ * information.  This includes the trace level for the module and
+ * the 3 character 'name' of the module for marking the trace logs
+ */
+module_trace_info g_qdf_trace_info[QDF_MODULE_ID_MAX] = {
+	[QDF_MODULE_ID_TLSHIM] = {QDF_DEFAULT_TRACE_LEVEL, "DP"},
+	[QDF_MODULE_ID_WMI] = {QDF_DEFAULT_TRACE_LEVEL, "WMI"},
+	[QDF_MODULE_ID_HDD] = {QDF_DEFAULT_TRACE_LEVEL, "HDD"},
+	[QDF_MODULE_ID_SME] = {QDF_DEFAULT_TRACE_LEVEL, "SME"},
+	[QDF_MODULE_ID_PE] = {QDF_DEFAULT_TRACE_LEVEL, "PE "},
+	[QDF_MODULE_ID_WMA] = {QDF_DEFAULT_TRACE_LEVEL, "WMA"},
+	[QDF_MODULE_ID_SYS] = {QDF_DEFAULT_TRACE_LEVEL, "SYS"},
+	[QDF_MODULE_ID_QDF] = {QDF_DEFAULT_TRACE_LEVEL, "QDF"},
+	[QDF_MODULE_ID_SAP] = {QDF_DEFAULT_TRACE_LEVEL, "SAP"},
+	[QDF_MODULE_ID_HDD_SOFTAP] = {QDF_DEFAULT_TRACE_LEVEL, "HSP"},
+	[QDF_MODULE_ID_HDD_DATA] = {QDF_DEFAULT_TRACE_LEVEL, "HDP"},
+	[QDF_MODULE_ID_HDD_SAP_DATA] = {QDF_DEFAULT_TRACE_LEVEL, "SDP"},
+	[QDF_MODULE_ID_BMI] = {QDF_DEFAULT_TRACE_LEVEL, "BMI"},
+	[QDF_MODULE_ID_HIF] = {QDF_DEFAULT_TRACE_LEVEL, "HIF"},
+	[QDF_MODULE_ID_TXRX] = {QDF_DEFAULT_TRACE_LEVEL, "TRX"},
+	[QDF_MODULE_ID_HTT] = {QDF_DEFAULT_TRACE_LEVEL, "HTT"},
+};
+
+/* Static and Global variables */
+static spinlock_t ltrace_lock;
+
+static qdf_trace_record_t g_qdf_trace_tbl[MAX_QDF_TRACE_RECORDS];
+/* global qdf trace data */
+static t_qdf_trace_data g_qdf_trace_data;
+/*
+ * All the callback functions for dumping MTRACE messages from the ring
+ * buffer are stored in qdf_trace_cb_table. These callbacks are initialized
+ * only during init, so a copy is kept in qdf_trace_restore_cb_table. If
+ * qdf_trace_cb_table is modified, the callbacks can always be retrieved
+ * from the restore table.
+ */
+static tp_qdf_trace_cb qdf_trace_cb_table[QDF_MODULE_ID_MAX];
+static tp_qdf_trace_cb qdf_trace_restore_cb_table[QDF_MODULE_ID_MAX];
+
+/* Static and Global variables */
+static spinlock_t l_dp_trace_lock;
+
+static struct qdf_dp_trace_record_s
+			g_qdf_dp_trace_tbl[MAX_QDF_DP_TRACE_RECORDS];
+
+/*
+ * all the options to configure/control DP trace are
+ * defined in this structure
+ */
+static struct s_qdf_dp_trace_data g_qdf_dp_trace_data;
+/*
+ * all the call back functions for dumping DPTRACE messages from ring buffer
+ * are stored in qdf_dp_trace_cb_table, callbacks are initialized during init
+ */
+static tp_qdf_dp_trace_cb qdf_dp_trace_cb_table[QDF_DP_TRACE_MAX];
+
+/**
+ * qdf_trace_set_level() - Set the trace level for a particular module
+ * @module: Module id
+ * @level : trace level
+ *
+ * Trace level is a member of the QDF_TRACE_LEVEL enumeration indicating
+ * the severity of the condition causing the trace message to be issued.
+ * More severe conditions are more likely to be logged.
+ *
+ * This is an external API that allows trace levels to be set for each module.
+ *
+ * Return:  None
+ */
+void qdf_trace_set_level(QDF_MODULE_ID module, QDF_TRACE_LEVEL level)
+{
+	/* make sure the caller is passing in a valid LEVEL */
+	if (level >= QDF_TRACE_LEVEL_MAX) {
+		pr_err("%s: Invalid trace level %d passed in!\n", __func__,
+		       level);
+		return;
+	}
+
+	/* Treat 'none' differently.  NONE means we have to turn off all
+	 * the bits in the bit mask so none of the traces appear. Anything
+	 * other than 'none' means we need to turn ON a bit in the bitmask
+	 */
+	if (QDF_TRACE_LEVEL_NONE == level)
+		g_qdf_trace_info[module].module_trace_level =
+			QDF_TRACE_LEVEL_NONE;
+	else
+		/* set the desired bit in the bit mask for the module trace
+		 * level */
+		g_qdf_trace_info[module].module_trace_level |=
+			QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level);
+}
+EXPORT_SYMBOL(qdf_trace_set_level);
+
+/**
+ * qdf_trace_set_module_trace_level() - Set module trace level
+ * @module: Module id
+ * @level: Trace level for a module, as a bitmask as per 'module_trace_info'
+ *
+ * Sets the module trace level where the trace level is given as a bit mask
+ *
+ * Return: None
+ */
+void qdf_trace_set_module_trace_level(QDF_MODULE_ID module, uint32_t level)
+{
+	if (module < 0 || module >= QDF_MODULE_ID_MAX) {
+		pr_err("%s: Invalid module id %d passed\n", __func__, module);
+		return;
+	}
+	g_qdf_trace_info[module].module_trace_level = level;
+}
+EXPORT_SYMBOL(qdf_trace_set_module_trace_level);
+
+/**
+ * qdf_trace_set_value() - Set module trace value
+ * @module: Module id
+ * @level: Trace level for a module, as a bitmask as per 'module_trace_info'
+ * @on: set/clear the desired bit in the bit mask
+ *
+ * Return: None
+ */
+void qdf_trace_set_value(QDF_MODULE_ID module, QDF_TRACE_LEVEL level,
+			 uint8_t on)
+{
+	/* make sure the caller is passing in a valid LEVEL */
+	if (level < 0 || level >= QDF_TRACE_LEVEL_MAX) {
+		pr_err("%s: Invalid trace level %d passed in!\n", __func__,
+		       level);
+		return;
+	}
+
+	/* make sure the caller is passing in a valid module */
+	if (module < 0 || module >= QDF_MODULE_ID_MAX) {
+		pr_err("%s: Invalid module id %d passed in!\n", __func__,
+		       module);
+		return;
+	}
+
+	/* Treat 'none' differently.  NONE means we have to turn off all
+	   the bits in the bit mask so none of the traces appear */
+	if (QDF_TRACE_LEVEL_NONE == level) {
+		g_qdf_trace_info[module].module_trace_level =
+			QDF_TRACE_LEVEL_NONE;
+	}
+	/* Treat 'All' differently.  All means we have to turn on all
+	   the bits in the bit mask so all of the traces appear */
+	else if (QDF_TRACE_LEVEL_ALL == level) {
+		g_qdf_trace_info[module].module_trace_level = 0xFFFF;
+	} else {
+		if (on)
+			/* set the desired bit in the bit mask for the module
+			   trace level */
+			g_qdf_trace_info[module].module_trace_level |=
+				QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level);
+		else
+			/* clear the desired bit in the bit mask for the module
+			   trace level */
+			g_qdf_trace_info[module].module_trace_level &=
+				~(QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level));
+	}
+}
+EXPORT_SYMBOL(qdf_trace_set_value);
+
+/**
+ * qdf_trace_get_level() - get the trace level
+ * @module: module Id
+ * @level: trace level
+ *
+ * This is an external API that returns a bool value to signify if a
+ * particular trace level is set for the specified module.
+ * A member of the QDF_TRACE_LEVEL enumeration indicating the severity
+ * of the condition causing the trace message to be issued.
+ *
+ * Note that individual trace levels are the only valid values
+ * for this API.  QDF_TRACE_LEVEL_NONE and QDF_TRACE_LEVEL_ALL
+ * are not valid input and will return false
+ *
+ * Return:
+ * false - the specified trace level for the specified module is OFF
+ * true - the specified trace level for the specified module is ON
+ */
+bool qdf_trace_get_level(QDF_MODULE_ID module, QDF_TRACE_LEVEL level)
+{
+	bool trace_on = false;
+
+	if ((QDF_TRACE_LEVEL_NONE == level) ||
+	    (QDF_TRACE_LEVEL_ALL == level) || (level >= QDF_TRACE_LEVEL_MAX)) {
+		trace_on = false;
+	} else {
+		trace_on = (level & g_qdf_trace_info[module].module_trace_level)
+			  ? true : false;
+	}
+
+	return trace_on;
+}
+EXPORT_SYMBOL(qdf_trace_get_level);
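+
+/*
+ * Usage sketch (hypothetical, for illustration only): since the per
+ * module level is a bitmask, enabling WARN on top of the default
+ * FATAL|ERROR mask just ORs one more bit in.
+ */
+#if 0
+static void example_enable_warn_for_hdd(void)
+{
+	/* set the WARN bit for the HDD module */
+	qdf_trace_set_value(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN, 1);
+
+	if (qdf_trace_get_level(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN))
+		pr_info("HDD WARN tracing is on\n");
+}
+#endif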
+
+/**
+ * qdf_snprintf() - wrapper function to snprintf
+ * @str_buffer: string Buffer
+ * @size: defines the size of the data record
+ * @str_format: Format string in which the message to be logged. This format
+ * string contains printf-like replacement parameters, which follow
+ * this parameter in the variable argument list.
+ *
+ * Return: None
+ */
+void qdf_snprintf(char *str_buffer, unsigned int size, char *str_format, ...)
+{
+	va_list val;
+
+	va_start(val, str_format);
+	vsnprintf(str_buffer, size, str_format, val);
+	va_end(val);
+}
+EXPORT_SYMBOL(qdf_snprintf);
+
+#ifdef QDF_ENABLE_TRACING
+
+/**
+ * qdf_trace_msg() - externally called trace function
+ * @module: Module identifier a member of the QDF_MODULE_ID
+ * enumeration that identifies the module issuing the trace message.
+ * @level: Trace level a member of the QDF_TRACE_LEVEL enumeration
+ * indicating the severity of the condition causing the trace message
+ * to be issued. More severe conditions are more likely to be logged.
+ * @str_format: Format string in which the message to be logged. This format
+ * string contains printf-like replacement parameters, which follow
+ * this parameter in the variable argument list.
+ *
+ * Checks the level of severity and accordingly prints the trace messages
+ *
+ * Return: None
+ */
+void qdf_trace_msg(QDF_MODULE_ID module, QDF_TRACE_LEVEL level,
+		   char *str_format, ...)
+{
+	char str_buffer[QDF_TRACE_BUFFER_SIZE];
+	int n;
+
+	/* Print the trace message when the desired level bit is set in
+	   the module tracel level mask */
+	if (g_qdf_trace_info[module].module_trace_level &
+	    QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level)) {
+		/* the trace level strings in an array.  these are ordered in
+		 * the same order as the trace levels are defined in the enum
+		 * (see QDF_TRACE_LEVEL) so we can index into this array with
+		 * the level and get the right string. The qdf trace levels
+		 * are... none, Fatal, Error, Warning, Info, info_high, info_med,
+		 * info_low, Debug
+		 */
+		static const char *TRACE_LEVEL_STR[] = { "  ", "F ", "E ", "W ",
+						"I ", "IH", "IM", "IL", "D" };
+		va_list val;
+		va_start(val, str_format);
+
+		/* print the prefix string into the string buffer... */
+		n = snprintf(str_buffer, QDF_TRACE_BUFFER_SIZE,
+			     "wlan: [%d:%2s:%3s] ",
+			     in_interrupt() ? 0 : current->pid,
+			     (char *)TRACE_LEVEL_STR[level],
+			     (char *)g_qdf_trace_info[module].module_name_str);
+
+		/* print the formatted log message after the prefix string */
+		if ((n >= 0) && (n < QDF_TRACE_BUFFER_SIZE)) {
+			vsnprintf(str_buffer + n, QDF_TRACE_BUFFER_SIZE - n,
+				  str_format, val);
+#if defined(WLAN_LOGGING_SOCK_SVC_ENABLE)
+			wlan_log_to_user(level, (char *)str_buffer,
+					 strlen(str_buffer));
+#else
+			pr_err("%s\n", str_buffer);
+#endif
+		}
+		va_end(val);
+	}
+}
+EXPORT_SYMBOL(qdf_trace_msg);
+
+/**
+ * qdf_trace_display() - Display trace
+ *
+ * Return:  None
+ */
+void qdf_trace_display(void)
+{
+	QDF_MODULE_ID module_id;
+
+	pr_err
+		("     1)FATAL  2)ERROR  3)WARN  4)INFO  5)INFO_H  6)INFO_M  7)INFO_L 8)DEBUG\n");
+	for (module_id = 0; module_id < QDF_MODULE_ID_MAX; ++module_id) {
+		pr_err
+			("%2d)%s    %s        %s       %s       %s        %s         %s         %s        %s\n",
+			(int)module_id, g_qdf_trace_info[module_id].module_name_str,
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_FATAL)) ? "X" :
+			" ",
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_ERROR)) ? "X" :
+			" ",
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_WARN)) ? "X" :
+			" ",
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_INFO)) ? "X" :
+			" ",
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_INFO_HIGH)) ? "X"
+			: " ",
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_INFO_MED)) ? "X"
+			: " ",
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_INFO_LOW)) ? "X"
+			: " ",
+			(g_qdf_trace_info[module_id].
+			 module_trace_level & (1 << QDF_TRACE_LEVEL_DEBUG)) ? "X" :
+			" ");
+	}
+}
+EXPORT_SYMBOL(qdf_trace_display);
+
+#define ROW_SIZE 16
+/* Buffer size = data bytes(2 hex chars plus space) + NULL */
+#define BUFFER_SIZE ((ROW_SIZE * 3) + 1)
+
+/**
+ * qdf_trace_hex_dump() - externally called hex dump function
+ * @module: Module identifier a member of the QDF_MODULE_ID enumeration that
+ * identifies the module issuing the trace message.
+ * @level: Trace level a member of the QDF_TRACE_LEVEL enumeration indicating
+ * the severity of the condition causing the trace message to be
+ * issued. More severe conditions are more likely to be logged.
+ * @data: The base address of the buffer to be logged.
+ * @buf_len: The size of the buffer to be logged.
+ *
+ * Checks the level of severity and accordingly prints the trace messages
+ *
+ * Return:  None
+ */
+void qdf_trace_hex_dump(QDF_MODULE_ID module, QDF_TRACE_LEVEL level,
+			void *data, int buf_len)
+{
+	const u8 *ptr = data;
+	int i, linelen, remaining = buf_len;
+	unsigned char linebuf[BUFFER_SIZE];
+
+	if (!(g_qdf_trace_info[module].module_trace_level &
+		QDF_TRACE_LEVEL_TO_MODULE_BITMASK(level)))
+		return;
+
+	for (i = 0; i < buf_len; i += ROW_SIZE) {
+		linelen = min(remaining, ROW_SIZE);
+		remaining -= ROW_SIZE;
+
+		hex_dump_to_buffer(ptr + i, linelen, ROW_SIZE, 1,
+				linebuf, sizeof(linebuf), false);
+
+		qdf_trace_msg(module, level, "%.8x: %s", i, linebuf);
+	}
+}
+EXPORT_SYMBOL(qdf_trace_hex_dump);
+
+#endif
+
+/**
+ * qdf_trace_enable() - Enable MTRACE for specific modules
+ * @bitmask_of_module_id: Bitmask according to enum of the modules.
+ *  32[dec] = 0010 0000 [bin] <enum of HDD is 5>
+ *  64[dec] = 0100 0000 [bin] <enum of SME is 6>
+ *  128[dec] = 1000 0000 [bin] <enum of PE is 7>
+ * @enable: can be true or false true implies enabling MTRACE false implies
+ *		disabling MTRACE.
+ *
+ * Enable MTRACE for the specific modules whose bits are set in the bitmask
+ * when enable is true; if enable is false, MTRACE is disabled for those
+ * modules. Set the bitmask according to the enum value of the modules.
+ * This function will be called when the following ioctl is issued:
+ * [iwpriv wlan0 setdumplog <value> <enable>].
+ * <value> - Decimal number, i.e. 64 decimal value shows only SME module,
+ * 128 decimal value shows only PE module, 192 decimal value shows PE and SME.
+ *
+ * Return: None
+ */
+void qdf_trace_enable(uint32_t bitmask_of_module_id, uint8_t enable)
+{
+	int i;
+	if (bitmask_of_module_id) {
+		for (i = 0; i < QDF_MODULE_ID_MAX; i++) {
+			if (((bitmask_of_module_id >> i) & 1)) {
+				if (enable) {
+					if (NULL !=
+					    qdf_trace_restore_cb_table[i]) {
+						qdf_trace_cb_table[i] =
+						qdf_trace_restore_cb_table[i];
+					}
+				} else {
+					qdf_trace_restore_cb_table[i] =
+						qdf_trace_cb_table[i];
+					qdf_trace_cb_table[i] = NULL;
+				}
+			}
+		}
+	} else {
+		if (enable) {
+			for (i = 0; i < QDF_MODULE_ID_MAX; i++) {
+				if (NULL != qdf_trace_restore_cb_table[i]) {
+					qdf_trace_cb_table[i] =
+						qdf_trace_restore_cb_table[i];
+				}
+			}
+		} else {
+			for (i = 0; i < QDF_MODULE_ID_MAX; i++) {
+				qdf_trace_restore_cb_table[i] =
+					qdf_trace_cb_table[i];
+				qdf_trace_cb_table[i] = NULL;
+			}
+		}
+	}
+}
+EXPORT_SYMBOL(qdf_trace_enable);
+
+/**
+ * qdf_trace_init() - initializes qdf trace structures and variables
+ *
+ * Called immediately after cds_preopen, so that we can start recording HDD
+ * events ASAP.
+ *
+ * Return: None
+ */
+void qdf_trace_init(void)
+{
+	uint8_t i;
+	g_qdf_trace_data.head = INVALID_QDF_TRACE_ADDR;
+	g_qdf_trace_data.tail = INVALID_QDF_TRACE_ADDR;
+	g_qdf_trace_data.num = 0;
+	g_qdf_trace_data.enable = true;
+	g_qdf_trace_data.dump_count = DEFAULT_QDF_TRACE_DUMP_COUNT;
+	g_qdf_trace_data.num_since_last_dump = 0;
+
+	for (i = 0; i < QDF_MODULE_ID_MAX; i++) {
+		qdf_trace_cb_table[i] = NULL;
+		qdf_trace_restore_cb_table[i] = NULL;
+	}
+}
+EXPORT_SYMBOL(qdf_trace_init);
+
+/**
+ * qdf_trace() - puts the messages in to ring-buffer
+ * @module: Enum of module, basically module id.
+ * @param: Code to be recorded
+ * @session: Session ID of the log
+ * @data: Actual message contents
+ *
+ * This function will be called by each module that wants to record messages
+ * in the circular queue. Before calling this function, make sure the module
+ * has been registered with QDF through the qdf_trace_register function.
+ *
+ * Return: None
+ */
+void qdf_trace(uint8_t module, uint8_t code, uint16_t session, uint32_t data)
+{
+	tp_qdf_trace_record rec = NULL;
+	unsigned long flags;
+
+	if (!g_qdf_trace_data.enable)
+		return;
+
+	/* if module is not registered, don't record for that module */
+	if (NULL == qdf_trace_cb_table[module])
+		return;
+
+	/* Acquire the lock so that only one thread at a time can fill the ring
+	 * buffer
+	 */
+	spin_lock_irqsave(&ltrace_lock, flags);
+
+	g_qdf_trace_data.num++;
+
+	if (g_qdf_trace_data.num > MAX_QDF_TRACE_RECORDS)
+		g_qdf_trace_data.num = MAX_QDF_TRACE_RECORDS;
+
+	if (INVALID_QDF_TRACE_ADDR == g_qdf_trace_data.head) {
+		/* first record */
+		g_qdf_trace_data.head = 0;
+		g_qdf_trace_data.tail = 0;
+	} else {
+		/* queue is not empty */
+		uint32_t tail = g_qdf_trace_data.tail + 1;
+
+		if (MAX_QDF_TRACE_RECORDS == tail)
+			tail = 0;
+
+		if (g_qdf_trace_data.head == tail) {
+			/* full */
+			if (MAX_QDF_TRACE_RECORDS == ++g_qdf_trace_data.head)
+				g_qdf_trace_data.head = 0;
+		}
+		g_qdf_trace_data.tail = tail;
+	}
+
+	rec = &g_qdf_trace_tbl[g_qdf_trace_data.tail];
+	rec->code = code;
+	rec->session = session;
+	rec->data = data;
+	rec->time = qdf_get_log_timestamp();
+	rec->module = module;
+	rec->pid = (in_interrupt() ? 0 : current->pid);
+	g_qdf_trace_data.num_since_last_dump++;
+	spin_unlock_irqrestore(&ltrace_lock, flags);
+}
+EXPORT_SYMBOL(qdf_trace);
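+
+/*
+ * Usage sketch (hypothetical module, for illustration only; the dump
+ * callback signature is inferred from the dump path below and may
+ * differ): a module registers its dump callback once at init and then
+ * calls qdf_trace() from its hot paths.
+ */
+#if 0
+static void example_dump_cb(void *p_mac, qdf_trace_record_t *rec,
+			    uint16_t index)
+{
+	pr_info("idx %d code %d data 0x%x\n", index, rec->code, rec->data);
+}
+
+static void example_mtrace_usage(void)
+{
+	qdf_trace_register(QDF_MODULE_ID_HDD, example_dump_cb);
+	qdf_trace(QDF_MODULE_ID_HDD, 1 /* code */, 0 /* session */,
+		  0x1234 /* data */);
+}
+#endif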
+
+/**
+ * qdf_trace_spin_lock_init() - initializes the lock variable before use
+ *
+ * This function will be called from cds_alloc_global_context, so that the
+ * lock is available to use as early as possible.
+ *
+ * Return: None
+ */
+QDF_STATUS qdf_trace_spin_lock_init(void)
+{
+	spin_lock_init(&ltrace_lock);
+
+	return QDF_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(qdf_trace_spin_lock_init);
+
+/**
+ * qdf_trace_register() - registers the call back functions
+ * @module_iD: enum value of module
+ * @qdf_trace_callback: call back functions to display the messages in
+ * particular format.
+ *
+ * Registers the callback functions that display the messages in the
+ * particular format implemented by those callbacks. Interested modules
+ * should call this function during their init, as registration is ready
+ * as soon as the modules are up.
+ *
+ * Return: None
+ */
+void qdf_trace_register(QDF_MODULE_ID module_iD,
+			tp_qdf_trace_cb qdf_trace_callback)
+{
+	qdf_trace_cb_table[module_iD] = qdf_trace_callback;
+}
+EXPORT_SYMBOL(qdf_trace_register);
+
+/**
+ * qdf_trace_dump_all() - Dump data from ring buffer via call back functions
+ * registered with QDF
+ * @p_mac: Context of particular module
+ * @code: Reason code
+ * @session: Session id of log
+ * @count: Number of lines to dump starting from tail to head
+ *
+ * This function will be called upon issuing the following ioctl:
+ * [iwpriv wlan0 dumplog 0 0 <n> <bitmask_of_module>]
+ *
+ * <n> - number lines to dump starting from tail to head.
+ *
+ * <bitmask_of_module> - restricts the dump to messages recorded for the
+ * module(s) whose bits are set in the bitmask, out of the last <n>
+ * messages. It is optional; if it is not provided, everything in the
+ * buffer is dumped.
+ *
+ * Return: None
+ */
+void qdf_trace_dump_all(void *p_mac, uint8_t code, uint8_t session,
+			uint32_t count, uint32_t bitmask_of_module)
+{
+	qdf_trace_record_t p_record;
+	int32_t i, tail;
+
+	if (!g_qdf_trace_data.enable) {
+		QDF_TRACE(QDF_MODULE_ID_SYS,
+			  QDF_TRACE_LEVEL_ERROR, "Tracing Disabled");
+		return;
+	}
+
+	QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_INFO,
+		  "Total Records: %d, Head: %d, Tail: %d",
+		  g_qdf_trace_data.num, g_qdf_trace_data.head,
+		  g_qdf_trace_data.tail);
+
+	/* acquire the lock so that only one thread at a time can read
+	 * the ring buffer
+	 */
+	spin_lock(&ltrace_lock);
+
+	if (g_qdf_trace_data.head != INVALID_QDF_TRACE_ADDR) {
+		i = g_qdf_trace_data.head;
+		tail = g_qdf_trace_data.tail;
+
+		if (count) {
+			if (count > g_qdf_trace_data.num)
+				count = g_qdf_trace_data.num;
+			if (tail >= (count - 1))
+				i = tail - count + 1;
+			else if (count != MAX_QDF_TRACE_RECORDS)
+				i = MAX_QDF_TRACE_RECORDS - ((count - 1) -
+							     tail);
+		}
+
+		p_record = g_qdf_trace_tbl[i];
+		/* right now we are not using num_since_last_dump member but
+		 * in future we might re-visit and use this member to track
+		 * how many latest messages got added while we were dumping
+		 * from ring buffer
+		 */
+		g_qdf_trace_data.num_since_last_dump = 0;
+		spin_unlock(&ltrace_lock);
+		for (;; ) {
+			if ((code == 0 || (code == p_record.code)) &&
+			    (qdf_trace_cb_table[p_record.module] != NULL)) {
+				if (0 == bitmask_of_module) {
+					qdf_trace_cb_table[p_record.
+							   module] (p_mac,
+								    &p_record,
+								    (uint16_t)
+								    i);
+				} else {
+					if (bitmask_of_module &
+					    (1 << p_record.module)) {
+						qdf_trace_cb_table[p_record.
+								   module]
+							(p_mac, &p_record,
+							(uint16_t) i);
+					}
+				}
+			}
+
+			if (i == tail)
+				break;
+			i += 1;
+
+			spin_lock(&ltrace_lock);
+			if (MAX_QDF_TRACE_RECORDS == i) {
+				i = 0;
+				p_record = g_qdf_trace_tbl[0];
+			} else {
+				p_record = g_qdf_trace_tbl[i];
+			}
+			spin_unlock(&ltrace_lock);
+		}
+	} else {
+		spin_unlock(&ltrace_lock);
+	}
+}
+EXPORT_SYMBOL(qdf_trace_dump_all);
+
+/**
+ * qdf_dp_trace_init() - enables the DP trace
+ * Called during driver load and it enables DP trace
+ *
+ * Return: None
+ */
+void qdf_dp_trace_init(void)
+{
+	uint8_t i;
+
+	qdf_dp_trace_spin_lock_init();
+	g_qdf_dp_trace_data.head = INVALID_QDF_DP_TRACE_ADDR;
+	g_qdf_dp_trace_data.tail = INVALID_QDF_DP_TRACE_ADDR;
+	g_qdf_dp_trace_data.num = 0;
+	g_qdf_dp_trace_data.proto_bitmap = 0;
+	g_qdf_dp_trace_data.no_of_record = 0;
+	g_qdf_dp_trace_data.verbosity    = QDF_DP_TRACE_VERBOSITY_DEFAULT;
+	g_qdf_dp_trace_data.enable = true;
+
+	for (i = 0; i < QDF_DP_TRACE_MAX; i++)
+		qdf_dp_trace_cb_table[i] = qdf_dp_display_record;
+}
+EXPORT_SYMBOL(qdf_dp_trace_init);
+
+/**
+ * qdf_dp_trace_set_value() - Configure the value to control DP trace
+ * @proto_bitmap: defines the protocol to be tracked
+ * @no_of_records: defines the nth packet which is traced
+ * @verbosity: defines the verbosity level
+ *
+ * Return: None
+ */
+void qdf_dp_trace_set_value(uint8_t proto_bitmap, uint8_t no_of_record,
+			    uint8_t verbosity)
+{
+	g_qdf_dp_trace_data.proto_bitmap = proto_bitmap;
+	g_qdf_dp_trace_data.no_of_record = no_of_record;
+	g_qdf_dp_trace_data.verbosity    = verbosity;
+	return;
+}
+EXPORT_SYMBOL(qdf_dp_trace_set_value);
+
+/**
+ * qdf_dp_trace_enable_track() - enable the tracing for netbuf
+ * @code: defines the event
+ *
+ * Return: true or false depends on whether tracing enabled
+ */
+static bool qdf_dp_trace_enable_track(enum QDF_DP_TRACE_ID code)
+{
+	if (g_qdf_dp_trace_data.verbosity == QDF_DP_TRACE_VERBOSITY_HIGH)
+		return true;
+	if (g_qdf_dp_trace_data.verbosity == QDF_DP_TRACE_VERBOSITY_MEDIUM
+		&& (code <= QDF_DP_TRACE_HIF_PACKET_PTR_RECORD))
+		return true;
+	if (g_qdf_dp_trace_data.verbosity == QDF_DP_TRACE_VERBOSITY_LOW
+		&& (code <= QDF_DP_TRACE_CE_PACKET_RECORD))
+		return true;
+	if (g_qdf_dp_trace_data.verbosity == QDF_DP_TRACE_VERBOSITY_DEFAULT
+		&& (code == QDF_DP_TRACE_DROP_PACKET_RECORD))
+		return true;
+	return false;
+}
+EXPORT_SYMBOL(qdf_dp_trace_enable_track);
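+
+/*
+ * Note on the thresholds above: HIGH verbosity records every event,
+ * MEDIUM records codes up to QDF_DP_TRACE_HIF_PACKET_PTR_RECORD, LOW
+ * records codes up to QDF_DP_TRACE_CE_PACKET_RECORD, and the DEFAULT
+ * level only records QDF_DP_TRACE_DROP_PACKET_RECORD events.
+ */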
+
+/**
+ * qdf_dp_trace_set_track() - Marks whether the packet needs to be traced
+ * @nbuf: defines the netbuf
+ *
+ * Return: None
+ */
+void qdf_dp_trace_set_track(qdf_nbuf_t nbuf)
+{
+	spin_lock_bh(&l_dp_trace_lock);
+	g_qdf_dp_trace_data.count++;
+	if (g_qdf_dp_trace_data.proto_bitmap != 0) {
+		if (cds_pkt_get_proto_type(nbuf,
+			g_qdf_dp_trace_data.proto_bitmap, 0)) {
+			QDF_NBUF_CB_TX_DP_TRACE(nbuf) = 1;
+		}
+	}
+	if ((g_qdf_dp_trace_data.no_of_record != 0) &&
+		(g_qdf_dp_trace_data.count %
+			g_qdf_dp_trace_data.no_of_record == 0)) {
+		QDF_NBUF_CB_TX_DP_TRACE(nbuf) = 1;
+	}
+	spin_unlock_bh(&l_dp_trace_lock);
+	return;
+}
+EXPORT_SYMBOL(qdf_dp_trace_set_track);
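A transmit-path sketch with a hypothetical entry function: each outgoing data packet is offered to qdf_dp_trace_set_track() before being handed to the lower layer, which marks it on a protocol-bitmap match or as every no_of_record-th packet.

static void example_tx_entry(qdf_nbuf_t nbuf)
{
	/* mark the nbuf for DP tracing (protocol match or nth-packet sample) */
	qdf_dp_trace_set_track(nbuf);

	/* ... hand the nbuf to the lower layer here ... */
}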
+
+/**
+ * dump_hex_trace() - Display the data in buffer
+ * @buf:     buffer which contains data to be displayed
+ * @buf_len: defines the size of the data to be displayed
+ *
+ * Return: None
+ */
+static void dump_hex_trace(uint8_t *buf, uint8_t buf_len)
+{
+	uint8_t i = 0;
+	/* Dump all the bytes on a single line */
+	qdf_print("DATA: ");
+	for (i = 0; i < buf_len; i++)
+		qdf_print("%02x ", buf[i]);
+	qdf_print("\n");
+}
+EXPORT_SYMBOL(dump_hex_trace);
+
+/**
+ * qdf_dp_display_record() - Displays a record from the DP trace ring buffer
+ * @p_record: pointer to a record in DP trace
+ * @rec_index: record index
+ *
+ * Return: None
+ */
+void qdf_dp_display_record(struct qdf_dp_trace_record_s *p_record,
+			   uint16_t rec_index)
+{
+	qdf_print("INDEX: %04d TIME: %012llu CODE: %02d\n", rec_index,
+						p_record->time, p_record->code);
+	switch (p_record->code) {
+	case  QDF_DP_TRACE_HDD_TX_TIMEOUT:
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+						"HDD TX Timeout\n");
+		break;
+	case  QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT:
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+						"HDD SoftAP TX Timeout\n");
+		break;
+	case  QDF_DP_TRACE_VDEV_PAUSE:
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+						"VDEV Pause\n");
+		break;
+	case  QDF_DP_TRACE_VDEV_UNPAUSE:
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+						"VDEV UnPause\n");
+		break;
+	default:
+		dump_hex_trace(p_record->data, p_record->size);
+	}
+	return;
+}
+EXPORT_SYMBOL(qdf_dp_display_record);
+
+/**
+ * qdf_dp_trace() - Stores the data in buffer
+ * @nbuf: defines the netbuf
+ * @code: defines the event
+ * @data: defines the data to be stored
+ * @size: defines the size of the data record
+ *
+ * Return: None
+ */
+void qdf_dp_trace(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code,
+		  uint8_t *data, uint8_t size)
+{
+	struct qdf_dp_trace_record_s *rec = NULL;
+
+	/* Return when Dp trace is not enabled */
+	if (!g_qdf_dp_trace_data.enable)
+		return;
+
+	/* If nbuf is NULL, check for VDEV PAUSE, UNPAUSE, TIMEOUT */
+	if (!nbuf) {
+		switch (code) {
+		case QDF_DP_TRACE_HDD_TX_TIMEOUT:
+		case QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT:
+		case QDF_DP_TRACE_VDEV_PAUSE:
+		case QDF_DP_TRACE_VDEV_UNPAUSE:
+			if (qdf_dp_trace_enable_track(code))
+				goto  register_record;
+			else
+				return;
+
+		default:
+			return;
+		}
+	}
+
+	/* Return when the packet is not a data packet */
+	if (QDF_NBUF_GET_PACKET_TRACK(nbuf) != QDF_NBUF_TX_PKT_DATA_TRACK)
+		return;
+
+	/* Return when the nbuf is not marked for DP tracing or the
+	 * verbosity level does not allow it
+	 */
+	if (qdf_dp_trace_enable_track(code) == false ||
+			!QDF_NBUF_CB_TX_DP_TRACE(nbuf))
+		return;
+
+	/* Acquire the lock so that only one thread at a time can fill the ring
+	 * buffer
+	 */
+
+register_record:
+
+	spin_lock_bh(&l_dp_trace_lock);
+
+	g_qdf_dp_trace_data.num++;
+
+	if (g_qdf_dp_trace_data.num > MAX_QDF_DP_TRACE_RECORDS)
+		g_qdf_dp_trace_data.num = MAX_QDF_DP_TRACE_RECORDS;
+
+	if (INVALID_QDF_DP_TRACE_ADDR == g_qdf_dp_trace_data.head) {
+		/* first record */
+		g_qdf_dp_trace_data.head = 0;
+		g_qdf_dp_trace_data.tail = 0;
+	} else {
+		/* queue is not empty */
+		g_qdf_dp_trace_data.tail++;
+
+		if (MAX_QDF_DP_TRACE_RECORDS == g_qdf_dp_trace_data.tail)
+			g_qdf_dp_trace_data.tail = 0;
+
+		if (g_qdf_dp_trace_data.head == g_qdf_dp_trace_data.tail) {
+			/* full */
+			if (MAX_QDF_DP_TRACE_RECORDS ==
+				++g_qdf_dp_trace_data.head)
+				g_qdf_dp_trace_data.head = 0;
+		}
+	}
+
+	rec = &g_qdf_dp_trace_tbl[g_qdf_dp_trace_data.tail];
+	rec->code = code;
+	rec->size = 0;
+	if (data != NULL && size > 0) {
+		if (size > QDF_DP_TRACE_RECORD_SIZE)
+			size = QDF_DP_TRACE_RECORD_SIZE;
+
+		rec->size = size;
+		switch (code) {
+		case QDF_DP_TRACE_HDD_PACKET_PTR_RECORD:
+		case QDF_DP_TRACE_CE_PACKET_PTR_RECORD:
+		case QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD:
+		case QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD:
+		case QDF_DP_TRACE_HTT_PACKET_PTR_RECORD:
+		case QDF_DP_TRACE_HTC_PACKET_PTR_RECORD:
+		case QDF_DP_TRACE_HIF_PACKET_PTR_RECORD:
+			qdf_mem_copy(rec->data, (uint8_t *)(&data), size);
+			break;
+
+		case QDF_DP_TRACE_DROP_PACKET_RECORD:
+		case QDF_DP_TRACE_HDD_PACKET_RECORD:
+		case QDF_DP_TRACE_CE_PACKET_RECORD:
+			qdf_mem_copy(rec->data, data, size);
+			break;
+		default:
+			break;
+		}
+	}
+	rec->time = qdf_get_log_timestamp();
+	rec->pid = (in_interrupt() ? 0 : current->pid);
+	spin_unlock_bh(&l_dp_trace_lock);
+}
+EXPORT_SYMBOL(qdf_dp_trace);
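A sketch of recording a pointer record at a layer boundary (caller name hypothetical). For the *_PTR_RECORD codes the implementation above copies the value of the data pointer itself, so the nbuf address, not payload bytes, ends up in the record.

static void example_trace_ptr_record(qdf_nbuf_t nbuf)
{
	qdf_dp_trace(nbuf, QDF_DP_TRACE_HDD_PACKET_PTR_RECORD,
		     (uint8_t *)nbuf, sizeof(nbuf));
}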
+
+/**
+ * qdf_dp_trace_spin_lock_init() - initialize the DP trace spinlock before use
+ * This function is called from cds_alloc_global_context() so that the lock is
+ * available as early as possible.
+ *
+ * Return: None
+ */
+void qdf_dp_trace_spin_lock_init(void)
+{
+	spin_lock_init(&l_dp_trace_lock);
+
+	return;
+}
+EXPORT_SYMBOL(qdf_dp_trace_spin_lock_init);
+
+/**
+ * qdf_dp_trace_dump_all() - Dump records from the ring buffer via the callback
+ * functions registered with QDF
+ * @count: Number of most recent records to dump, ending at the tail
+ *
+ * Return: None
+ */
+void qdf_dp_trace_dump_all(uint32_t count)
+{
+	struct qdf_dp_trace_record_s p_record;
+	int32_t i, tail;
+
+	if (!g_qdf_dp_trace_data.enable) {
+		QDF_TRACE(QDF_MODULE_ID_SYS,
+			  QDF_TRACE_LEVEL_ERROR, "Tracing Disabled");
+		return;
+	}
+
+	QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_ERROR,
+		  "Total Records: %d, Head: %d, Tail: %d",
+		  g_qdf_dp_trace_data.num, g_qdf_dp_trace_data.head,
+		  g_qdf_dp_trace_data.tail);
+
+	/* acquire the lock so that only one thread at a time can read
+	 * the ring buffer
+	 */
+	spin_lock_bh(&l_dp_trace_lock);
+
+	if (g_qdf_dp_trace_data.head != INVALID_QDF_DP_TRACE_ADDR) {
+		i = g_qdf_dp_trace_data.head;
+		tail = g_qdf_dp_trace_data.tail;
+
+		if (count) {
+			if (count > g_qdf_dp_trace_data.num)
+				count = g_qdf_dp_trace_data.num;
+			if (tail >= (count - 1))
+				i = tail - count + 1;
+			else if (count != MAX_QDF_DP_TRACE_RECORDS)
+				i = MAX_QDF_DP_TRACE_RECORDS - ((count - 1) -
+							     tail);
+		}
+
+		p_record = g_qdf_dp_trace_tbl[i];
+		spin_unlock_bh(&l_dp_trace_lock);
+		for (;; ) {
+
+			qdf_dp_trace_cb_table[p_record.
+					   code] (&p_record, (uint16_t)i);
+			if (i == tail)
+				break;
+			i += 1;
+
+			spin_lock_bh(&l_dp_trace_lock);
+			if (MAX_QDF_DP_TRACE_RECORDS == i)
+				i = 0;
+
+			p_record = g_qdf_dp_trace_tbl[i];
+			spin_unlock_bh(&l_dp_trace_lock);
+		}
+	} else {
+		spin_unlock_bh(&l_dp_trace_lock);
+	}
+}
+EXPORT_SYMBOL(qdf_dp_trace_dump_all);
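Finally, a debug-hook sketch (function name hypothetical) that replays the most recent records through the callbacks registered in qdf_dp_trace_init():

static void example_dump_recent_dp_trace(void)
{
	/* dump up to the last 20 records, ending at the ring-buffer tail */
	qdf_dp_trace_dump_all(20);
}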

+ 0 - 90
qdf/src/i_qdf_atomic.h

@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-#ifndef I_CDF_ATOMIC_H
-#define I_CDF_ATOMIC_H
-
-#include <cdf_status.h>         /* CDF_STATUS */
-
-#include <linux/atomic.h>
-
-typedef atomic_t __cdf_atomic_t;
-
-static inline CDF_STATUS __cdf_atomic_init(__cdf_atomic_t *v)
-{
-	atomic_set(v, 0);
-
-	return CDF_STATUS_SUCCESS;
-}
-
-static inline int32_t __cdf_atomic_read(__cdf_atomic_t *v)
-{
-	return atomic_read(v);
-}
-
-static inline void __cdf_atomic_inc(__cdf_atomic_t *v)
-{
-	atomic_inc(v);
-}
-
-static inline void __cdf_atomic_dec(__cdf_atomic_t *v)
-{
-	atomic_dec(v);
-}
-
-static inline void __cdf_atomic_add(int i, __cdf_atomic_t *v)
-{
-	atomic_add(i, v);
-}
-
-/**
- * cdf_atomic_sub() - Subtract a value from an atomic variable
- * @i: the amount by which to decrease the atomic counter
- * @v: a pointer to an opaque atomic variable
- *
- * Return: none
- */
-static inline void __cdf_atomic_sub(int i, __cdf_atomic_t *v)
-{
-	atomic_sub(i, v);
-}
-
-static inline int32_t __cdf_atomic_dec_and_test(__cdf_atomic_t *v)
-{
-	return atomic_dec_and_test(v);
-}
-
-static inline void __cdf_atomic_set(__cdf_atomic_t *v, int i)
-{
-	atomic_set(v, i);
-}
-
-static inline int32_t __cdf_atomic_inc_return(__cdf_atomic_t *v)
-{
-	return atomic_inc_return(v);
-}
-
-#endif

+ 0 - 99
qdf/src/i_qdf_defer.h

@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-#ifndef _I_CDF_DEFER_H
-#define _I_CDF_DEFER_H
-
-#include <linux/version.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#ifdef CONFIG_CNSS
-#include <net/cnss.h>
-#endif
-#include <cdf_types.h>
-#include <cdf_status.h>
-#include <cdf_trace.h>
-
-typedef struct tasklet_struct __cdf_bh_t;
-
-typedef void (*__cdf_bh_fn_t)(unsigned long arg);
-
-/* wrapper around the real task func */
-typedef struct {
-	struct work_struct work;
-	cdf_defer_fn_t fn;
-	void *arg;
-} __cdf_work_t;
-
-extern void __cdf_defer_func(struct work_struct *work);
-
-static inline CDF_STATUS
-__cdf_init_work(__cdf_work_t *work, cdf_defer_fn_t func, void *arg)
-{
-	/*Initilize func and argument in work struct */
-	work->fn = func;
-	work->arg = arg;
-#ifdef CONFIG_CNSS
-	cnss_init_work(&work->work, __cdf_defer_func);
-#else
-	INIT_WORK(&work->work, __cdf_defer_func);
-#endif
-	return CDF_STATUS_SUCCESS;
-}
-
-static inline CDF_STATUS __cdf_schedule_work(__cdf_work_t *work)
-{
-	schedule_work(&work->work);
-	return CDF_STATUS_SUCCESS;
-}
-
-static inline CDF_STATUS __cdf_init_bh(cdf_handle_t hdl,
-				       struct tasklet_struct *bh,
-				       cdf_defer_fn_t func, void *arg)
-{
-	tasklet_init(bh, (__cdf_bh_fn_t) func, (unsigned long)arg);
-
-	return CDF_STATUS_SUCCESS;
-}
-
-static inline CDF_STATUS
-__cdf_sched_bh(cdf_handle_t hdl, struct tasklet_struct *bh)
-{
-	tasklet_schedule(bh);
-
-	return CDF_STATUS_SUCCESS;
-}
-
-static inline CDF_STATUS
-__cdf_disable_bh(cdf_handle_t hdl, struct tasklet_struct *bh)
-{
-	tasklet_kill(bh);
-
-	return CDF_STATUS_SUCCESS;
-}
-
-#endif /*_I_CDF_DEFER_H*/

+ 0 - 255
qdf/src/i_qdf_lock.h

@@ -1,255 +0,0 @@
-/*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-#if !defined(__I_CDF_LOCK_H)
-#define __I_CDF_LOCK_H
-
-/**
- * DOC: i_cdf_lock.h
- *
- * Linux-specific definitions for CDF Locks
- *
- */
-
-/* Include Files */
-#include <cdf_types.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/device.h>
-#include <linux/semaphore.h>
-#include <linux/interrupt.h>
-#if defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-#include <linux/wakelock.h>
-#endif
-
-/* Preprocessor definitions and constants */
-
-/* define for flag */
-#define ADF_OS_LINUX_UNLOCK_BH  1
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/**
- * typedef struct - cdf_mutex_t
- * @m_lock: Mutex lock
- * @cookie: Lock cookie
- * @processID: Process ID to track lock
- * @state: Lock status
- * @refcount: Reference count for recursive lock
- */
-typedef struct cdf_lock_s {
-	struct mutex m_lock;
-	uint32_t cookie;
-	int processID;
-	uint32_t state;
-	uint8_t refcount;
-} cdf_mutex_t;
-
-/**
- * typedef struct - cdf_spinlock_t
- * @spinlock: Spin lock
- * @flags: Lock flag
- * @_flags: Internal lock flag
- */
-typedef struct __cdf_spinlock {
-	spinlock_t spinlock;
-	unsigned int flags;
-	unsigned long _flags;
-} cdf_spinlock_t;
-
-typedef cdf_spinlock_t __cdf_spinlock_t;
-typedef struct semaphore __cdf_semaphore_t;
-
-#if defined CONFIG_CNSS
-typedef struct wakeup_source cdf_wake_lock_t;
-#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-typedef struct wake_lock cdf_wake_lock_t;
-#else
-typedef int cdf_wake_lock_t;
-#endif
-
-/* Function declarations and documenation */
-
-/**
- * __cdf_semaphore_init() - initialize the semaphore
- * @m: Semaphore object
- *
- * Return: CDF_STATUS_SUCCESS
- */
-static inline CDF_STATUS __cdf_semaphore_init(struct semaphore *m)
-{
-	sema_init(m, 1);
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * __cdf_semaphore_acquire() - acquire semaphore
- * @m: Semaphore object
- *
- * Return: 0
- */
-static inline int
-__cdf_semaphore_acquire(cdf_device_t osdev, struct semaphore *m)
-{
-	down(m);
-	return 0;
-}
-
-/**
- * __cdf_semaphore_release() - release semaphore
- * @m: Semaphore object
- *
- * Return: result of UP operation in integer
- */
-static inline void
-__cdf_semaphore_release(cdf_device_t osdev, struct semaphore *m)
-{
-	up(m);
-}
-
-/**
- * __cdf_spinlock_init() - initialize spin lock
- * @lock: Spin lock object
- *
- * Return: CDF_STATUS_SUCCESS
- */
-static inline CDF_STATUS __cdf_spinlock_init(__cdf_spinlock_t *lock)
-{
-	spin_lock_init(&lock->spinlock);
-	lock->flags = 0;
-
-	return CDF_STATUS_SUCCESS;
-}
-
-#define __cdf_spinlock_destroy(lock)
-/**
- * __cdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive)
- * @lock: Lock object
- *
- * Return: none
- */
-static inline void
-__cdf_spin_lock(__cdf_spinlock_t *lock)
-{
-	spin_lock(&lock->spinlock);
-}
-
-/**
- * __cdf_spin_unlock() - Unlock the spinlock and enables the Preemption
- * @lock: Lock object
- *
- * Return: none
- */
-static inline void
-__cdf_spin_unlock(__cdf_spinlock_t *lock)
-{
-	spin_unlock(&lock->spinlock);
-}
-
-/**
- * __cdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption
- *				(Preemptive) and disable IRQs
- * @lock: Lock object
- *
- * Return: none
- */
-static inline void
-__cdf_spin_lock_irqsave(__cdf_spinlock_t *lock)
-{
-	spin_lock_irqsave(&lock->spinlock, lock->_flags);
-}
-/**
- * __cdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the
- *					Preemption and enable IRQ
- * @lock: Lock object
- *
- * Return: none
- */
-static inline void
-__cdf_spin_unlock_irqrestore(__cdf_spinlock_t *lock)
-{
-	spin_unlock_irqrestore(&lock->spinlock, lock->_flags);
-}
-
-/*
- * Synchronous versions - only for OS' that have interrupt disable
- */
-#define __cdf_spin_lock_irq(_pLock, _flags)    spin_lock_irqsave(_pLock, _flags)
-#define __cdf_spin_unlock_irq(_pLock, _flags)  spin_unlock_irqrestore(_pLock, _flags)
-
-/**
- * __cdf_spin_lock_bh() - Acquire the spinlock and disable bottom halves
- * @lock: Lock object
- *
- * Return: none
- */
-static inline void
-__cdf_spin_lock_bh(__cdf_spinlock_t *lock)
-{
-	if (likely(irqs_disabled() || in_softirq())) {
-		spin_lock(&lock->spinlock);
-	} else {
-		spin_lock_bh(&lock->spinlock);
-		lock->flags |= ADF_OS_LINUX_UNLOCK_BH;
-	}
-
-}
-
-/**
- * __cdf_spin_unlock_bh() - Release the spinlock and enable bottom halves
- * @lock: Lock object
- *
- * Return: none
- */
-static inline void
-__cdf_spin_unlock_bh(__cdf_spinlock_t *lock)
-{
-	if (unlikely(lock->flags & ADF_OS_LINUX_UNLOCK_BH)) {
-		lock->flags &= ~ADF_OS_LINUX_UNLOCK_BH;
-		spin_unlock_bh(&lock->spinlock);
-	} else
-		spin_unlock(&lock->spinlock);
-}
-
-/**
- * __cdf_in_softirq() - in soft irq context
- *
- * Return: true if in softirs context else false
- */
-static inline bool __cdf_in_softirq(void)
-{
-	return in_softirq();
-}
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* __I_CDF_LOCK_H */

+ 0 - 1064
qdf/src/i_qdf_nbuf.h

@@ -1,1064 +0,0 @@
-/*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/**
- * DOC: i_cdf_nbuf.h
- *
- * Linux implementation of skbuf
- */
-#ifndef _I_CDF_NET_BUF_H
-#define _I_CDF_NET_BUF_H
-
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/dma-mapping.h>
-#include <linux/types.h>
-#include <linux/scatterlist.h>
-#include <cdf_types.h>
-#include <cdf_status.h>
-
-#define __CDF_NBUF_NULL   NULL
-
-
-/*
- * Use socket buffer as the underlying implentation as skbuf .
- * Linux use sk_buff to represent both packet and data,
- * so we use sk_buffer to represent both skbuf .
- */
-typedef struct sk_buff *__cdf_nbuf_t;
-
-typedef void (*__cdf_nbuf_callback_fn)(struct sk_buff *skb);
-#define OSDEP_EAPOL_TID 6       /* send it on VO queue */
-
-/* CVG_NBUF_MAX_OS_FRAGS -
- * max tx fragments provided by the OS
- */
-#define CVG_NBUF_MAX_OS_FRAGS 1
-
-/* CVG_NBUF_MAX_EXTRA_FRAGS -
- * max tx fragments added by the driver
- * The driver will always add one tx fragment (the tx descriptor) and may
- * add a second tx fragment (e.g. a TSO segment's modified IP header).
- */
-#define CVG_NBUF_MAX_EXTRA_FRAGS 2
-
-typedef void (*cdf_nbuf_trace_update_t)(char *);
-
-/**
- * struct cvg_nbuf_cb - network buffer control block
- * @data_attr: Value that is programmed in CE descriptor, contains:
- *		1) CE classification enablement bit
- *		2) Pkt type (802.3 or Ethernet Type II)
- *		3) Pkt Offset (Usually the length of HTT/HTC desc.)
- * @trace: info for DP tracing
- * @mapped_paddr_lo: DMA mapping info
- * @extra_frags: Extra tx fragments
- * @owner_id: Owner id
- * @cdf_nbuf_callback_fn: Callback function
- * @priv_data: IPA specific priv data
- * @proto_type: Protocol type
- * @vdev_id: vdev id
- * @tx_htt2_frm: HTT 2 frame
- * @tx_htt2_reserved: HTT 2 reserved bits
- */
-struct cvg_nbuf_cb {
-	uint32_t data_attr;
-	/*
-	 * Store info for data path tracing
-	 */
-	struct {
-		uint8_t packet_state;
-		uint8_t packet_track;
-		uint8_t dp_trace;
-	} trace;
-
-	/*
-	 * Store the DMA mapping info for the network buffer fragments
-	 * provided by the OS.
-	 */
-	uint32_t mapped_paddr_lo[CVG_NBUF_MAX_OS_FRAGS];
-#ifdef DEBUG_RX_RING_BUFFER
-	uint32_t map_index;
-#endif
-
-	/* store extra tx fragments provided by the driver */
-	struct {
-		/* vaddr -
-		 * CPU address (a.k.a. virtual address) of the tx fragments
-		 * added by the driver
-		 */
-		unsigned char *vaddr[CVG_NBUF_MAX_EXTRA_FRAGS];
-		/* paddr_lo -
-		 * bus address (a.k.a. physical address) of the tx fragments
-		 * added by the driver
-		 */
-		uint32_t paddr_lo[CVG_NBUF_MAX_EXTRA_FRAGS];
-		uint16_t len[CVG_NBUF_MAX_EXTRA_FRAGS];
-		uint8_t num;    /* how many extra frags has the driver added */
-		uint8_t
-		/*
-		 * Store a wordstream vs. bytestream flag for each extra
-		 * fragment, plus one more flag for the original fragment(s)
-		 * of the netbuf.
-		 */
-wordstream_flags:CVG_NBUF_MAX_EXTRA_FRAGS + 1;
-	} extra_frags;
-	uint32_t owner_id;
-	__cdf_nbuf_callback_fn cdf_nbuf_callback_fn;
-	unsigned long priv_data;
-#ifdef QCA_PKT_PROTO_TRACE
-	unsigned char proto_type;
-	unsigned char vdev_id;
-#endif /* QCA_PKT_PROTO_TRACE */
-#ifdef QCA_TX_HTT2_SUPPORT
-	unsigned char tx_htt2_frm:1;
-	unsigned char tx_htt2_reserved:7;
-#endif /* QCA_TX_HTT2_SUPPORT */
-};
-#ifdef DEBUG_RX_RING_BUFFER
-#define NBUF_MAP_ID(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->map_index)
-#endif
-#define NBUF_OWNER_ID(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->owner_id)
-#define NBUF_OWNER_PRIV_DATA(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->priv_data)
-#define NBUF_CALLBACK_FN(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->cdf_nbuf_callback_fn)
-#define NBUF_CALLBACK_FN_EXEC(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->cdf_nbuf_callback_fn)(skb)
-#define NBUF_MAPPED_PADDR_LO(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->mapped_paddr_lo[0])
-#define NBUF_NUM_EXTRA_FRAGS(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.num)
-#define NBUF_EXTRA_FRAG_VADDR(skb, frag_num) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.vaddr[(frag_num)])
-#define NBUF_EXTRA_FRAG_PADDR_LO(skb, frag_num)	\
-	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.paddr_lo[(frag_num)])
-#define NBUF_EXTRA_FRAG_LEN(skb, frag_num) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.len[(frag_num)])
-#define NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->extra_frags.wordstream_flags)
-
-#ifdef QCA_PKT_PROTO_TRACE
-#define NBUF_SET_PROTO_TYPE(skb, proto_type) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->proto_type = proto_type)
-#define NBUF_GET_PROTO_TYPE(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->proto_type)
-#else
-#define NBUF_SET_PROTO_TYPE(skb, proto_type);
-#define NBUF_GET_PROTO_TYPE(skb) 0;
-#endif /* QCA_PKT_PROTO_TRACE */
-
-#ifdef QCA_TX_HTT2_SUPPORT
-#define NBUF_SET_TX_HTT2_FRM(skb, candi) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->tx_htt2_frm = candi)
-#define NBUF_GET_TX_HTT2_FRM(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->tx_htt2_frm)
-#else
-#define NBUF_SET_TX_HTT2_FRM(skb, candi)
-#define NBUF_GET_TX_HTT2_FRM(skb) 0
-#endif /* QCA_TX_HTT2_SUPPORT */
-
-#define NBUF_DATA_ATTR_SET(skb, data_attr)	\
-	(((struct cvg_nbuf_cb *)((skb)->cb))->data_attr = data_attr)
-
-#define NBUF_DATA_ATTR_GET(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->data_attr)
-
-#if defined(FEATURE_LRO)
-/**
- * struct nbuf_rx_cb - network buffer control block
- * on the receive path of the skb
- * @lro_eligible: indicates whether the msdu is LRO eligible
- * @tcp_proto: indicates if this is a TCP packet
- * @ipv6_proto: indicates if this is an IPv6 packet
- * @ip_offset: offset to the IP header
- * @tcp_offset: offset to the TCP header
- * @tcp_udp_chksum: TCP payload checksum
- * @tcp_seq_num: TCP sequence number
- * @tcp_ack_num: TCP acknowledgement number
- * @flow_id_toeplitz: 32 bit 5-tuple flow id toeplitz hash
- */
-struct nbuf_rx_cb {
-	uint32_t lro_eligible:1,
-		tcp_proto:1,
-		tcp_pure_ack:1,
-		ipv6_proto:1,
-		ip_offset:7,
-		tcp_offset:7;
-	uint32_t tcp_udp_chksum:16,
-		tcp_win:16;
-	uint32_t tcp_seq_num;
-	uint32_t tcp_ack_num;
-	uint32_t flow_id_toeplitz;
-};
-
-#define NBUF_LRO_ELIGIBLE(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->lro_eligible)
-#define NBUF_TCP_PROTO(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_proto)
-#define NBUF_TCP_PURE_ACK(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_pure_ack)
-#define NBUF_IPV6_PROTO(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->ipv6_proto)
-#define NBUF_IP_OFFSET(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->ip_offset)
-#define NBUF_TCP_OFFSET(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_offset)
-#define NBUF_TCP_CHKSUM(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_udp_chksum)
-#define NBUF_TCP_SEQ_NUM(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_seq_num)
-#define NBUF_TCP_ACK_NUM(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_ack_num)
-#define NBUF_TCP_WIN(skb) \
-	(((struct nbuf_rx_cb *)((skb)->cb))->tcp_win)
-#define NBUF_FLOW_ID_TOEPLITZ(skb)	\
-	(((struct nbuf_rx_cb *)((skb)->cb))->flow_id_toeplitz)
-#endif /* FEATURE_LRO */
-
-#define NBUF_SET_PACKET_STATE(skb, pkt_state) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.packet_state = \
-								pkt_state)
-#define NBUF_GET_PACKET_STATE(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.packet_state)
-
-#define NBUF_SET_PACKET_TRACK(skb, pkt_track) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.packet_track = \
-								pkt_track)
-#define NBUF_GET_PACKET_TRACK(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.packet_track)
-
-#define NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
-	cdf_nbuf_set_state(skb, PACKET_STATE)
-
-#define CDF_NBUF_SET_DP_TRACE(skb, enable) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.dp_trace \
-								= enable)
-#define CDF_NBUF_GET_DP_TRACE(skb) \
-	(((struct cvg_nbuf_cb *)((skb)->cb))->trace.dp_trace)
-
-#define __cdf_nbuf_get_num_frags(skb)		   \
-	/* assume the OS provides a single fragment */ \
-	(NBUF_NUM_EXTRA_FRAGS(skb) + 1)
-
-#if defined(FEATURE_TSO)
-#define __cdf_nbuf_dec_num_frags(skb)		   \
-	(NBUF_NUM_EXTRA_FRAGS(skb)--)
-#endif
-
-#define __cdf_nbuf_frag_push_head( \
-		skb, frag_len, frag_vaddr, frag_paddr_lo, frag_paddr_hi) \
-	do { \
-		int frag_num = NBUF_NUM_EXTRA_FRAGS(skb)++; \
-		NBUF_EXTRA_FRAG_VADDR(skb, frag_num) = frag_vaddr; \
-		NBUF_EXTRA_FRAG_PADDR_LO(skb, frag_num) = frag_paddr_lo; \
-		NBUF_EXTRA_FRAG_LEN(skb, frag_num) = frag_len; \
-	} while (0)
-
-#define __cdf_nbuf_get_frag_len(skb, frag_num)		 \
-	((frag_num < NBUF_NUM_EXTRA_FRAGS(skb)) ?	     \
-	 NBUF_EXTRA_FRAG_LEN(skb, frag_num) : (skb)->len)
-
-#define __cdf_nbuf_get_frag_vaddr(skb, frag_num)	      \
-	((frag_num < NBUF_NUM_EXTRA_FRAGS(skb)) ?		  \
-	 NBUF_EXTRA_FRAG_VADDR(skb, frag_num) : ((skb)->data))
-
-#define __cdf_nbuf_get_frag_paddr_lo(skb, frag_num)		 \
-	((frag_num < NBUF_NUM_EXTRA_FRAGS(skb)) ?		     \
-	 NBUF_EXTRA_FRAG_PADDR_LO(skb, frag_num) :		  \
-	/* assume that the OS only provides a single fragment */ \
-	 NBUF_MAPPED_PADDR_LO(skb))
-
-#define __cdf_nbuf_get_frag_is_wordstream(skb, frag_num) \
-	((frag_num < NBUF_NUM_EXTRA_FRAGS(skb)) ?	     \
-	 (NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) >>	  \
-	  (frag_num)) & 0x1 :			       \
-	 (NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) >>	  \
-	  (CVG_NBUF_MAX_EXTRA_FRAGS)) & 0x1)
-
-#define __cdf_nbuf_set_frag_is_wordstream(skb, frag_num, is_wordstream)	\
-	do {								    \
-		if (frag_num >= NBUF_NUM_EXTRA_FRAGS(skb)) {			\
-			frag_num = CVG_NBUF_MAX_EXTRA_FRAGS;			    \
-		}								\
-		/* clear the old value */					\
-		NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) &= ~(1 << frag_num);	\
-		/* set the new value */						\
-		NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) |=			\
-			((is_wordstream) << frag_num);				    \
-	} while (0)
-
-#define __cdf_nbuf_trace_set_proto_type(skb, proto_type) \
-	NBUF_SET_PROTO_TYPE(skb, proto_type)
-#define __cdf_nbuf_trace_get_proto_type(skb) \
-	NBUF_GET_PROTO_TYPE(skb);
-
-/**
- * __cdf_nbuf_data_attr_get() -  Retrieves the data_attr value
- *				 from cvg_nbuf_cb (skb->cb)
- * @skb: Pointer to struct sk_buff
- *
- * Return: data_attr
- */
-#define __cdf_nbuf_data_attr_get(skb)		\
-	NBUF_DATA_ATTR_GET(skb)
-
-/**
- * __cdf_nbuf_data_attr_set()  -  Sets the data_attr value
- *				  in cvg_nbuf_cb (skb->cb)
- * @skb: Pointer to struct sk_buff
- * @data_attr: packet type from the enum cdf_txrx_pkt_type
- *
- * Return:
- */
-static inline void
-__cdf_nbuf_data_attr_set(struct sk_buff *skb,
-			     uint32_t data_attr)
-{
-	NBUF_DATA_ATTR_SET(skb, data_attr);
-}
-
-/**
- * typedef struct __cdf_nbuf_queue_t -  network buffer queue
- * @head: Head pointer
- * @tail: Tail pointer
- * @qlen: Queue length
- */
-typedef struct __cdf_nbuf_qhead {
-	struct sk_buff *head;
-	struct sk_buff *tail;
-	unsigned int qlen;
-} __cdf_nbuf_queue_t;
-
-/*
- * Use sk_buff_head as the implementation of cdf_nbuf_queue_t.
- * Because the queue head will most likely put in some structure,
- * we don't use pointer type as the definition.
- */
-
-/*
- * prototypes. Implemented in cdf_nbuf.c
- */
-__cdf_nbuf_t __cdf_nbuf_alloc(__cdf_device_t osdev, size_t size, int reserve,
-			      int align, int prio);
-void __cdf_nbuf_free(struct sk_buff *skb);
-CDF_STATUS __cdf_nbuf_map(__cdf_device_t osdev,
-			  struct sk_buff *skb, cdf_dma_dir_t dir);
-void __cdf_nbuf_unmap(__cdf_device_t osdev,
-		      struct sk_buff *skb, cdf_dma_dir_t dir);
-CDF_STATUS __cdf_nbuf_map_single(__cdf_device_t osdev,
-				 struct sk_buff *skb, cdf_dma_dir_t dir);
-void __cdf_nbuf_unmap_single(__cdf_device_t osdev,
-			     struct sk_buff *skb, cdf_dma_dir_t dir);
-void __cdf_nbuf_reg_trace_cb(cdf_nbuf_trace_update_t cb_func_ptr);
-
-#ifdef QCA_PKT_PROTO_TRACE
-void __cdf_nbuf_trace_update(struct sk_buff *buf, char *event_string);
-#else
-#define __cdf_nbuf_trace_update(skb, event_string)
-#endif /* QCA_PKT_PROTO_TRACE */
-
-/**
- * __cdf_os_to_status() - OS to CDF status conversion
- * @error : OS error
- *
- * Return: CDF status
- */
-static inline CDF_STATUS __cdf_os_to_status(signed int error)
-{
-	switch (error) {
-	case 0:
-		return CDF_STATUS_SUCCESS;
-	case ENOMEM:
-	case -ENOMEM:
-		return CDF_STATUS_E_NOMEM;
-	default:
-		return CDF_STATUS_E_NOSUPPORT;
-	}
-}
-
-/**
- * __cdf_nbuf_len() - return the amount of valid data in the skb
- * @skb: Pointer to network buffer
- *
- * This API returns the amount of valid data in the skb, If there are frags
- * then it returns total length.
- *
- * Return: network buffer length
- */
-static inline size_t __cdf_nbuf_len(struct sk_buff *skb)
-{
-	int i, extra_frag_len = 0;
-
-	i = NBUF_NUM_EXTRA_FRAGS(skb);
-	while (i-- > 0)
-		extra_frag_len += NBUF_EXTRA_FRAG_LEN(skb, i);
-
-	return extra_frag_len + skb->len;
-}
-
-/**
- * __cdf_nbuf_cat() - link two nbufs
- * @dst: Buffer to piggyback into
- * @src: Buffer to put
- *
- * Link tow nbufs the new buf is piggybacked into the older one. The older
- * (src) skb is released.
- *
- * Return: CDF_STATUS (status of the call) if failed the src skb
- *	   is released
- */
-static inline CDF_STATUS
-__cdf_nbuf_cat(struct sk_buff *dst, struct sk_buff *src)
-{
-	CDF_STATUS error = 0;
-
-	cdf_assert(dst && src);
-
-	/*
-	 * Since pskb_expand_head unconditionally reallocates the skb->head
-	 * buffer, first check whether the current buffer is already large
-	 * enough.
-	 */
-	if (skb_tailroom(dst) < src->len) {
-		error = pskb_expand_head(dst, 0, src->len, GFP_ATOMIC);
-		if (error)
-			return __cdf_os_to_status(error);
-	}
-	memcpy(skb_tail_pointer(dst), src->data, src->len);
-
-	skb_put(dst, src->len);
-	dev_kfree_skb_any(src);
-
-	return __cdf_os_to_status(error);
-}
-
-/**************************nbuf manipulation routines*****************/
-
-/**
- * __cdf_nbuf_headroom() - return the amount of tail space available
- * @buf: Pointer to network buffer
- *
- * Return: amount of tail room
- */
-static inline int __cdf_nbuf_headroom(struct sk_buff *skb)
-{
-	return skb_headroom(skb);
-}
-
-/**
- * __cdf_nbuf_tailroom() - return the amount of tail space available
- * @buf: Pointer to network buffer
- *
- * Return: amount of tail room
- */
-static inline uint32_t __cdf_nbuf_tailroom(struct sk_buff *skb)
-{
-	return skb_tailroom(skb);
-}
-
-/**
- * __cdf_nbuf_push_head() - Push data in the front
- * @skb: Pointer to network buffer
- * @size: size to be pushed
- *
- * Return: New data pointer of this buf after data has been pushed,
- *	   or NULL if there is not enough room in this buf.
- */
-static inline uint8_t *__cdf_nbuf_push_head(struct sk_buff *skb, size_t size)
-{
-	if (NBUF_MAPPED_PADDR_LO(skb))
-		NBUF_MAPPED_PADDR_LO(skb) -= size;
-
-	return skb_push(skb, size);
-}
-
-/**
- * __cdf_nbuf_put_tail() - Puts data in the end
- * @skb: Pointer to network buffer
- * @size: size to be pushed
- *
- * Return: data pointer of this buf where new data has to be
- *	   put, or NULL if there is not enough room in this buf.
- */
-static inline uint8_t *__cdf_nbuf_put_tail(struct sk_buff *skb, size_t size)
-{
-	if (skb_tailroom(skb) < size) {
-		if (unlikely(pskb_expand_head(skb, 0,
-			size - skb_tailroom(skb), GFP_ATOMIC))) {
-			dev_kfree_skb_any(skb);
-			return NULL;
-		}
-	}
-	return skb_put(skb, size);
-}
-
-/**
- * __cdf_nbuf_pull_head() - pull data out from the front
- * @skb: Pointer to network buffer
- * @size: size to be popped
- *
- * Return: New data pointer of this buf after data has been popped,
- *	   or NULL if there is not sufficient data to pull.
- */
-static inline uint8_t *__cdf_nbuf_pull_head(struct sk_buff *skb, size_t size)
-{
-	if (NBUF_MAPPED_PADDR_LO(skb))
-		NBUF_MAPPED_PADDR_LO(skb) += size;
-
-	return skb_pull(skb, size);
-}
-
-/**
- * __cdf_nbuf_trim_tail() - trim data out from the end
- * @skb: Pointer to network buffer
- * @size: size to be popped
- *
- * Return: none
- */
-static inline void __cdf_nbuf_trim_tail(struct sk_buff *skb, size_t size)
-{
-	return skb_trim(skb, skb->len - size);
-}
-
-/*********************nbuf private buffer routines*************/
-
-/**
- * __cdf_nbuf_peek_header() - return the header's addr & m_len
- * @skb: Pointer to network buffer
- * @addr: Pointer to store header's addr
- * @m_len: network buffer length
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_peek_header(struct sk_buff *skb, uint8_t **addr, uint32_t *len)
-{
-	*addr = skb->data;
-	*len = skb->len;
-}
-
-/******************Custom queue*************/
-
-/**
- * __cdf_nbuf_queue_init() - initiallize the queue head
- * @qhead: Queue head
- *
- * Return: CDF status
- */
-static inline CDF_STATUS __cdf_nbuf_queue_init(__cdf_nbuf_queue_t *qhead)
-{
-	memset(qhead, 0, sizeof(struct __cdf_nbuf_qhead));
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * __cdf_nbuf_queue_add() - add an skb in the tail of the queue
- * @qhead: Queue head
- * @skb: Pointer to network buffer
- *
- * This is a lockless version, driver must acquire locks if it
- * needs to synchronize
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_queue_add(__cdf_nbuf_queue_t *qhead, struct sk_buff *skb)
-{
-	skb->next = NULL;       /*Nullify the next ptr */
-
-	if (!qhead->head)
-		qhead->head = skb;
-	else
-		qhead->tail->next = skb;
-
-	qhead->tail = skb;
-	qhead->qlen++;
-}
-
-/**
- * __cdf_nbuf_queue_insert_head() - add an skb at  the head  of the queue
- * @qhead: Queue head
- * @skb: Pointer to network buffer
- *
- * This is a lockless version, driver must acquire locks if it needs to
- * synchronize
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_queue_insert_head(__cdf_nbuf_queue_t *qhead, __cdf_nbuf_t skb)
-{
-	if (!qhead->head) {
-		/*Empty queue Tail pointer Must be updated */
-		qhead->tail = skb;
-	}
-	skb->next = qhead->head;
-	qhead->head = skb;
-	qhead->qlen++;
-}
-
-/**
- * __cdf_nbuf_queue_remove() - remove a skb from the head of the queue
- * @qhead: Queue head
- *
- * This is a lockless version. Driver should take care of the locks
- *
- * Return: skb or NULL
- */
-static inline
-struct sk_buff *__cdf_nbuf_queue_remove(__cdf_nbuf_queue_t *qhead)
-{
-	__cdf_nbuf_t tmp = NULL;
-
-	if (qhead->head) {
-		qhead->qlen--;
-		tmp = qhead->head;
-		if (qhead->head == qhead->tail) {
-			qhead->head = NULL;
-			qhead->tail = NULL;
-		} else {
-			qhead->head = tmp->next;
-		}
-		tmp->next = NULL;
-	}
-	return tmp;
-}
-
-/**
- * __cdf_nbuf_queue_len() - return the queue length
- * @qhead: Queue head
- *
- * Return: Queue length
- */
-static inline uint32_t __cdf_nbuf_queue_len(__cdf_nbuf_queue_t *qhead)
-{
-	return qhead->qlen;
-}
-
-/**
- * __cdf_nbuf_queue_next() - return the next skb from packet chain
- * @skb: Pointer to network buffer
- *
- * This API returns the next skb from packet chain, remember the skb is
- * still in the queue
- *
- * Return: NULL if no packets are there
- */
-static inline struct sk_buff *__cdf_nbuf_queue_next(struct sk_buff *skb)
-{
-	return skb->next;
-}
-
-/**
- * __cdf_nbuf_is_queue_empty() - check if the queue is empty or not
- * @qhead: Queue head
- *
- * Return: true if length is 0 else false
- */
-static inline bool __cdf_nbuf_is_queue_empty(__cdf_nbuf_queue_t *qhead)
-{
-	return qhead->qlen == 0;
-}
-
-/*
- * Use sk_buff_head as the implementation of cdf_nbuf_queue_t.
- * Because the queue head will most likely put in some structure,
- * we don't use pointer type as the definition.
- */
-
-/*
- * prototypes. Implemented in cdf_nbuf.c
- */
-cdf_nbuf_tx_cksum_t __cdf_nbuf_get_tx_cksum(struct sk_buff *skb);
-CDF_STATUS __cdf_nbuf_set_rx_cksum(struct sk_buff *skb,
-				   cdf_nbuf_rx_cksum_t *cksum);
-uint8_t __cdf_nbuf_get_tid(struct sk_buff *skb);
-void __cdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid);
-uint8_t __cdf_nbuf_get_exemption_type(struct sk_buff *skb);
-
-/*
- * cdf_nbuf_pool_delete() implementation - do nothing in linux
- */
-#define __cdf_nbuf_pool_delete(osdev)
-
-/**
- * __cdf_nbuf_clone() - clone the nbuf (copy is readonly)
- * @skb: Pointer to network buffer
- *
- * if GFP_ATOMIC is overkill then we can check whether its
- * called from interrupt context and then do it or else in
- * normal case use GFP_KERNEL
- *
- * example     use "in_irq() || irqs_disabled()"
- *
- * Return: cloned skb
- */
-static inline struct sk_buff *__cdf_nbuf_clone(struct sk_buff *skb)
-{
-	return skb_clone(skb, GFP_ATOMIC);
-}
-
-/**
- * __cdf_nbuf_copy() - returns a private copy of the skb
- * @skb: Pointer to network buffer
- *
- * This API returns a private copy of the skb, the skb returned is completely
- *  modifiable by callers
- *
- * Return: skb or NULL
- */
-static inline struct sk_buff *__cdf_nbuf_copy(struct sk_buff *skb)
-{
-	return skb_copy(skb, GFP_ATOMIC);
-}
-
-#define __cdf_nbuf_reserve      skb_reserve
-
-/***********************XXX: misc api's************************/
-
-/**
- * __cdf_nbuf_head() - return the pointer the skb's head pointer
- * @skb: Pointer to network buffer
- *
- * Return: Pointer to head buffer
- */
-static inline uint8_t *__cdf_nbuf_head(struct sk_buff *skb)
-{
-	return skb->head;
-}
-
-/**
- * __cdf_nbuf_data() - return the pointer to data header in the skb
- * @skb: Pointer to network buffer
- *
- * Return: Pointer to skb data
- */
-static inline uint8_t *__cdf_nbuf_data(struct sk_buff *skb)
-{
-	return skb->data;
-}
-
-/**
- * __cdf_nbuf_get_protocol() - return the protocol value of the skb
- * @skb: Pointer to network buffer
- *
- * Return: skb protocol
- */
-static inline uint16_t __cdf_nbuf_get_protocol(struct sk_buff *skb)
-{
-	return skb->protocol;
-}
-
-/**
- * __cdf_nbuf_get_ip_summed() - return the ip checksum value of the skb
- * @skb: Pointer to network buffer
- *
- * Return: skb ip_summed
- */
-static inline uint8_t __cdf_nbuf_get_ip_summed(struct sk_buff *skb)
-{
-	return skb->ip_summed;
-}
-
-/**
- * __cdf_nbuf_set_ip_summed() - sets the ip_summed value of the skb
- * @skb: Pointer to network buffer
- * @ip_summed: ip checksum
- *
- * Return: none
- */
-static inline void __cdf_nbuf_set_ip_summed(struct sk_buff *skb, uint8_t ip_summed)
-{
-	skb->ip_summed = ip_summed;
-}
-
-/**
- * __cdf_nbuf_get_priority() - return the priority value of the skb
- * @skb: Pointer to network buffer
- *
- * Return: skb priority
- */
-static inline uint32_t __cdf_nbuf_get_priority(struct sk_buff *skb)
-{
-	return skb->priority;
-}
-
-/**
- * __cdf_nbuf_set_priority() - sets the priority value of the skb
- * @skb: Pointer to network buffer
- * @p: priority
- *
- * Return: none
- */
-static inline void __cdf_nbuf_set_priority(struct sk_buff *skb, uint32_t p)
-{
-	skb->priority = p;
-}
-
-/**
- * __cdf_nbuf_set_next() - sets the next skb pointer of the current skb
- * @skb: Current skb
- * @next_skb: Next skb
- *
- * Return: void
- */
-static inline void
-__cdf_nbuf_set_next(struct sk_buff *skb, struct sk_buff *skb_next)
-{
-	skb->next = skb_next;
-}
-
-/**
- * __cdf_nbuf_next() - return the next skb pointer of the current skb
- * @skb: Current skb
- *
- * Return: the next skb pointed to by the current skb
- */
-static inline struct sk_buff *__cdf_nbuf_next(struct sk_buff *skb)
-{
-	return skb->next;
-}
-
-/**
- * __cdf_nbuf_set_next_ext() - sets the next skb pointer of the current skb
- * @skb: Current skb
- * @next_skb: Next skb
- *
- * This fn is used to link up extensions to the head skb. Does not handle
- * linking to the head
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_set_next_ext(struct sk_buff *skb, struct sk_buff *skb_next)
-{
-	skb->next = skb_next;
-}
-
-/**
- * __cdf_nbuf_next_ext() - return the next skb pointer of the current skb
- * @skb: Current skb
- *
- * Return: the next skb pointed to by the current skb
- */
-static inline struct sk_buff *__cdf_nbuf_next_ext(struct sk_buff *skb)
-{
-	return skb->next;
-}
-
-/**
- * __cdf_nbuf_append_ext_list() - link list of packet extensions to the head
- * @skb_head: head_buf nbuf holding head segment (single)
- * @ext_list: nbuf list holding linked extensions to the head
- * @ext_len: Total length of all buffers in the extension list
- *
- * This function is used to link up a list of packet extensions (seg1, 2,*  ...)
- * to the nbuf holding the head segment (seg0)
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_append_ext_list(struct sk_buff *skb_head,
-			   struct sk_buff *ext_list, size_t ext_len)
-{
-	skb_shinfo(skb_head)->frag_list = ext_list;
-	skb_head->data_len = ext_len;
-	skb_head->len += skb_head->data_len;
-}
-
-/**
- * __cdf_nbuf_tx_free() - free skb list
- * @skb: Pointer to network buffer
- * @tx_err: TX error
- *
- * Return: none
- */
-static inline void __cdf_nbuf_tx_free(struct sk_buff *bufs, int tx_err)
-{
-	while (bufs) {
-		struct sk_buff *next = __cdf_nbuf_next(bufs);
-		__cdf_nbuf_free(bufs);
-		bufs = next;
-	}
-}
-
-/**
- * __cdf_nbuf_get_age() - return the checksum value of the skb
- * @skb: Pointer to network buffer
- *
- * Return: checksum value
- */
-static inline uint32_t __cdf_nbuf_get_age(struct sk_buff *skb)
-{
-	return skb->csum;
-}
-
-/**
- * __cdf_nbuf_set_age() - sets the checksum value of the skb
- * @skb: Pointer to network buffer
- * @v: Value
- *
- * Return: none
- */
-static inline void __cdf_nbuf_set_age(struct sk_buff *skb, uint32_t v)
-{
-	skb->csum = v;
-}
-
-/**
- * __cdf_nbuf_adj_age() - adjusts the checksum/age value of the skb
- * @skb: Pointer to network buffer
- * @adj: Adjustment value
- *
- * Return: none
- */
-static inline void __cdf_nbuf_adj_age(struct sk_buff *skb, uint32_t adj)
-{
-	skb->csum -= adj;
-}
-
-/**
- * __cdf_nbuf_copy_bits() - return the length of the copy bits for skb
- * @skb: Pointer to network buffer
- * @offset: Offset value
- * @len: Length
- * @to: Destination pointer
- *
- * Return: length of the copy bits for skb
- */
-static inline int32_t
-__cdf_nbuf_copy_bits(struct sk_buff *skb, int32_t offset, int32_t len, void *to)
-{
-	return skb_copy_bits(skb, offset, to, len);
-}
-
-/**
- * __cdf_nbuf_set_pktlen() - sets the length of the skb and adjust the tail
- * @skb: Pointer to network buffer
- * @len:  Packet length
- *
- * Return: none
- */
-static inline void __cdf_nbuf_set_pktlen(struct sk_buff *skb, uint32_t len)
-{
-	if (skb->len > len) {
-		skb_trim(skb, len);
-	} else {
-		if (skb_tailroom(skb) < len - skb->len) {
-			if (unlikely(pskb_expand_head(skb, 0,
-				len - skb->len - skb_tailroom(skb),
-				GFP_ATOMIC))) {
-				dev_kfree_skb_any(skb);
-				cdf_assert(0);
-			}
-		}
-		skb_put(skb, (len - skb->len));
-	}
-}
-
-/**
- * __cdf_nbuf_set_protocol() - sets the protocol value of the skb
- * @skb: Pointer to network buffer
- * @protocol: Protocol type
- *
- * Return: none
- */
-static inline void
-__cdf_nbuf_set_protocol(struct sk_buff *skb, uint16_t protocol)
-{
-	skb->protocol = protocol;
-}
-
-#define __cdf_nbuf_set_tx_htt2_frm(skb, candi) \
-	NBUF_SET_TX_HTT2_FRM(skb, candi)
-#define __cdf_nbuf_get_tx_htt2_frm(skb)	\
-	NBUF_GET_TX_HTT2_FRM(skb)
-
-#if defined(FEATURE_TSO)
-uint32_t __cdf_nbuf_get_tso_info(cdf_device_t osdev, struct sk_buff *skb,
-	struct cdf_tso_info_t *tso_info);
-
-uint32_t __cdf_nbuf_get_tso_num_seg(struct sk_buff *skb);
-
-static inline uint8_t __cdf_nbuf_is_tso(struct sk_buff *skb)
-{
-	return skb_is_gso(skb);
-}
-
-struct sk_buff *__cdf_nbuf_inc_users(struct sk_buff *skb);
-#endif /* TSO */
-
-/**
- * __cdf_nbuf_tx_info_get() - Modify pkt_type, set pkt_subtype,
- *			      and get hw_classify by peeking
- *			      into packet
- * @nbuf:		Network buffer (skb on Linux)
- * @pkt_type:		Pkt type (from enum htt_pkt_type)
- * @pkt_subtype:	Bit 4 of this field in HTT descriptor
- *			needs to be set in case of CE classification support
- *			Is set by this macro.
- * @hw_classify:	This is a flag which is set to indicate
- *			CE classification is enabled.
- *			Do not set this bit for VLAN packets
- *			OR for mcast / bcast frames.
- *
- * This macro parses the payload to figure out relevant Tx meta-data e.g.
- * whether to enable tx_classify bit in CE.
- *
- * Overrides pkt_type only if required for 802.3 frames (original ethernet)
- * If protocol is less than ETH_P_802_3_MIN (0x600), then
- * it is the length and a 802.3 frame else it is Ethernet Type II
- * (RFC 894).
- * Bit 4 in pkt_subtype is the tx_classify bit
- *
- * Return:	void
- */
-#define __cdf_nbuf_tx_info_get(skb, pkt_type,			\
-				pkt_subtype, hw_classify)	\
-do {								\
-	struct ethhdr *eh = (struct ethhdr *)skb->data;		\
-	uint16_t ether_type = ntohs(eh->h_proto);		\
-	bool is_mc_bc;						\
-								\
-	is_mc_bc = is_broadcast_ether_addr((uint8_t *)eh) ||	\
-		   is_multicast_ether_addr((uint8_t *)eh);	\
-								\
-	if (likely((ether_type != ETH_P_8021Q) && !is_mc_bc)) {	\
-		hw_classify = 1;				\
-		pkt_subtype = 0x01 <<				\
-			HTT_TX_CLASSIFY_BIT_S;			\
-	}							\
-								\
-	if (unlikely(ether_type < ETH_P_802_3_MIN))		\
-		pkt_type = htt_pkt_type_ethernet;		\
-								\
-} while (0)
-#endif /*_I_CDF_NET_BUF_H */

+ 0 - 145
qdf/src/i_qdf_trace.h

@@ -1,145 +0,0 @@
-/*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-#if !defined(__I_CDF_TRACE_H)
-#define __I_CDF_TRACE_H
-
-#if !defined(__printf)
-#define __printf(a, b)
-#endif
-
-/**
- * DOC: i_cdf_trace.h
- *
- * Linux-specific definitions for CDF trace
- *
- */
-
-/* Include Files */
-
-/**
- * cdf_trace_msg()- logging API
- * @module: Module identifier. A member of the CDF_MODULE_ID enumeration that
- *	    identifies the module issuing the trace message.
- * @level: Trace level. A member of the CDF_TRACE_LEVEL enumeration indicating
- *	   the severity of the condition causing the trace message to be issued.
- *	   More severe conditions are more likely to be logged.
- * @strFormat: Format string. The message to be logged. This format string
- *	       contains printf-like replacement parameters, which follow this
- *	       parameter in the variable argument list.
- *
- * Users wishing to add tracing information to their code should use
- * CDF_TRACE.  CDF_TRACE() will compile into a call to cdf_trace_msg() when
- * tracing is enabled.
- *
- * Return: nothing
- *
- */
-void __printf(3, 4) cdf_trace_msg(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
-				  char *strFormat, ...);
-
-void cdf_trace_hex_dump(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
-			void *data, int buf_len);
-
-void cdf_trace_display(void);
-
-void cdf_trace_set_value(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
-			 uint8_t on);
-
-void cdf_trace_set_module_trace_level(CDF_MODULE_ID module, uint32_t level);
-
-/* CDF_TRACE is the macro invoked to add trace messages to code.  See the
- * documenation for cdf_trace_msg() for the parameters etc. for this function.
- *
- * NOTE:  Code CDF_TRACE() macros into the source code.  Do not code directly
- * to the cdf_trace_msg() function.
- *
- * NOTE 2:  cdf tracing is totally turned off if WLAN_DEBUG is *not* defined.
- * This allows us to build 'performance' builds where we can measure performance
- * without being bogged down by all the tracing in the code
- */
-
-#if defined(WLAN_DEBUG)
-#define CDF_TRACE cdf_trace_msg
-#define CDF_TRACE_HEX_DUMP cdf_trace_hex_dump
-#else
-#define CDF_TRACE(arg ...)
-#define CDF_TRACE_HEX_DUMP(arg ...)
-#endif
-
-void __printf(3, 4) cdf_snprintf(char *strBuffer, unsigned int size,
-				 char *strFormat, ...);
-#define CDF_SNPRINTF cdf_snprintf
-
-#ifdef CDF_ENABLE_TRACING
-
-#define CDF_ASSERT(_condition) \
-	do { \
-		if (!(_condition)) { \
-			pr_err("CDF ASSERT in %s Line %d\n", \
-			       __func__, __LINE__); \
-			WARN_ON(1); \
-		} \
-	} while (0)
-
-#else
-
-/* This code will be used for compilation if tracing is to be compiled out */
-/* of the code so these functions/macros are 'do nothing' */
-CDF_INLINE_FN void cdf_trace_msg(CDF_MODULE_ID module, ...)
-{
-}
-
-#define CDF_ASSERT(_condition)
-
-#endif
-
-#ifdef PANIC_ON_BUG
-
-#define CDF_BUG(_condition) \
-	do { \
-		if (!(_condition)) { \
-			pr_err("CDF BUG in %s Line %d\n", \
-			       __func__, __LINE__); \
-			BUG_ON(1); \
-		} \
-	} while (0)
-
-#else
-
-#define CDF_BUG(_condition) \
-	do { \
-		if (!(_condition)) { \
-			pr_err("CDF BUG in %s Line %d\n", \
-			       __func__, __LINE__); \
-			WARN_ON(1); \
-		} \
-	} while (0)
-
-#endif
-
-#endif

+ 0 - 234
qdf/src/i_qdf_types.h

@@ -1,234 +0,0 @@
-/*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/**
- * DOC: i_cdf_types.h
- *
- * Connectivity driver framework (CDF) types
- */
-
-#if !defined(__I_CDF_TYPES_H)
-#define __I_CDF_TYPES_H
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/completion.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/version.h>
-#include <asm/div64.h>
-
-#ifndef __KERNEL__
-#define __iomem
-#endif
-#include <asm/types.h>
-#include <asm/byteorder.h>
-#include <linux/version.h>
-
-#ifdef __KERNEL__
-#include <generated/autoconf.h>
-#include <linux/compiler.h>
-#include <linux/dma-mapping.h>
-#include <linux/wireless.h>
-#include <linux/if.h>
-#else
-
-/*
- * Hack - coexist with prior defs of dma_addr_t.
- * Eventually all other defs of dma_addr_t should be removed.
- * At that point, the "already_defined" wrapper can be removed.
- */
-#ifndef __dma_addr_t_already_defined__
-#define __dma_addr_t_already_defined__
-typedef unsigned long dma_addr_t;
-#endif
-
-#define SIOCGIWAP       0
-#define IWEVCUSTOM      0
-#define IWEVREGISTERED  0
-#define IWEVEXPIRED     0
-#define SIOCGIWSCAN     0
-#define DMA_TO_DEVICE   0
-#define DMA_FROM_DEVICE 0
-#define __iomem
-#endif /* __KERNEL__ */
-
-/**
- * max sg that we support
- */
-#define __CDF_OS_MAX_SCATTER        1
-
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-#define CDF_LITTLE_ENDIAN_MACHINE
-#elif defined (__BIG_ENDIAN_BITFIELD)
-#define CDF_BIG_ENDIAN_MACHINE
-#else
-#error  "Please fix <asm/byteorder.h>"
-#endif
-
-#define __cdf_packed          __attribute__ ((packed))
-
-typedef int (*__cdf_os_intr)(void *);
-/**
- * Private definitions of general data types
- */
-typedef dma_addr_t __cdf_dma_addr_t;
-typedef dma_addr_t __cdf_dma_context_t;
-
-#define cdf_dma_mem_context(context) dma_addr_t context
-#define cdf_get_dma_mem_context(var, field)   ((cdf_dma_context_t)(var->field))
-
-/**
- * typedef struct __cdf_resource_t - cdf resource type
- * @paddr: Physical address
- * @paddr: Virtual address
- * @len: Length
- */
-typedef struct __cdf_os_resource {
-	unsigned long paddr;
-	void __iomem *vaddr;
-	unsigned long len;
-} __cdf_resource_t;
-
-/**
- * struct __cdf_device - generic cdf device type
- * @drv: Pointer to driver
- * @drv_hdl: Pointer to driver handle
- * @drv_name: Pointer to driver name
- * @irq: IRQ
- * @dev: Pointer to device
- * @res: CDF resource
- * @func: Interrupt handler
- */
-struct __cdf_device {
-	void *drv;
-	void *drv_hdl;
-	char *drv_name;
-	int irq;
-	struct device *dev;
-	__cdf_resource_t res;
-	__cdf_os_intr func;
-};
-
-typedef struct __cdf_device *__cdf_device_t;
-
-typedef size_t __cdf_size_t;
-typedef uint8_t __iomem *__cdf_iomem_t;
-
-/**
- * typedef struct __cdf_segment_t - cdf segment
- * @daddr: DMA address
- * @len: Length
- */
-typedef struct __cdf_segment {
-	dma_addr_t daddr;
-	uint32_t len;
-} __cdf_segment_t;
-
-/**
- * struct __cdf_dma_map - dma map
- * @mapped: dma is mapped or not
- * @nsegs: Number of segments
- * @coherent: Coherent
- * @seg: Segment array
- */
-struct __cdf_dma_map {
-	uint32_t mapped;
-	uint32_t nsegs;
-	uint32_t coherent;
-	__cdf_segment_t seg[__CDF_OS_MAX_SCATTER];
-};
-typedef struct __cdf_dma_map *__cdf_dma_map_t;
-typedef uint32_t ath_dma_addr_t;
-
-#define __cdf_print               printk
-#define __cdf_vprint              vprintk
-#define __cdf_snprint             snprintf
-#define __cdf_vsnprint            vsnprintf
-
-#define __CDF_DMA_BIDIRECTIONAL  DMA_BIDIRECTIONAL
-#define __CDF_DMA_TO_DEVICE      DMA_TO_DEVICE
-#define __CDF_DMA_FROM_DEVICE    DMA_FROM_DEVICE
-#define __cdf_inline             inline
-
-/*
- * 1. GNU C/C++ Compiler
- *
- * How to detect gcc : __GNUC__
- * How to detect gcc version :
- *   major version : __GNUC__ (2 = 2.x, 3 = 3.x, 4 = 4.x)
- *   minor version : __GNUC_MINOR__
- *
- * 2. Microsoft C/C++ Compiler
- *
- * How to detect msc : _MSC_VER
- * How to detect msc version :
- *   _MSC_VER (1200 = MSVC 6.0, 1300 = MSVC 7.0, ...)
- *
- */
-
-/* MACROs to help with compiler and OS specifics. May need to get a little
- * more sophisticated than this and define these to specific 'VERSIONS' of
- * the compiler and OS.  Until we have a need for that, let's go with this.
- */
-#if defined(_MSC_VER)
-
-#define CDF_COMPILER_MSC
-/* assuming that if we build with MSC, OS is WinMobile */
-#define CDF_OS_WINMOBILE
-
-#elif defined(__GNUC__)
-
-#define CDF_COMPILER_GNUC
-#define CDF_OS_LINUX /* assuming if building with GNUC, OS is Linux */
-
-#endif
-
-#if defined(CDF_COMPILER_MSC)
-
-#define CDF_INLINE_FN  __inline
-
-/* Does nothing on Windows.  Packing individual structs is not
- * supported on the Windows compiler.
- */
-#define CDF_PACK_STRUCT_1
-#define CDF_PACK_STRUCT_2
-#define CDF_PACK_STRUCT_4
-#define CDF_PACK_STRUCT_8
-#define CDF_PACK_STRUCT_16
-
-#elif defined(CDF_COMPILER_GNUC)
-
-#define CDF_INLINE_FN  static inline
-
-#else
-#error "Compiling with an unknown compiler!!"
-#endif
-
-#endif /* __I_CDF_TYPES_H */

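As a quick illustration of the endian and packing helpers defined in the removed header above, here is a minimal, hypothetical consumer sketch in C; the struct layout and field names are invented for illustration, and the fixed-width types are assumed to come from the kernel headers the file already includes:

/* hypothetical on-air header that must not be padded by the compiler */
struct wmi_hdr {
	uint16_t msg_id;
	uint16_t msg_len;
	uint32_t seq_no;
} __cdf_packed;

#if defined(CDF_LITTLE_ENDIAN_MACHINE)
/* host already matches the little-endian wire format; no swap needed */
#elif defined(CDF_BIG_ENDIAN_MACHINE)
/* big-endian hosts must byte-swap multi-byte fields before use */
#endif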
+ 0 - 107
qdf/src/i_qdf_util.h

@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-#ifndef _I_CDF_UTIL_H
-#define _I_CDF_UTIL_H
-
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <errno.h>
-
-#include <linux/random.h>
-
-#include <cdf_types.h>
-#include <cdf_status.h>
-#include <asm/byteorder.h>
-/*
- * Generic compiler-dependent macros if defined by the OS
- */
-
-#define __cdf_unlikely(_expr)   unlikely(_expr)
-#define __cdf_likely(_expr)     likely(_expr)
-
-/**
- * __cdf_status_to_os_return() - translate CDF_STATUS values to Linux return codes
- * @status: status to translate
- *
- * Translates error types that Linux may want to handle specially.
- *
- * Return: 0 or the Linux error code that most closely matches the CDF_STATUS;
- *	defaults to -EPERM
- */
-static inline int __cdf_status_to_os_return(CDF_STATUS status)
-{
-	switch (status) {
-	case CDF_STATUS_SUCCESS:
-		return 0;
-	case CDF_STATUS_E_NULL_VALUE:
-	case CDF_STATUS_E_FAULT:
-		return -EFAULT;
-	case CDF_STATUS_E_TIMEOUT:
-	case CDF_STATUS_E_BUSY:
-		return -EBUSY;
-	case CDF_STATUS_NOT_INITIALIZED:
-	case CDF_STATUS_E_AGAIN:
-		return -EAGAIN;
-	case CDF_STATUS_E_NOSUPPORT:
-		return -ENOSYS;
-	case CDF_STATUS_E_ALREADY:
-		return -EALREADY;
-	case CDF_STATUS_E_NOMEM:
-		return -ENOMEM;
-	default:
-		return -EPERM;
-	}
-}
-
-
-/**
- * @brief minimum/maximum helpers
- */
-
-#define __cdf_min(_a, _b)         ((_a) < (_b) ? _a : _b)
-#define __cdf_max(_a, _b)         ((_a) > (_b) ? _a : _b)
-
-/**
- * @brief Assert
- */
-#define __cdf_assert(expr)  do {    \
-		if (unlikely(!(expr))) {				\
-			pr_err("Assertion failed! %s:%s %s:%d\n",   \
-			       # expr, __func__, __FILE__, __LINE__);      \
-			dump_stack();					   \
-			BUG_ON(1);	   \
-		}     \
-} while (0)
-
-#define __cdf_os_cpu_to_le64                cpu_to_le64
-#define __cdf_le16_to_cpu                le16_to_cpu
-#define __cdf_le32_to_cpu                le32_to_cpu
-#define __cdf_container_of(ptr, type, member) container_of(ptr, type, member)
-
-#endif /*_I_CDF_UTIL_H*/

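A hypothetical sketch of how the removed __cdf_status_to_os_return() helper was typically used at a Linux entry point; wlan_start_adapter() is a placeholder name, not a function from this change, and the usual linux/netdevice.h and cdf_status.h includes are assumed:

static int wlan_dev_open(struct net_device *dev)
{
	CDF_STATUS status;

	status = wlan_start_adapter(dev);	/* placeholder internal call */

	/* translate the CDF status into the errno value the kernel expects */
	return __cdf_status_to_os_return(status);
}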
+ 0 - 236
qdf/src/qdf_list.c

@@ -1,236 +0,0 @@
-/*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/**
- * DOC: cdf_list.c
- *
- * Connectivity driver framework list manipulation APIs. CDF linked list
- * APIs are NOT thread safe, so make sure to use appropriate locking mechanisms
- * to ensure operations on the list are thread safe.
- */
-
-/* Include files */
-#include <cdf_list.h>
-#include <cdf_trace.h>
-
-/* Preprocessor definitions and constants */
-
-/* Type declarations */
-
-/* Function declarations and documentation */
-
-/**
- * cdf_list_insert_front() - insert input node at front of the list
- * @pList: Pointer to list
- * @pNode: Pointer to input node
- *
- * Return: CDF status
- */
-CDF_STATUS cdf_list_insert_front(cdf_list_t *pList, cdf_list_node_t *pNode)
-{
-	list_add(pNode, &pList->anchor);
-	pList->count++;
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_list_insert_back() - insert input node at back of the list
- * @pList: Pointer to list
- * @pNode: Pointer to input node
- *
- * Return: CDF status
- */
-CDF_STATUS cdf_list_insert_back(cdf_list_t *pList, cdf_list_node_t *pNode)
-{
-	list_add_tail(pNode, &pList->anchor);
-	pList->count++;
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_list_insert_back_size() - insert input node at back of list and save
- *				 list size
- * @pList: Pointer to list
- * @pNode: Pointer to input node
- * @pSize: Pointer to store list size
- *
- * Return: CDF status
- */
-CDF_STATUS cdf_list_insert_back_size(cdf_list_t *pList,
-				     cdf_list_node_t *pNode, uint32_t *pSize)
-{
-	list_add_tail(pNode, &pList->anchor);
-	pList->count++;
-	*pSize = pList->count;
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_list_remove_front() - remove node from front of the list
- * @pList: Pointer to list
- * @ppNode: Double pointer to store the node which is removed from list
- *
- * Return: CDF status
- */
-CDF_STATUS cdf_list_remove_front(cdf_list_t *pList, cdf_list_node_t **ppNode)
-{
-	struct list_head *listptr;
-
-	if (list_empty(&pList->anchor))
-		return CDF_STATUS_E_EMPTY;
-
-	listptr = pList->anchor.next;
-	*ppNode = listptr;
-	list_del(pList->anchor.next);
-	pList->count--;
-
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_list_remove_back() - remove node from end of the list
- * @pList: Pointer to list
- * @ppNode: Double pointer to store node which is removed from list
- *
- * Return: CDF status
- */
-CDF_STATUS cdf_list_remove_back(cdf_list_t *pList, cdf_list_node_t **ppNode)
-{
-	struct list_head *listptr;
-
-	if (list_empty(&pList->anchor))
-		return CDF_STATUS_E_EMPTY;
-
-	listptr = pList->anchor.prev;
-	*ppNode = listptr;
-	list_del(pList->anchor.prev);
-	pList->count--;
-
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_list_remove_node() - remove input node from list
- * @pList: Pointer to list
- * @pNodeToRemove: Pointer to node which needs to be removed
- *
- * Return: CDF status
- */
-CDF_STATUS cdf_list_remove_node(cdf_list_t *pList,
-				cdf_list_node_t *pNodeToRemove)
-{
-	cdf_list_node_t *tmp;
-	int found = 0;
-
-	if (list_empty(&pList->anchor))
-		return CDF_STATUS_E_EMPTY;
-
-	/* verify that pNodeToRemove is indeed part of list pList */
-	list_for_each(tmp, &pList->anchor) {
-		if (tmp == pNodeToRemove) {
-			found = 1;
-			break;
-		}
-	}
-	if (found == 0)
-		return CDF_STATUS_E_INVAL;
-
-	list_del(pNodeToRemove);
-	pList->count--;
-
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_list_peek_front() - peek front node from list
- * @pList: Pointer to list
- * @ppNode: Double pointer to store peeked node pointer
- *
- * Return: CDF status
- */
-CDF_STATUS cdf_list_peek_front(cdf_list_t *pList, cdf_list_node_t **ppNode)
-{
-	struct list_head *listptr;
-	if (list_empty(&pList->anchor))
-		return CDF_STATUS_E_EMPTY;
-
-	listptr = pList->anchor.next;
-	*ppNode = listptr;
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_list_peek_next() - peek next node of input node in the list
- * @pList: Pointer to list
- * @pNode: Pointer to input node
- * @ppNode: Double pointer to store peeked node pointer
- *
- * Return: CDF status
- */
-CDF_STATUS cdf_list_peek_next(cdf_list_t *pList, cdf_list_node_t *pNode,
-			      cdf_list_node_t **ppNode)
-{
-	struct list_head *listptr;
-	int found = 0;
-	cdf_list_node_t *tmp;
-
-	if ((pList == NULL) || (pNode == NULL) || (ppNode == NULL))
-		return CDF_STATUS_E_FAULT;
-
-	if (list_empty(&pList->anchor))
-		return CDF_STATUS_E_EMPTY;
-
-	/* verify that pNode is indeed part of list pList */
-	list_for_each(tmp, &pList->anchor) {
-		if (tmp == pNode) {
-			found = 1;
-			break;
-		}
-	}
-
-	if (found == 0)
-		return CDF_STATUS_E_INVAL;
-
-	listptr = pNode->next;
-	if (listptr == &pList->anchor)
-		return CDF_STATUS_E_EMPTY;
-
-	*ppNode = listptr;
-
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_list_empty() - check if the list is empty
- * @list: pointer to the list
- *
- * Return: true if the list is empty and false otherwise.
- */
-bool cdf_list_empty(cdf_list_t *list)
-{
-	return list_empty(&list->anchor);
-}

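A minimal caller-side sketch of the removed CDF list API shown above; struct my_item and its handling are hypothetical, the cdf_list_init() max-size argument is assumed from cdf_list.h, and callers must supply their own locking since the API is not thread safe:

struct my_item {
	cdf_list_node_t node;	/* list anchor; must be first for the cast below */
	int payload;
};

static cdf_list_t pending_q;

static void pending_q_example(struct my_item *item)
{
	cdf_list_node_t *removed;

	cdf_list_init(&pending_q, 32);		/* assumed (list, max size) */
	cdf_list_insert_back(&pending_q, &item->node);

	if (cdf_list_remove_front(&pending_q, &removed) == CDF_STATUS_SUCCESS)
		((struct my_item *)removed)->payload++;
}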
+ 0 - 647
qdf/src/qdf_lock.c

@@ -1,647 +0,0 @@
-/*
- * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/**
- * DOC: cdf_lock.c
- *
- * OVERVIEW: This source file contains definitions for CDF lock APIs.
- *	     The four APIs mentioned in this file are used for
- *	     initializing, acquiring, releasing and destroying a lock.
- *	     The locks are implemented using critical sections.
- */
-
-/* Include Files */
-
-#include "cdf_lock.h"
-#include "cdf_memory.h"
-#include "cdf_trace.h"
-#include <cdf_types.h>
-#ifdef CONFIG_CNSS
-#include <net/cnss.h>
-#endif
-#include "i_host_diag_core_event.h"
-#include "cds_api.h"
-#include "ani_global.h"
-#include "hif.h"
-
-/* Preprocessor Definitions and Constants */
-#define LINUX_LOCK_COOKIE 0x12345678
-
-#define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0
-#define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0
-#define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1
-
-/* Type Declarations */
-
-enum {
-	LOCK_RELEASED = 0x11223344,
-	LOCK_ACQUIRED,
-	LOCK_DESTROYED
-};
-
-/* Global Data Definitions */
-
-/* Function Definitions and Documentation */
-
-/**
- * cdf_mutex_init() - initialize a CDF lock
- * @lock:        Pointer to the opaque lock object to initialize
- *
- * cdf_mutex_init() function initializes the specified lock. Upon
- * successful initialization, the state of the lock becomes initialized
- * and unlocked.
- *
- * A lock must be initialized by calling cdf_mutex_init() before it
- * may be used in any other lock functions.
- *
- * Attempting to initialize an already initialized lock results in
- * a failure.
- *
- * Return:
- *      CDF_STATUS_SUCCESS:     lock was successfully initialized
- *      CDF failure reason codes: lock is not initialized and can't be used
- */
-CDF_STATUS cdf_mutex_init(cdf_mutex_t *lock)
-{
-	/* check for invalid pointer */
-	if (lock == NULL) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: NULL pointer passed in", __func__);
-		return CDF_STATUS_E_FAULT;
-	}
-	/* check for 'already initialized' lock */
-	if (LINUX_LOCK_COOKIE == lock->cookie) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: already initialized lock", __func__);
-		return CDF_STATUS_E_BUSY;
-	}
-
-	if (in_interrupt()) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s cannot be called from interrupt context!!!",
-			  __func__);
-		return CDF_STATUS_E_FAULT;
-	}
-
-	/* initialize new lock */
-	mutex_init(&lock->m_lock);
-	lock->cookie = LINUX_LOCK_COOKIE;
-	lock->state = LOCK_RELEASED;
-	lock->processID = 0;
-	lock->refcount = 0;
-
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_mutex_acquire() - acquire a CDF lock
- * @lock:        Pointer to the opaque lock object to acquire
- *
- * A lock object is acquired by calling cdf_mutex_acquire().  If the lock
- * is already locked, the calling thread shall block until the lock becomes
- * available. This operation shall return with the lock object referenced by
- * lock in the locked state with the calling thread as its owner.
- *
- * Return:
- *      CDF_STATUS_SUCCESS:     lock was successfully acquired
- *      CDF failure reason codes: lock was not acquired
- */
-CDF_STATUS cdf_mutex_acquire(cdf_mutex_t *lock)
-{
-	int rc;
-	/* check for invalid pointer */
-	if (lock == NULL) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: NULL pointer passed in", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAULT;
-	}
-	/* check if lock refers to an initialized object */
-	if (LINUX_LOCK_COOKIE != lock->cookie) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: uninitialized lock", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_INVAL;
-	}
-
-	if (in_interrupt()) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s cannot be called from interrupt context!!!",
-			  __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAULT;
-	}
-	if ((lock->processID == current->pid) &&
-		(lock->state == LOCK_ACQUIRED)) {
-		lock->refcount++;
-#ifdef CDF_NESTED_LOCK_DEBUG
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
-			  "%s: %x %d %d", __func__, lock, current->pid,
-			  lock->refcount);
-#endif
-		return CDF_STATUS_SUCCESS;
-	}
-	/* acquire a Lock */
-	mutex_lock(&lock->m_lock);
-	rc = mutex_is_locked(&lock->m_lock);
-	if (rc == 0) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: unable to lock mutex (rc = %d)", __func__, rc);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAILURE;
-	}
-#ifdef CDF_NESTED_LOCK_DEBUG
-	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
-		  "%s: %x %d", __func__, lock, current->pid);
-#endif
-	if (LOCK_DESTROYED != lock->state) {
-		lock->processID = current->pid;
-		lock->refcount++;
-		lock->state = LOCK_ACQUIRED;
-		return CDF_STATUS_SUCCESS;
-	} else {
-		/* lock is already destroyed */
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: Lock is already destroyed", __func__);
-		mutex_unlock(&lock->m_lock);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAILURE;
-	}
-}
-
-/**
- * cdf_mutex_release() - release a CDF lock
- * @lock:        Pointer to the opaque lock object to be released
- *
- * cdf_mutex_release() function shall release the lock object
- * referenced by 'lock'.
- *
- * If a thread attempts to release a lock that it does not hold or that is
- * not initialized, an error is returned.
- *
- * Return:
- *      CDF_STATUS_SUCCESS:     lock was successfully released
- *      CDF failure reason codes: lock was not released
- */
-CDF_STATUS cdf_mutex_release(cdf_mutex_t *lock)
-{
-	/* check for invalid pointer */
-	if (lock == NULL) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: NULL pointer passed in", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAULT;
-	}
-
-	/* check if lock refers to an uninitialized object */
-	if (LINUX_LOCK_COOKIE != lock->cookie) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: uninitialized lock", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_INVAL;
-	}
-
-	if (in_interrupt()) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s cannot be called from interrupt context!!!",
-			  __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAULT;
-	}
-
-	/* CurrentThread = GetCurrentThreadId();
-	 * Check thread ID of caller against thread ID
-	 * of the thread which acquire the lock
-	 */
-	if (lock->processID != current->pid) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: current task pid does not match original task pid!!",
-			  __func__);
-#ifdef CDF_NESTED_LOCK_DEBUG
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
-			  "%s: Lock held by=%d being released by=%d",
-			  __func__, lock->processID, current->pid);
-#endif
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_PERM;
-	}
-	if ((lock->processID == current->pid) &&
-		(lock->state == LOCK_ACQUIRED)) {
-		if (lock->refcount > 0)
-			lock->refcount--;
-	}
-#ifdef CDF_NESTED_LOCK_DEBUG
-	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
-		  "%s: %x %d %d", __func__, lock, lock->processID,
-		  lock->refcount);
-#endif
-	if (lock->refcount)
-		return CDF_STATUS_SUCCESS;
-
-	lock->processID = 0;
-	lock->refcount = 0;
-	lock->state = LOCK_RELEASED;
-	/* release a Lock */
-	mutex_unlock(&lock->m_lock);
-#ifdef CDF_NESTED_LOCK_DEBUG
-	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
-		  "%s: Freeing lock %x %d %d", lock, lock->processID,
-		  lock->refcount);
-#endif
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_mutex_destroy() - destroy a CDF lock
- * @lock:        Pointer to the opaque lock object to be destroyed
- *
- * cdf_mutex_destroy() function shall destroy the lock object
- * referenced by lock.  After a successful return from cdf_mutex_destroy()
- * the lock object becomes, in effect, uninitialized.
- *
- * A destroyed lock object can be reinitialized using cdf_mutex_init();
- * the results of otherwise referencing the object after it has been destroyed
- * are undefined.  Calls to CDF lock functions to manipulate the lock such
- * as cdf_mutex_acquire() will fail if the lock is destroyed.  Therefore,
- * don't use the lock after it has been destroyed until it has
- * been re-initialized.
- *
- * Return:
- *      CDF_STATUS_SUCCESS:     lock was successfully destroyed
- *      CDF failure reason codes: lock was not destroyed
- */
-CDF_STATUS cdf_mutex_destroy(cdf_mutex_t *lock)
-{
-	/* check for invalid pointer */
-	if (NULL == lock) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: NULL pointer passed in", __func__);
-		return CDF_STATUS_E_FAULT;
-	}
-
-	if (LINUX_LOCK_COOKIE != lock->cookie) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: uninitialized lock", __func__);
-		return CDF_STATUS_E_INVAL;
-	}
-
-	if (in_interrupt()) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s cannot be called from interrupt context!!!",
-			  __func__);
-		return CDF_STATUS_E_FAULT;
-	}
-
-	/* check if lock is released */
-	if (!mutex_trylock(&lock->m_lock)) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: lock is not released", __func__);
-		return CDF_STATUS_E_BUSY;
-	}
-	lock->cookie = 0;
-	lock->state = LOCK_DESTROYED;
-	lock->processID = 0;
-	lock->refcount = 0;
-
-	mutex_unlock(&lock->m_lock);
-
-	return CDF_STATUS_SUCCESS;
-}
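For context, a hypothetical caller sketch of the mutex lifecycle implemented by the four functions above (process context only, error handling trimmed; the lock name is illustrative):

static cdf_mutex_t scan_lock;

static void scan_lock_example(void)
{
	cdf_mutex_init(&scan_lock);		/* once, never from IRQ context */

	if (CDF_STATUS_SUCCESS == cdf_mutex_acquire(&scan_lock)) {
		/* critical section; a nested acquire by the same pid
		 * only bumps the refcount */
		cdf_mutex_release(&scan_lock);
	}

	cdf_mutex_destroy(&scan_lock);		/* only once fully released */
}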
-
-/**
- * cdf_spinlock_acquire() - acquires a spin lock
- * @pLock:       Spin lock to acquire
- *
- * Return:
- *    CDF status success : if spin lock is acquired
- *    CDF status failure : if spin lock was not acquired
- */
-CDF_STATUS cdf_spinlock_acquire(cdf_spinlock_t *pLock)
-{
-	spin_lock(&pLock->spinlock);
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_spinlock_release() - release a spin lock
- * @pLock:       Spin lock to release
- *
- * Return:
- * CDF status success : if spin lock is released
- * CDF status failure : if spin lock was not released
- */
-CDF_STATUS cdf_spinlock_release(cdf_spinlock_t *pLock)
-{
-	spin_unlock(&pLock->spinlock);
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_wake_lock_name() - This function returns the name of the wakelock
- * @pLock: Pointer to the wakelock
- *
- * This function returns the name of the wakelock
- *
- * Return: Pointer to the name if it is valid or a default string
- *
- */
-static const char *cdf_wake_lock_name(cdf_wake_lock_t *pLock)
-{
-#if defined CONFIG_CNSS
-	if (pLock->name)
-		return pLock->name;
-#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-	if (pLock->ws.name)
-		return pLock->ws.name;
-#endif
-	return "UNNAMED_WAKELOCK";
-}
-
-/**
- * cdf_wake_lock_init() - initializes a CDF wake lock
- * @pLock: The wake lock to initialize
- * @name: Name of wake lock
- *
- * Return:
- *    CDF status success : if wake lock is initialized
- *    CDF status failure : if wake lock was not initialized
- */
-CDF_STATUS cdf_wake_lock_init(cdf_wake_lock_t *pLock, const char *name)
-{
-#if defined CONFIG_CNSS
-	cnss_pm_wake_lock_init(pLock, name);
-#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-	wake_lock_init(pLock, WAKE_LOCK_SUSPEND, name);
-#endif
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_wake_lock_acquire() - acquires a wake lock
- * @pLock:       The wake lock to acquire
- * @reason:      Reason for wakelock
- *
- * Return:
- *    CDF status success : if wake lock is acquired
- *    CDF status failure : if wake lock was not acquired
- */
-CDF_STATUS cdf_wake_lock_acquire(cdf_wake_lock_t *pLock, uint32_t reason)
-{
-	host_diag_log_wlock(reason, cdf_wake_lock_name(pLock),
-			WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
-			WIFI_POWER_EVENT_WAKELOCK_TAKEN);
-#if defined CONFIG_CNSS
-	cnss_pm_wake_lock(pLock);
-#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-	wake_lock(pLock);
-#endif
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout
- * @pLock:       The wake lock to acquire
- * @msec:        Timeout for the wake lock, in milliseconds
- * @reason:      Reason for wakelock
- *
- * Return:
- *   CDF status success : if wake lock is acquired
- *   CDF status failure : if wake lock was not acquired
- */
-CDF_STATUS cdf_wake_lock_timeout_acquire(cdf_wake_lock_t *pLock, uint32_t msec,
-					 uint32_t reason)
-{
-	/* Wakelock for Rx is frequent.
-	 * It is reported only during active debug
-	 */
-	if (((cds_get_ring_log_level(RING_ID_WAKELOCK) >= WLAN_LOG_LEVEL_ACTIVE)
-			&& (WIFI_POWER_EVENT_WAKELOCK_HOLD_RX == reason)) ||
-			(WIFI_POWER_EVENT_WAKELOCK_HOLD_RX != reason)) {
-		host_diag_log_wlock(reason, cdf_wake_lock_name(pLock), msec,
-				WIFI_POWER_EVENT_WAKELOCK_TAKEN);
-	}
-#if defined CONFIG_CNSS
-	cnss_pm_wake_lock_timeout(pLock, msec);
-#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-	wake_lock_timeout(pLock, msecs_to_jiffies(msec));
-#endif
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_wake_lock_release() - releases a wake lock
- * @pLock:       the wake lock to release
- * @reason:      Reason for wakelock
- *
- * Return:
- *    CDF status success : if wake lock is released
- *    CDF status failure : if wake lock was not released
- */
-CDF_STATUS cdf_wake_lock_release(cdf_wake_lock_t *pLock, uint32_t reason)
-{
-	host_diag_log_wlock(reason, cdf_wake_lock_name(pLock),
-			WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
-			WIFI_POWER_EVENT_WAKELOCK_RELEASED);
-#if defined CONFIG_CNSS
-	cnss_pm_wake_lock_release(pLock);
-#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-	wake_unlock(pLock);
-#endif
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_wake_lock_destroy() - destroys a wake lock
- * @pLock:       The wake lock to destroy
- *
- * Return:
- * CDF status success : if wake lock is destroyed
- * CDF status failure : if wake lock was not destroyed
- */
-CDF_STATUS cdf_wake_lock_destroy(cdf_wake_lock_t *pLock)
-{
-#if defined CONFIG_CNSS
-	cnss_pm_wake_lock_destroy(pLock);
-#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-	wake_lock_destroy(pLock);
-#endif
-	return CDF_STATUS_SUCCESS;
-}
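A sketch of the removed wake lock helpers in a typical caller; the "wlan_rx" name and the 50 ms hold time are illustrative, while WIFI_POWER_EVENT_WAKELOCK_HOLD_RX is the reason code already referenced further down in this file:

static cdf_wake_lock_t rx_wake_lock;

static void rx_wake_lock_example(void)
{
	cdf_wake_lock_init(&rx_wake_lock, "wlan_rx");

	/* keep the SoC awake for 50 ms while deferred rx work drains */
	cdf_wake_lock_timeout_acquire(&rx_wake_lock, 50,
				      WIFI_POWER_EVENT_WAKELOCK_HOLD_RX);

	/* ... normally much later, at driver unload ... */
	cdf_wake_lock_destroy(&rx_wake_lock);
}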
-
-/**
- * cdf_runtime_pm_get() - do a get operation on the device
- *
- * A get operation will prevent a runtime suspend until a
- * corresponding put is done.  This API should be used when sending
- * data.
- *
- * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
- * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
- *
- * Return: success if the bus is up and a get has been issued,
- *   otherwise an error code.
- */
-CDF_STATUS cdf_runtime_pm_get(void)
-{
-	void *ol_sc;
-	int ret;
-
-	ol_sc = cds_get_context(CDF_MODULE_ID_HIF);
-
-	if (ol_sc == NULL) {
-		CDF_ASSERT(0);
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-				"%s: HIF context is null!", __func__);
-		return CDF_STATUS_E_INVAL;
-	}
-
-	ret = hif_pm_runtime_get(ol_sc);
-
-	if (ret)
-		return CDF_STATUS_E_FAILURE;
-
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_runtime_pm_put() - do a put operation on the device
- *
- * A put operation will allow a runtime suspend after a corresponding
- * get was done.  This API should be used when sending data.
- *
- * This API will return a failure if the hif module hasn't been initialized.
- *
- * Return: CDF_STATUS_SUCCESS if the put is performed
- */
-CDF_STATUS cdf_runtime_pm_put(void)
-{
-	void *ol_sc;
-	int ret;
-
-	ol_sc = cds_get_context(CDF_MODULE_ID_HIF);
-
-	if (ol_sc == NULL) {
-		CDF_ASSERT(0);
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-				"%s: HIF context is null!", __func__);
-		return CDF_STATUS_E_INVAL;
-	}
-
-	ret = hif_pm_runtime_put(ol_sc);
-
-	if (ret)
-		return CDF_STATUS_E_FAILURE;
-
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_runtime_pm_prevent_suspend() - prevent a runtime bus suspend
- * @lock: an opaque context for tracking
- *
- * The lock can only be acquired once per lock context and is tracked.
- *
- * return: CDF_STATUS_SUCCESS or failure code.
- */
-CDF_STATUS cdf_runtime_pm_prevent_suspend(cdf_runtime_lock_t lock)
-{
-	void *ol_sc;
-	int ret;
-
-	ol_sc = cds_get_context(CDF_MODULE_ID_HIF);
-
-	if (ol_sc == NULL) {
-		CDF_ASSERT(0);
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-				"%s: HIF context is null!", __func__);
-		return CDF_STATUS_E_INVAL;
-	}
-
-	ret = hif_pm_runtime_prevent_suspend(ol_sc, lock);
-
-	if (ret)
-		return CDF_STATUS_E_FAILURE;
-
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_runtime_pm_allow_suspend() - allow a runtime bus suspend
- * @lock: an opaque context for tracking
- *
- * Releases the suspend prevention previously taken for this lock context.
- *
- * Return: CDF_STATUS_SUCCESS or failure code.
- */
-CDF_STATUS cdf_runtime_pm_allow_suspend(cdf_runtime_lock_t lock)
-{
-	void *ol_sc;
-	int ret;
-
-	ol_sc = cds_get_context(CDF_MODULE_ID_HIF);
-
-	if (ol_sc == NULL) {
-		CDF_ASSERT(0);
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-				"%s: HIF context is null!", __func__);
-		return CDF_STATUS_E_INVAL;
-	}
-
-	ret = hif_pm_runtime_allow_suspend(ol_sc, lock);
-
-	if (ret)
-		return CDF_STATUS_E_FAILURE;
-
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_runtime_lock_init() - initialize runtime lock
- * @name: name of the runtime lock
- *
- * Initialize a runtime pm lock.  This lock can be used
- * to prevent the runtime pm system from putting the bus
- * to sleep.
- *
- * Return: runtime_pm_lock_t
- */
-cdf_runtime_lock_t cdf_runtime_lock_init(const char *name)
-{
-	return hif_runtime_lock_init(name);
-}
-
-/**
- * cdf_runtime_lock_deinit() - deinitialize runtime pm lock
- * @lock: the lock to deinitialize
- *
- * Ensures the lock is released. Frees the runtime lock.
- *
- * Return: void
- */
-void cdf_runtime_lock_deinit(cdf_runtime_lock_t lock)
-{
-	hif_runtime_lock_deinit(lock);
-}

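A hypothetical pairing of the removed runtime PM wrappers shown above; the "scan" lock name and the surrounding framing are illustrative assumptions:

static cdf_runtime_lock_t scan_rt_lock;

static void runtime_pm_example(void)
{
	scan_rt_lock = cdf_runtime_lock_init("scan");

	/* long-running work that must not race a runtime suspend */
	if (CDF_STATUS_SUCCESS == cdf_runtime_pm_prevent_suspend(scan_rt_lock)) {
		/* ... scan in progress ... */
		cdf_runtime_pm_allow_suspend(scan_rt_lock);
	}

	cdf_runtime_lock_deinit(scan_rt_lock);

	/* per-packet paths bracket each send with get/put instead */
	if (CDF_STATUS_SUCCESS == cdf_runtime_pm_get()) {
		/* ... hand the frame to the target ... */
		cdf_runtime_pm_put();
	}
}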
+ 0 - 797
qdf/src/qdf_mc_timer.c

@@ -1,797 +0,0 @@
-/*
- * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/**
- *  DOC: cdf_mc_timer
- *
- *  Connectivity driver framework timer APIs serialized to MC thread
- */
-
-/* Include Files */
-#include <cdf_mc_timer.h>
-#include <cdf_lock.h>
-#include <cds_api.h>
-#include "wlan_qct_sys.h"
-#include "cds_sched.h"
-
-/* Preprocessor definitions and constants */
-
-#define LINUX_TIMER_COOKIE 0x12341234
-#define LINUX_INVALID_TIMER_COOKIE 0xfeedface
-#define TMR_INVALID_ID (0)
-
-/* Type declarations */
-
-/* Static Variable Definitions */
-static unsigned int persistent_timer_count;
-static cdf_mutex_t persistent_timer_count_lock;
-
-/* Function declarations and documentation */
-
-/**
- * try_allowing_sleep() - clean up timer states after it has been deactivated
- * @type: Timer type
- *
- * Clean up timer state after a timer has been deactivated; check and try to
- * allow sleep after a timer has been stopped or has expired.
- *
- * Return: none
- */
-static void try_allowing_sleep(CDF_TIMER_TYPE type)
-{
-	if (CDF_TIMER_TYPE_WAKE_APPS == type) {
-		/* cdf_mutex_acquire(&persistent_timer_count_lock); */
-		persistent_timer_count--;
-		if (0 == persistent_timer_count) {
-			/* since the number of persistent timers has
-			   decreased from 1 to 0, the timer should allow
-			   sleep: sleep_assert_okts(sleepClientHandle); */
-		}
-		/* cdf_mutex_release(&persistent_timer_count_lock); */
-	}
-}
-
-/**
- * cdf_linux_timer_callback() - internal cdf entry point which is
- *				called when the timer interval expires
- * @data: pointer to the timer control block which describes the
- *	timer that expired
- *
- * This function in turn calls the CDF client callback and changes the
- * state of the timer from running (ACTIVE) to expired (INIT).
- *
- * Note: function signature is defined by the Linux kernel.  The fact
- * that the argument is "unsigned long" instead of "void *" is
- * unfortunately imposed upon us.  But we can safely pass a pointer via
- * this parameter for LP32 and LP64 architectures.
- *
- *  Return: nothing
- */
-
-static void cdf_linux_timer_callback(unsigned long data)
-{
-	cdf_mc_timer_t *timer = (cdf_mc_timer_t *) data;
-	cds_msg_t msg;
-	CDF_STATUS vStatus;
-	unsigned long flags;
-
-	cdf_mc_timer_callback_t callback = NULL;
-	void *userData = NULL;
-	int threadId;
-	CDF_TIMER_TYPE type = CDF_TIMER_TYPE_SW;
-
-	CDF_ASSERT(timer);
-
-	if (timer == NULL) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s Null pointer passed in!", __func__);
-		return;
-	}
-
-	threadId = timer->platformInfo.threadID;
-	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
-
-	switch (timer->state) {
-	case CDF_TIMER_STATE_STARTING:
-		/* we are in this state because someone just started the timer,
-		 * MC timer got started and expired, but the timer content has
-		 * not been updated; this is a rare race condition!
-		 */
-		timer->state = CDF_TIMER_STATE_STOPPED;
-		vStatus = CDF_STATUS_E_ALREADY;
-		break;
-
-	case CDF_TIMER_STATE_STOPPED:
-		vStatus = CDF_STATUS_E_ALREADY;
-		break;
-
-	case CDF_TIMER_STATE_UNUSED:
-		vStatus = CDF_STATUS_E_EXISTS;
-		break;
-
-	case CDF_TIMER_STATE_RUNNING:
-		/* need to go to stop state here because the call-back function
-		 * may restart timer (to emulate periodic timer)
-		 */
-		timer->state = CDF_TIMER_STATE_STOPPED;
-		/* copy the relevant timer information to local variables;
-		 * once we exit from this critical section, the timer content
-		 * may be modified by other tasks
-		 */
-		callback = timer->callback;
-		userData = timer->userData;
-		threadId = timer->platformInfo.threadID;
-		type = timer->type;
-		vStatus = CDF_STATUS_SUCCESS;
-		break;
-
-	default:
-		CDF_ASSERT(0);
-		vStatus = CDF_STATUS_E_FAULT;
-		break;
-	}
-
-	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
-
-	if (CDF_STATUS_SUCCESS != vStatus) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "TIMER callback called in a wrong state=%d",
-			  timer->state);
-		return;
-	}
-
-	try_allowing_sleep(type);
-
-	if (callback == NULL) {
-		CDF_ASSERT(0);
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: No TIMER callback, Could not enqueue timer to any queue",
-			  __func__);
-		return;
-	}
-	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
-		  "TIMER callback: running on MC thread");
-
-	/* serialize to the MC thread */
-	sys_build_message_header(SYS_MSG_ID_MC_TIMER, &msg);
-	msg.callback = callback;
-	msg.bodyptr = userData;
-	msg.bodyval = 0;
-
-	if (cds_mq_post_message(CDS_MQ_ID_SYS, &msg) == CDF_STATUS_SUCCESS)
-		return;
-
-	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-		  "%s: Could not enqueue timer to any queue", __func__);
-	CDF_ASSERT(0);
-}
-
-/**
- * cdf_mc_timer_get_current_state() - get the current state of the timer
- * @pTimer: Pointer to timer object
- *
- * Return:
- *	CDF_TIMER_STATE - cdf timer state
- */
-CDF_TIMER_STATE cdf_mc_timer_get_current_state(cdf_mc_timer_t *pTimer)
-{
-	if (NULL == pTimer) {
-		CDF_ASSERT(0);
-		return CDF_TIMER_STATE_UNUSED;
-	}
-
-	switch (pTimer->state) {
-	case CDF_TIMER_STATE_STOPPED:
-	case CDF_TIMER_STATE_STARTING:
-	case CDF_TIMER_STATE_RUNNING:
-	case CDF_TIMER_STATE_UNUSED:
-		return pTimer->state;
-	default:
-		CDF_ASSERT(0);
-		return CDF_TIMER_STATE_UNUSED;
-	}
-}
-
-/**
- * cdf_timer_module_init() - initializes a CDF timer module.
- *
- * This API initializes the CDF timer module. This needs to be called
- * exactly once prior to using any CDF timers.
- *
- * Return: none
- */
-void cdf_timer_module_init(void)
-{
-	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
-		  "Initializing the CDF timer module");
-	cdf_mutex_init(&persistent_timer_count_lock);
-}
-
-#ifdef TIMER_MANAGER
-
-cdf_list_t cdf_timer_list;
-cdf_spinlock_t cdf_timer_list_lock;
-
-static void cdf_timer_clean(void);
-
-/**
- * cdf_mc_timer_manager_init() - initialize CDF debug timer manager
- *
- * This API initializes CDF timer debug functionality.
- *
- * Return: none
- */
-void cdf_mc_timer_manager_init(void)
-{
-	cdf_list_init(&cdf_timer_list, 1000);
-	cdf_spinlock_init(&cdf_timer_list_lock);
-	return;
-}
-
-/**
- * cdf_timer_clean() - clean up CDF timer debug functionality
- *
- * This API cleans up CDF timer debug functionality and prints which CDF timers
- * are leaked. This is called during driver unload.
- *
- * Return: none
- */
-static void cdf_timer_clean(void)
-{
-	uint32_t listSize;
-
-	cdf_list_size(&cdf_timer_list, &listSize);
-
-	if (listSize) {
-		cdf_list_node_t *pNode;
-		CDF_STATUS cdf_status;
-
-		cdf_mc_timer_node_t *ptimerNode;
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: List is not Empty. listSize %d ",
-			  __func__, (int)listSize);
-
-		do {
-			cdf_spin_lock_irqsave(&cdf_timer_list_lock);
-			cdf_status =
-				cdf_list_remove_front(&cdf_timer_list, &pNode);
-			cdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
-			if (CDF_STATUS_SUCCESS == cdf_status) {
-				ptimerNode = (cdf_mc_timer_node_t *) pNode;
-				CDF_TRACE(CDF_MODULE_ID_CDF,
-					  CDF_TRACE_LEVEL_FATAL,
-					  "Timer Leak@ File %s, @Line %d",
-					  ptimerNode->fileName,
-					  (int)ptimerNode->lineNum);
-				cdf_mem_free(ptimerNode);
-			}
-		} while (cdf_status == CDF_STATUS_SUCCESS);
-	}
-}
-
-/**
- * cdf_mc_timer_exit() - exit CDF timer debug functionality
- *
- * This API exits CDF timer debug functionality
- *
- * Return: none
- */
-void cdf_mc_timer_exit(void)
-{
-	cdf_timer_clean();
-	cdf_list_destroy(&cdf_timer_list);
-}
-#endif
-
-/**
- * cdf_mc_timer_init() - initialize a CDF timer
- * @pTimer:     Pointer to timer object
- * @timerType:  Type of timer
- * @callback:   Callback to be called after timer expiry
- * @userData:   User data which will be passed to callback function
- *
- * This API initializes a CDF Timer object.
- *
- * cdf_mc_timer_init() initializes a CDF Timer object.  A timer must be
- * initialized by calling cdf_mc_timer_init() before it may be used in
- * any other timer functions.
- *
- * Attempting to initialize timer that is already initialized results in
- * a failure. A destroyed timer object can be re-initialized with a call to
- * cdf_mc_timer_init().  The results of otherwise referencing the object
- * after it has been destroyed are undefined.
- *
- *  Calls to CDF timer functions to manipulate the timer such
- *  as cdf_mc_timer_set() will fail if the timer is not initialized or has
- *  been destroyed.  Therefore, don't use the timer after it has been
- *  destroyed until it has been re-initialized.
- *
- *  All callbacks will be executed within the CDS main thread unless it is
- *  initialized from the Tx thread flow, in which case it will be executed
- *  within the tx thread flow.
- *
- * Return:
- *	CDF_STATUS_SUCCESS - Timer is initialized successfully
- *	CDF failure status - Timer initialization failed
- */
-#ifdef TIMER_MANAGER
-CDF_STATUS cdf_mc_timer_init_debug(cdf_mc_timer_t *timer,
-				   CDF_TIMER_TYPE timerType,
-				   cdf_mc_timer_callback_t callback,
-				   void *userData, char *fileName,
-				   uint32_t lineNum)
-{
-	CDF_STATUS cdf_status;
-
-	/* check for invalid pointer */
-	if ((timer == NULL) || (callback == NULL)) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: Null params being passed", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAULT;
-	}
-
-	timer->ptimerNode = cdf_mem_malloc(sizeof(cdf_mc_timer_node_t));
-
-	if (timer->ptimerNode == NULL) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: Not able to allocate memory for timeNode",
-			  __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_NOMEM;
-	}
-
-	cdf_mem_set(timer->ptimerNode, sizeof(cdf_mc_timer_node_t), 0);
-
-	timer->ptimerNode->fileName = fileName;
-	timer->ptimerNode->lineNum = lineNum;
-	timer->ptimerNode->cdf_timer = timer;
-
-	cdf_spin_lock_irqsave(&cdf_timer_list_lock);
-	cdf_status = cdf_list_insert_front(&cdf_timer_list,
-					   &timer->ptimerNode->pNode);
-	cdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
-	if (CDF_STATUS_SUCCESS != cdf_status) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: Unable to insert node into List cdf_status %d",
-			  __func__, cdf_status);
-	}
-
-	/* set the various members of the timer structure
-	 * with arguments passed or with default values
-	 */
-	spin_lock_init(&timer->platformInfo.spinlock);
-	if (CDF_TIMER_TYPE_SW == timerType)
-		init_timer_deferrable(&(timer->platformInfo.Timer));
-	else
-		init_timer(&(timer->platformInfo.Timer));
-	timer->platformInfo.Timer.function = cdf_linux_timer_callback;
-	timer->platformInfo.Timer.data = (unsigned long)timer;
-	timer->callback = callback;
-	timer->userData = userData;
-	timer->type = timerType;
-	timer->platformInfo.cookie = LINUX_TIMER_COOKIE;
-	timer->platformInfo.threadID = 0;
-	timer->state = CDF_TIMER_STATE_STOPPED;
-
-	return CDF_STATUS_SUCCESS;
-}
-#else
-CDF_STATUS cdf_mc_timer_init(cdf_mc_timer_t *timer, CDF_TIMER_TYPE timerType,
-			     cdf_mc_timer_callback_t callback,
-			     void *userData)
-{
-	/* check for invalid pointer */
-	if ((timer == NULL) || (callback == NULL)) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: Null params being passed", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAULT;
-	}
-
-	/* set the various members of the timer structure
-	 * with arguments passed or with default values
-	 */
-	spin_lock_init(&timer->platformInfo.spinlock);
-	if (CDF_TIMER_TYPE_SW == timerType)
-		init_timer_deferrable(&(timer->platformInfo.Timer));
-	else
-		init_timer(&(timer->platformInfo.Timer));
-	timer->platformInfo.Timer.function = cdf_linux_timer_callback;
-	timer->platformInfo.Timer.data = (unsigned long)timer;
-	timer->callback = callback;
-	timer->userData = userData;
-	timer->type = timerType;
-	timer->platformInfo.cookie = LINUX_TIMER_COOKIE;
-	timer->platformInfo.threadID = 0;
-	timer->state = CDF_TIMER_STATE_STOPPED;
-
-	return CDF_STATUS_SUCCESS;
-}
-#endif
-
-/**
- * cdf_mc_timer_destroy() - destroy CDF timer
- * @timer: Pointer to timer object
- *
- * cdf_mc_timer_destroy() function shall destroy the timer object.
- * After a successful return from \a cdf_mc_timer_destroy() the timer
- * object becomes, in effect, uninitialized.
- *
- * A destroyed timer object can be re-initialized by calling
- * cdf_mc_timer_init().  The results of otherwise referencing the object
- * after it has been destroyed are undefined.
- *
- * Calls to CDF timer functions to manipulate the timer, such
- * as cdf_mc_timer_set() will fail if the timer is destroyed.  Therefore,
- * don't use the timer after it has been destroyed until it has
- * been re-initialized.
- *
- * Return:
- *	CDF_STATUS_SUCCESS - Timer is destroyed successfully
- *	CDF failure status - Timer destruction failed
- */
-#ifdef TIMER_MANAGER
-CDF_STATUS cdf_mc_timer_destroy(cdf_mc_timer_t *timer)
-{
-	CDF_STATUS vStatus = CDF_STATUS_SUCCESS;
-	unsigned long flags;
-
-	/* check for invalid pointer */
-	if (NULL == timer) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: Null timer pointer being passed", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAULT;
-	}
-
-	/* Check if timer refers to an uninitialized object */
-	if (LINUX_TIMER_COOKIE != timer->platformInfo.cookie) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: Cannot destroy uninitialized timer", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_INVAL;
-	}
-
-	cdf_spin_lock_irqsave(&cdf_timer_list_lock);
-	vStatus = cdf_list_remove_node(&cdf_timer_list,
-				       &timer->ptimerNode->pNode);
-	cdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
-	if (vStatus != CDF_STATUS_SUCCESS) {
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_INVAL;
-	}
-	cdf_mem_free(timer->ptimerNode);
-
-	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
-
-	switch (timer->state) {
-
-	case CDF_TIMER_STATE_STARTING:
-		vStatus = CDF_STATUS_E_BUSY;
-		break;
-
-	case CDF_TIMER_STATE_RUNNING:
-		/* Stop the timer first */
-		del_timer(&(timer->platformInfo.Timer));
-		vStatus = CDF_STATUS_SUCCESS;
-		break;
-	case CDF_TIMER_STATE_STOPPED:
-		vStatus = CDF_STATUS_SUCCESS;
-		break;
-
-	case CDF_TIMER_STATE_UNUSED:
-		vStatus = CDF_STATUS_E_ALREADY;
-		break;
-
-	default:
-		vStatus = CDF_STATUS_E_FAULT;
-		break;
-	}
-
-	if (CDF_STATUS_SUCCESS == vStatus) {
-		timer->platformInfo.cookie = LINUX_INVALID_TIMER_COOKIE;
-		timer->state = CDF_TIMER_STATE_UNUSED;
-		spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
-		return vStatus;
-	}
-
-	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
-
-	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-		  "%s: Cannot destroy timer in state = %d", __func__,
-		  timer->state);
-	CDF_ASSERT(0);
-
-	return vStatus;
-}
-
-#else
-
-/**
- * cdf_mc_timer_destroy() - destroy CDF timer
- * @timer: Pointer to timer object
- *
- * cdf_mc_timer_destroy() function shall destroy the timer object.
- * After a successful return from \a cdf_mc_timer_destroy() the timer
- * object becomes, in effect, uninitialized.
- *
- * A destroyed timer object can be re-initialized by calling
- * cdf_mc_timer_init().  The results of otherwise referencing the object
- * after it has been destroyed are undefined.
- *
- * Calls to CDF timer functions to manipulate the timer, such
- * as cdf_mc_timer_set() will fail if the timer is destroyed.  Therefore,
- * don't use the timer after it has been destroyed until it has
- * been re-initialized.
- *
- * Return:
- *      CDF_STATUS_SUCCESS - Timer is destroyed successfully
- *      CDF failure status - Timer destruction failed
- */
-CDF_STATUS cdf_mc_timer_destroy(cdf_mc_timer_t *timer)
-{
-	CDF_STATUS vStatus = CDF_STATUS_SUCCESS;
-	unsigned long flags;
-
-	/* check for invalid pointer */
-	if (NULL == timer) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: Null timer pointer being passed", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_FAULT;
-	}
-
-	/* check if timer refers to an uninitialized object */
-	if (LINUX_TIMER_COOKIE != timer->platformInfo.cookie) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: Cannot destroy uninitialized timer", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_INVAL;
-	}
-	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
-
-	switch (timer->state) {
-
-	case CDF_TIMER_STATE_STARTING:
-		vStatus = CDF_STATUS_E_BUSY;
-		break;
-
-	case CDF_TIMER_STATE_RUNNING:
-		/* Stop the timer first */
-		del_timer(&(timer->platformInfo.Timer));
-		vStatus = CDF_STATUS_SUCCESS;
-		break;
-
-	case CDF_TIMER_STATE_STOPPED:
-		vStatus = CDF_STATUS_SUCCESS;
-		break;
-
-	case CDF_TIMER_STATE_UNUSED:
-		vStatus = CDF_STATUS_E_ALREADY;
-		break;
-
-	default:
-		vStatus = CDF_STATUS_E_FAULT;
-		break;
-	}
-
-	if (CDF_STATUS_SUCCESS == vStatus) {
-		timer->platformInfo.cookie = LINUX_INVALID_TIMER_COOKIE;
-		timer->state = CDF_TIMER_STATE_UNUSED;
-		spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
-		return vStatus;
-	}
-
-	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
-
-	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-		  "%s: Cannot destroy timer in state = %d", __func__,
-		  timer->state);
-	CDF_ASSERT(0);
-
-	return vStatus;
-}
-#endif
-
-/**
- * cdf_mc_timer_start() - start a CDF Timer object
- * @timer:      Pointer to timer object
- * @expirationTime:     Time to expire, in milliseconds
- *
- * cdf_mc_timer_start() function starts a timer to expire after the
- * specified interval, thus running the timer callback function when
- * the interval expires.
- *
- * A timer only runs once (a one-shot timer).  To re-start the
- * timer, cdf_mc_timer_start() has to be called after the timer runs
- * or has been cancelled.
- *
- * Return:
- *	CDF_STATUS_SUCCESS - Timer is started successfully
- *	CDF failure status - Timer start failed
- */
-CDF_STATUS cdf_mc_timer_start(cdf_mc_timer_t *timer, uint32_t expirationTime)
-{
-	unsigned long flags;
-
-	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
-		  "Timer Addr inside cds_enable : 0x%p ", timer);
-
-	/* check for invalid pointer */
-	if (NULL == timer) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s Null timer pointer being passed", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_INVAL;
-	}
-
-	/* check if timer refers to an uninitialized object */
-	if (LINUX_TIMER_COOKIE != timer->platformInfo.cookie) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: Cannot start uninitialized timer", __func__);
-		CDF_ASSERT(0);
-
-		return CDF_STATUS_E_INVAL;
-	}
-
-	/* check if timer has expiration time less than 10 ms */
-	if (expirationTime < 10) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: Cannot start a timer with expiration less than 10 ms",
-			  __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_INVAL;
-	}
-
-	/* make sure the remainder of the logic isn't interrupted */
-	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
-
-	/* check whether the timer can be started */
-	if (CDF_TIMER_STATE_STOPPED != timer->state) {
-		spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
-			  "%s: Cannot start timer in state = %d ", __func__,
-			  timer->state);
-		return CDF_STATUS_E_ALREADY;
-	}
-
-	/* start the timer */
-	mod_timer(&(timer->platformInfo.Timer),
-		  jiffies + msecs_to_jiffies(expirationTime));
-
-	timer->state = CDF_TIMER_STATE_RUNNING;
-
-	/* get the thread ID on which the timer is being started */
-	timer->platformInfo.threadID = current->pid;
-
-	if (CDF_TIMER_TYPE_WAKE_APPS == timer->type) {
-		persistent_timer_count++;
-		if (1 == persistent_timer_count) {
-			/* since we now have one persistent timer,
-			 * we need to disallow sleep
-			 * sleep_negate_okts(sleepClientHandle);
-			 */
-		}
-	}
-
-	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
-
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_mc_timer_stop() - stop a CDF Timer
- * @timer:      Pointer to timer object
- *
- * cdf_mc_timer_stop() function stops a timer that has been started but
- * has not expired, essentially cancelling the 'start' request.
- *
- * After a timer is stopped, it goes back to the state it was in after it
- * was created and can be started again via a call to cdf_mc_timer_start().
- *
- * Return:
- *	CDF_STATUS_SUCCESS - Timer is stopped successfully
- *	CDF failure status - Timer stop failed
- */
-CDF_STATUS cdf_mc_timer_stop(cdf_mc_timer_t *timer)
-{
-	unsigned long flags;
-
-	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
-		  "%s: Timer Addr inside cds_disable : 0x%p", __func__, timer);
-
-	/* check for invalid pointer */
-	if (NULL == timer) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s Null timer pointer being passed", __func__);
-		CDF_ASSERT(0);
-		return CDF_STATUS_E_INVAL;
-	}
-
-	/* check if timer refers to an uninitialized object */
-	if (LINUX_TIMER_COOKIE != timer->platformInfo.cookie) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: Cannot stop uninitialized timer", __func__);
-		CDF_ASSERT(0);
-
-		return CDF_STATUS_E_INVAL;
-	}
-
-	/* ensure the timer state is correct */
-	spin_lock_irqsave(&timer->platformInfo.spinlock, flags);
-
-	if (CDF_TIMER_STATE_RUNNING != timer->state) {
-		spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
-			  "%s: Cannot stop timer in state = %d",
-			  __func__, timer->state);
-		return CDF_STATUS_SUCCESS;
-	}
-
-	timer->state = CDF_TIMER_STATE_STOPPED;
-
-	del_timer(&(timer->platformInfo.Timer));
-
-	spin_unlock_irqrestore(&timer->platformInfo.spinlock, flags);
-
-	try_allowing_sleep(timer->type);
-
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_mc_timer_get_system_ticks() - get the system time in 10ms ticks
- *
- * cdf_mc_timer_get_system_ticks() function returns the current number
- * of timer ticks in 10msec intervals. This function is suitable for
- * timestamping and for calculating time intervals by taking the
- * difference between two timestamps.
- *
- * Return:
- *	The current system tick count (in 10msec intervals).  This
- *	function cannot fail.
- */
-v_TIME_t cdf_mc_timer_get_system_ticks(void)
-{
-	return jiffies_to_msecs(jiffies) / 10;
-}
-
-/**
- * cdf_mc_timer_get_system_time() - Get the system time in milliseconds
- *
- * cdf_mc_timer_get_system_time() function returns the number of milliseconds
- * that have elapsed since the system was started
- *
- * Return:
- *	The current system time in milliseconds
- */
-v_TIME_t cdf_mc_timer_get_system_time(void)
-{
-	struct timeval tv;
-	do_gettimeofday(&tv);
-	return tv.tv_sec * 1000 + tv.tv_usec / 1000;
-}

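A minimal, hypothetical sketch of the removed MC timer API in use; the callback signature void (*)(void *) is assumed from cdf_mc_timer_callback_t, the 5000 ms period is arbitrary, and cdf_mc_timer_start() rejects expirations below 10 ms:

static cdf_mc_timer_t keepalive_timer;

static void keepalive_cb(void *ctx)
{
	/* runs serialized on the MC thread; re-arm to emulate periodicity */
	cdf_mc_timer_start(&keepalive_timer, 5000);
}

static void keepalive_example(void)
{
	cdf_mc_timer_init(&keepalive_timer, CDF_TIMER_TYPE_SW,
			  keepalive_cb, NULL);
	cdf_mc_timer_start(&keepalive_timer, 5000);

	/* on teardown: stop if running, then destroy */
	cdf_mc_timer_stop(&keepalive_timer);
	cdf_mc_timer_destroy(&keepalive_timer);
}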
+ 0 - 861
qdf/src/qdf_memory.c

@@ -1,861 +0,0 @@
-/*
- * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/**
- * DOC:  cdf_memory
- *
- * Connectivity driver framework (CDF) memory management APIs
- */
-
-/* Include Files */
-#include "cdf_memory.h"
-#include "cdf_nbuf.h"
-#include "cdf_trace.h"
-#include "cdf_lock.h"
-#include "cdf_mc_timer.h"
-
-#if defined(CONFIG_CNSS)
-#include <net/cnss.h>
-#endif
-
-#ifdef CONFIG_WCNSS_MEM_PRE_ALLOC
-#include <net/cnss_prealloc.h>
-#endif
-
-#ifdef MEMORY_DEBUG
-#include <cdf_list.h>
-#include <linux/stacktrace.h>
-
-cdf_list_t cdf_mem_list;
-cdf_spinlock_t cdf_mem_list_lock;
-
-static uint8_t WLAN_MEM_HEADER[] = { 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
-					0x67, 0x68 };
-static uint8_t WLAN_MEM_TAIL[] = { 0x80, 0x81, 0x82, 0x83, 0x84, 0x85,
-					0x86, 0x87 };
-
-#define CDF_MEM_MAX_STACK_TRACE 16
-
-struct s_cdf_mem_struct {
-	cdf_list_node_t pNode;
-	char *fileName;
-	unsigned int lineNum;
-	unsigned int size;
-#ifdef WLAN_OPEN_SOURCE
-	unsigned long stack_trace[CDF_MEM_MAX_STACK_TRACE];
-	struct stack_trace trace;
-#endif
-	uint8_t header[8];
-};
-#endif
-
-/* Preprocessor Definitions and Constants */
-#define CDF_GET_MEMORY_TIME_THRESHOLD 3000
-
-/* Type Declarations */
-
-/* Data definitions */
-
-/* External Function implementation */
-#ifdef MEMORY_DEBUG
-#ifdef WLAN_OPEN_SOURCE
-/**
- * cdf_mem_save_stack_trace() - Save stack trace of the caller
- * @mem_struct: Pointer to the memory structure where to save the stack trace
- *
- * Return: None
- */
-static inline void cdf_mem_save_stack_trace(struct s_cdf_mem_struct *mem_struct)
-{
-	struct stack_trace *trace = &mem_struct->trace;
-
-	trace->nr_entries = 0;
-	trace->max_entries = CDF_MEM_MAX_STACK_TRACE;
-	trace->entries = mem_struct->stack_trace;
-	trace->skip = 2;
-
-	save_stack_trace(trace);
-}
-
-/**
- * cdf_mem_print_stack_trace() - Print saved stack trace
- * @mem_struct: Pointer to the memory structure which has the saved stack trace
- *              to be printed
- *
- * Return: None
- */
-static inline void cdf_mem_print_stack_trace(struct s_cdf_mem_struct
-					     *mem_struct)
-{
-	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
-		  "Call stack for the source of leaked memory:");
-
-	print_stack_trace(&mem_struct->trace, 1);
-}
-#else
-static inline void cdf_mem_save_stack_trace(struct s_cdf_mem_struct *mem_struct)
-{
-
-}
-static inline void cdf_mem_print_stack_trace(struct s_cdf_mem_struct
-					     *mem_struct)
-{
-
-}
-#endif
-
-/**
- * cdf_mem_init() - initialize cdf memory debug functionality
- *
- * Return: none
- */
-void cdf_mem_init(void)
-{
-	/* Initializing the list with a maximum size of 60000 */
-	cdf_list_init(&cdf_mem_list, 60000);
-	cdf_spinlock_init(&cdf_mem_list_lock);
-	cdf_net_buf_debug_init();
-	return;
-}
-
-/**
- * cdf_mem_clean() - display memory leak debug info and free leaked pointers
- *
- * Return: none
- */
-void cdf_mem_clean(void)
-{
-	uint32_t listSize;
-	cdf_list_size(&cdf_mem_list, &listSize);
-
-	cdf_net_buf_debug_clean();
-
-	if (listSize) {
-		cdf_list_node_t *pNode;
-		CDF_STATUS cdf_status;
-
-		struct s_cdf_mem_struct *memStruct;
-		char *prev_mleak_file = "";
-		unsigned int prev_mleak_lineNum = 0;
-		unsigned int prev_mleak_sz = 0;
-		unsigned int mleak_cnt = 0;
-
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: List is not Empty. listSize %d ",
-			  __func__, (int)listSize);
-
-		do {
-			cdf_spin_lock(&cdf_mem_list_lock);
-			cdf_status =
-				cdf_list_remove_front(&cdf_mem_list, &pNode);
-			cdf_spin_unlock(&cdf_mem_list_lock);
-			if (CDF_STATUS_SUCCESS == cdf_status) {
-				memStruct = (struct s_cdf_mem_struct *)pNode;
-				/* Log multiple memory leaks from the same
-				   place only once */
-				if (strcmp(prev_mleak_file, memStruct->fileName)
-				    || (prev_mleak_lineNum !=
-					memStruct->lineNum)
-				    || (prev_mleak_sz != memStruct->size)) {
-					if (mleak_cnt != 0) {
-						CDF_TRACE(CDF_MODULE_ID_CDF,
-							  CDF_TRACE_LEVEL_FATAL,
-							  "%d Time Memory Leak@ File %s, @Line %d, size %d",
-							  mleak_cnt,
-							  prev_mleak_file,
-							  prev_mleak_lineNum,
-							  prev_mleak_sz);
-					}
-					prev_mleak_file = memStruct->fileName;
-					prev_mleak_lineNum = memStruct->lineNum;
-					prev_mleak_sz = memStruct->size;
-					mleak_cnt = 0;
-				}
-				mleak_cnt++;
-				cdf_mem_print_stack_trace(memStruct);
-				kfree((void *)memStruct);
-			}
-		} while (cdf_status == CDF_STATUS_SUCCESS);
-
-		/* Print last memory leak from the module */
-		if (mleak_cnt) {
-			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
-				  "%d Time memory Leak@ File %s, @Line %d, size %d",
-				  mleak_cnt, prev_mleak_file,
-				  prev_mleak_lineNum, prev_mleak_sz);
-		}
-#ifdef CONFIG_HALT_KMEMLEAK
-		BUG_ON(0);
-#endif
-	}
-}
-
-/**
- * cdf_mem_exit() - exit cdf memory debug functionality
- *
- * Return: none
- */
-void cdf_mem_exit(void)
-{
-	cdf_net_buf_debug_exit();
-	cdf_mem_clean();
-	cdf_list_destroy(&cdf_mem_list);
-}
-
-/**
- * cdf_mem_malloc_debug() - debug version of CDF memory allocation API
- * @size: Number of bytes of memory to allocate.
- * @fileName: File name from which memory allocation is called
- * @lineNum: Line number from which memory allocation is called
- *
- * This function will dynamically allocate the specified number of bytes of
- * memory and add it to the CDF tracking list to check against memory leaks
- * and corruption.
- *
- *
- * Return:
- *      Upon successful allocation, returns a non-NULL pointer to the allocated
- *      memory.  If this function is unable to allocate the amount of memory
- *      specified (for any reason) it returns %NULL.
- *
- */
-void *cdf_mem_malloc_debug(size_t size, char *fileName, uint32_t lineNum)
-{
-	struct s_cdf_mem_struct *memStruct;
-	void *memPtr = NULL;
-	uint32_t new_size;
-	int flags = GFP_KERNEL;
-	unsigned long  time_before_kzalloc;
-
-	if (size > (1024 * 1024) || size == 0) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: called with invalid arg; passed in %zu !!!",
-			  __func__, size);
-		return NULL;
-	}
-
-#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
-	if (size > WCNSS_PRE_ALLOC_GET_THRESHOLD) {
-		void *pmem;
-		pmem = wcnss_prealloc_get(size);
-		if (NULL != pmem) {
-			memset(pmem, 0, size);
-			return pmem;
-		}
-	}
-#endif
-
-	if (in_interrupt() || irqs_disabled() || in_atomic())
-		flags = GFP_ATOMIC;
-
-	new_size = size + sizeof(struct s_cdf_mem_struct) + 8;
-	time_before_kzalloc = cdf_mc_timer_get_system_time();
-	memStruct = (struct s_cdf_mem_struct *)kzalloc(new_size, flags);
-	/*
-	 * Log if the time taken by kzalloc exceeds
-	 * CDF_GET_MEMORY_TIME_THRESHOLD msec
-	 */
-	if (cdf_mc_timer_get_system_time() - time_before_kzalloc >=
-					  CDF_GET_MEMORY_TIME_THRESHOLD)
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			 "%s: kzalloc took %lu msec for size %zu called from %pS at line %d",
-			 __func__,
-			 cdf_mc_timer_get_system_time() - time_before_kzalloc,
-			 size, (void *)_RET_IP_, lineNum);
-
-	if (memStruct != NULL) {
-		CDF_STATUS cdf_status;
-
-		memStruct->fileName = fileName;
-		memStruct->lineNum = lineNum;
-		memStruct->size = size;
-		cdf_mem_save_stack_trace(memStruct);
-
-		cdf_mem_copy(&memStruct->header[0],
-			     &WLAN_MEM_HEADER[0], sizeof(WLAN_MEM_HEADER));
-
-		cdf_mem_copy((uint8_t *) (memStruct + 1) + size,
-			     &WLAN_MEM_TAIL[0], sizeof(WLAN_MEM_TAIL));
-
-		cdf_spin_lock_irqsave(&cdf_mem_list_lock);
-		cdf_status = cdf_list_insert_front(&cdf_mem_list,
-						   &memStruct->pNode);
-		cdf_spin_unlock_irqrestore(&cdf_mem_list_lock);
-		if (CDF_STATUS_SUCCESS != cdf_status) {
-			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-				  "%s: Unable to insert node into List cdf_status %d",
-				  __func__, cdf_status);
-		}
-
-		memPtr = (void *)(memStruct + 1);
-	}
-	return memPtr;
-}
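For reference, a minimal sketch of how callers usually reach this debug allocator: under MEMORY_DEBUG the public allocation macro expands to cdf_mem_malloc_debug() with the call site baked in. The macro below is an assumption about that wiring (the real definition lives in the cdf_memory.h header), not part of this file:

#ifdef MEMORY_DEBUG
/* Hypothetical wrapper macro: captures the allocation site so that
 * cdf_mem_clean() can attribute leaks to a file and line. */
#define cdf_mem_malloc(size) \
	cdf_mem_malloc_debug(size, __FILE__, __LINE__)
#endif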
-
-/**
- *  cdf_mem_free() - debug version of CDF memory free API
- *  @ptr: Pointer to the starting address of the memory to be freed.
- *
- *  This function will free the memory pointed to by 'ptr'. It also checks
- *  whether the memory is corrupted or being double freed and panics if so.
- *
- *  Return:
- *       Nothing
- */
-void cdf_mem_free(void *ptr)
-{
-	if (ptr != NULL) {
-		CDF_STATUS cdf_status;
-		struct s_cdf_mem_struct *memStruct =
-			((struct s_cdf_mem_struct *)ptr) - 1;
-
-#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
-		if (wcnss_prealloc_put(ptr))
-			return;
-#endif
-
-		cdf_spin_lock_irqsave(&cdf_mem_list_lock);
-		cdf_status =
-			cdf_list_remove_node(&cdf_mem_list, &memStruct->pNode);
-		cdf_spin_unlock_irqrestore(&cdf_mem_list_lock);
-
-		if (CDF_STATUS_SUCCESS == cdf_status) {
-			if (0 == cdf_mem_compare(memStruct->header,
-						 &WLAN_MEM_HEADER[0],
-						 sizeof(WLAN_MEM_HEADER))) {
-				CDF_TRACE(CDF_MODULE_ID_CDF,
-					  CDF_TRACE_LEVEL_FATAL,
-					  "Memory Header is corrupted. MemInfo: Filename %s, LineNum %d",
-					  memStruct->fileName,
-					  (int)memStruct->lineNum);
-				CDF_BUG(0);
-			}
-			if (0 ==
-			    cdf_mem_compare((uint8_t *) ptr + memStruct->size,
-					    &WLAN_MEM_TAIL[0],
-					    sizeof(WLAN_MEM_TAIL))) {
-				CDF_TRACE(CDF_MODULE_ID_CDF,
-					  CDF_TRACE_LEVEL_FATAL,
-					  "Memory Trailer is corrupted. MemInfo: Filename %s, LineNum %d",
-					  memStruct->fileName,
-					  (int)memStruct->lineNum);
-				CDF_BUG(0);
-			}
-			kfree((void *)memStruct);
-		} else {
-			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
-				  "%s: Unallocated memory (double free?)",
-				  __func__);
-			CDF_BUG(0);
-		}
-	}
-}
-#else
-/**
- * cdf_mem_malloc() - allocation CDF memory
- * @size: Number of bytes of memory to allocate.
- *
- * This function will dynamically allocate the specified number of bytes of
- * memory.
- *
- *
- * Return:
- *	Upon successful allocation, returns a non-NULL pointer to the allocated
- *	memory.  If this function is unable to allocate the amount of memory
- *	specified (for any reason) it returns %NULL.
- *
- */
-void *cdf_mem_malloc(size_t size)
-{
-	int flags = GFP_KERNEL;
-	void *memPtr = NULL;
-	unsigned long  time_before_kzalloc;
-
-	if (size > (1024 * 1024) || size == 0) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: called with invalid arg; passed in %zu !!",
-			  __func__, size);
-		return NULL;
-	}
-
-#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
-	if (size > WCNSS_PRE_ALLOC_GET_THRESHOLD) {
-		void *pmem;
-		pmem = wcnss_prealloc_get(size);
-		if (NULL != pmem) {
-			memset(pmem, 0, size);
-			return pmem;
-		}
-	}
-#endif
-
-	if (in_interrupt() || irqs_disabled() || in_atomic())
-		flags = GFP_ATOMIC;
-	time_before_kzalloc = cdf_mc_timer_get_system_time();
-	memPtr = kzalloc(size, flags);
-	/*
-	 * Log if the time taken by kzalloc exceeds
-	 * CDF_GET_MEMORY_TIME_THRESHOLD msec
-	 */
-	if (cdf_mc_timer_get_system_time() - time_before_kzalloc >=
-					   CDF_GET_MEMORY_TIME_THRESHOLD)
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			 "%s: kzalloc took %lu msec for size %zu called from %pS",
-			 __func__,
-			 cdf_mc_timer_get_system_time() - time_before_kzalloc,
-			 size, (void *)_RET_IP_);
-	return memPtr;
-}
-
-/**
- * cdf_mem_free() - free CDF memory
- * @ptr: Pointer to the starting address of the memory to be freed.
- *
- * This function will free the memory pointed to by 'ptr'.
- *
- * Return:
- *	Nothing
- *
- */
-void cdf_mem_free(void *ptr)
-{
-	if (ptr == NULL)
-		return;
-
-#if defined(CONFIG_CNSS) && defined(CONFIG_WCNSS_MEM_PRE_ALLOC)
-	if (wcnss_prealloc_put(ptr))
-		return;
-#endif
-
-	kfree(ptr);
-}
-#endif
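Both allocators above decide between GFP_KERNEL and GFP_ATOMIC from the calling context. A minimal sketch of that selection, pulled out as a helper purely for illustration (the helper name is hypothetical; the file itself open-codes the check):

/* Hypothetical helper mirroring the flag selection used above: contexts
 * that cannot sleep get GFP_ATOMIC, everything else gets GFP_KERNEL. */
static inline gfp_t cdf_mem_gfp_flags(void)
{
	if (in_interrupt() || irqs_disabled() || in_atomic())
		return GFP_ATOMIC;

	return GFP_KERNEL;
}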
-
-/**
- * cdf_mem_multi_pages_alloc() - allocate a large amount of kernel memory
- * @osdev:		OS device handle pointer
- * @pages:		Multi page information storage
- * @element_size:	Each element size
- * @element_num:	Total number of elements to be allocated
- * @memctxt:		Memory context
- * @cacheable:		Coherent memory or cacheable memory
- *
- * This function allocates a large amount of memory over multiple pages.
- * Large contiguous allocations fail frequently, so instead of allocating
- * the memory in one shot, it is allocated as multiple non-contiguous pages
- * that are combined at the point of actual use.
- *
- * Return: None
- */
-void cdf_mem_multi_pages_alloc(cdf_device_t osdev,
-				struct cdf_mem_multi_page_t *pages,
-				size_t element_size,
-				uint16_t element_num,
-				cdf_dma_context_t memctxt,
-				bool cacheable)
-{
-	uint16_t page_idx;
-	struct cdf_mem_dma_page_t *dma_pages;
-	void **cacheable_pages = NULL;
-	uint16_t i;
-
-	pages->num_element_per_page = PAGE_SIZE / element_size;
-	if (!pages->num_element_per_page) {
-		cdf_print("Invalid page %d or element size %d",
-			(int)PAGE_SIZE, (int)element_size);
-		goto out_fail;
-	}
-
-	pages->num_pages = element_num / pages->num_element_per_page;
-	if (element_num % pages->num_element_per_page)
-		pages->num_pages++;
-
-	if (cacheable) {
-		/* Pages information storage */
-		pages->cacheable_pages = cdf_mem_malloc(
-			pages->num_pages * sizeof(pages->cacheable_pages));
-		if (!pages->cacheable_pages) {
-			cdf_print("Cacheable page storage alloc fail");
-			goto out_fail;
-		}
-
-		cacheable_pages = pages->cacheable_pages;
-		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
-			cacheable_pages[page_idx] = cdf_mem_malloc(PAGE_SIZE);
-			if (!cacheable_pages[page_idx]) {
-				cdf_print("cacheable page alloc fail, pi %d",
-					page_idx);
-				goto page_alloc_fail;
-			}
-		}
-		pages->dma_pages = NULL;
-	} else {
-		pages->dma_pages = cdf_mem_malloc(
-			pages->num_pages * sizeof(struct cdf_mem_dma_page_t));
-		if (!pages->dma_pages) {
-			cdf_print("dmaable page storage alloc fail");
-			goto out_fail;
-		}
-
-		dma_pages = pages->dma_pages;
-		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
-			dma_pages->page_v_addr_start =
-				cdf_os_mem_alloc_consistent(osdev, PAGE_SIZE,
-					&dma_pages->page_p_addr, memctxt);
-			if (!dma_pages->page_v_addr_start) {
-				cdf_print("dmaable page alloc fail pi %d",
-					page_idx);
-				goto page_alloc_fail;
-			}
-			dma_pages->page_v_addr_end =
-				dma_pages->page_v_addr_start + PAGE_SIZE;
-			dma_pages++;
-		}
-		pages->cacheable_pages = NULL;
-	}
-	return;
-
-page_alloc_fail:
-	if (cacheable) {
-		for (i = 0; i < page_idx; i++)
-			cdf_mem_free(pages->cacheable_pages[i]);
-		cdf_mem_free(pages->cacheable_pages);
-	} else {
-		dma_pages = pages->dma_pages;
-		for (i = 0; i < page_idx; i++) {
-			cdf_os_mem_free_consistent(osdev, PAGE_SIZE,
-				dma_pages->page_v_addr_start,
-				dma_pages->page_p_addr, memctxt);
-			dma_pages++;
-		}
-		cdf_mem_free(pages->dma_pages);
-	}
-
-out_fail:
-	pages->cacheable_pages = NULL;
-	pages->dma_pages = NULL;
-	pages->num_pages = 0;
-	return;
-}
-
-/**
- * cdf_mem_multi_pages_free() - free memory allocated over multiple pages
- * @osdev:	OS device handle pointer
- * @pages:	Multi page information storage
- * @memctxt:	Memory context
- * @cacheable:	Coherent memory or cacheable memory
- *
- * This function frees memory that was allocated over multiple pages.
- *
- * Return: None
- */
-void cdf_mem_multi_pages_free(cdf_device_t osdev,
-				struct cdf_mem_multi_page_t *pages,
-				cdf_dma_context_t memctxt,
-				bool cacheable)
-{
-	unsigned int page_idx;
-	struct cdf_mem_dma_page_t *dma_pages;
-
-	if (cacheable) {
-		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
-			cdf_mem_free(pages->cacheable_pages[page_idx]);
-		cdf_mem_free(pages->cacheable_pages);
-	} else {
-		dma_pages = pages->dma_pages;
-		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
-			cdf_os_mem_free_consistent(osdev, PAGE_SIZE,
-				dma_pages->page_v_addr_start,
-				dma_pages->page_p_addr, memctxt);
-			dma_pages++;
-		}
-		cdf_mem_free(pages->dma_pages);
-	}
-
-	pages->cacheable_pages = NULL;
-	pages->dma_pages = NULL;
-	pages->num_pages = 0;
-	return;
-}
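A hedged usage sketch of the multi-page pair above: carve a pool of fixed-size elements out of cacheable pages and release it again. The function name and sizes are illustrative, and the memory-context argument is simply passed through as 0 here:

/* Illustrative only: allocate 1024 64-byte descriptors across multiple
 * cacheable pages; pages.num_pages == 0 signals allocation failure. */
static void example_desc_pool(cdf_device_t osdev)
{
	struct cdf_mem_multi_page_t pages;

	cdf_mem_multi_pages_alloc(osdev, &pages, 64, 1024, 0, true);
	if (!pages.num_pages)
		return;

	/* ... use pages.cacheable_pages[i] together with
	 *     pages.num_element_per_page to index the elements ... */

	cdf_mem_multi_pages_free(osdev, &pages, 0, true);
}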
-
-
-/**
- * cdf_mem_set() - set (fill) memory with a specified byte value.
- * @ptr:        Pointer to memory that will be set
- * @numBytes:   Number of bytes to be set
- * @value:      Byte value to set in memory
- *
- * Return:
- *    Nothing
- *
- */
-void cdf_mem_set(void *ptr, uint32_t numBytes, uint32_t value)
-{
-	if (ptr == NULL) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s called with NULL parameter ptr", __func__);
-		return;
-	}
-	memset(ptr, value, numBytes);
-}
-
-/**
- * cdf_mem_zero() - zero out memory
- * @ptr:        pointer to memory that will be set to zero
- * @numBytes:   number of bytes to zero
- *
- *  This function sets the memory location to all zeros, essentially clearing
- *  the memory.
- *
- * Return:
- *      Nothing
- *
- */
-void cdf_mem_zero(void *ptr, uint32_t numBytes)
-{
-	if (0 == numBytes) {
-		/* special case where ptr can be NULL */
-		return;
-	}
-
-	if (ptr == NULL) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s called with NULL parameter ptr", __func__);
-		return;
-	}
-	memset(ptr, 0, numBytes);
-}
-
-/**
- * cdf_mem_copy() - copy memory
- * @pDst:       Pointer to destination memory location (to copy to)
- * @pSrc:       Pointer to source memory location (to copy from)
- * @numBytes:   Number of bytes to copy.
- *
- * Copy host memory from one location to another, similar to memcpy in
- * standard C.  Note this function does not specifically handle overlapping
- * source and destination memory locations.  Calling this function with
- * overlapping source and destination memory locations will result in
- * unpredictable results.  Use cdf_mem_move() if the memory locations
- * for the source and destination are overlapping (or could be overlapping!)
- *
- * Return:
- *    Nothing
- *
- */
-void cdf_mem_copy(void *pDst, const void *pSrc, uint32_t numBytes)
-{
-	if (0 == numBytes) {
-		/* special case where pDst or pSrc can be NULL */
-		return;
-	}
-
-	if ((pDst == NULL) || (pSrc == NULL)) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s called with NULL parameter, source:%p destination:%p",
-			  __func__, pSrc, pDst);
-		CDF_ASSERT(0);
-		return;
-	}
-	memcpy(pDst, pSrc, numBytes);
-}
-
-/**
- * cdf_mem_move() - move memory
- * @pDst:       pointer to destination memory location (to move to)
- * @pSrc:       pointer to source memory location (to move from)
- * @numBytes:   number of bytes to move.
- *
- * Move host memory from one location to another, similar to memmove in
- * standard C.  Note this function *does* handle overlapping
- * source and destination memory locations.
- *
- * Return:
- *      Nothing
- */
-void cdf_mem_move(void *pDst, const void *pSrc, uint32_t numBytes)
-{
-	if (0 == numBytes) {
-		/* special case where pDst or pSrc can be NULL */
-		return;
-	}
-
-	if ((pDst == NULL) || (pSrc == NULL)) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s called with NULL parameter, source:%p destination:%p",
-			  __func__, pSrc, pDst);
-		CDF_ASSERT(0);
-		return;
-	}
-	memmove(pDst, pSrc, numBytes);
-}
-
-/**
- * cdf_mem_compare() - memory compare
- * @pMemory1:   pointer to one location in memory to compare.
- * @pMemory2:   pointer to second location in memory to compare.
- * @numBytes:   the number of bytes to compare.
- *
- * Function to compare two pieces of memory, similar to memcmp function
- * in standard C.
- *
- * Return:
- *      bool - true if the memory locations are equal, false otherwise.
- *
- */
-bool cdf_mem_compare(const void *pMemory1, const void *pMemory2,
-		     uint32_t numBytes)
-{
-	if (0 == numBytes) {
-		/* special case where pMemory1 or pMemory2 can be NULL */
-		return true;
-	}
-
-	if ((pMemory1 == NULL) || (pMemory2 == NULL)) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s called with NULL parameter, p1:%p p2:%p",
-			  __func__, pMemory1, pMemory2);
-		CDF_ASSERT(0);
-		return false;
-	}
-	return memcmp(pMemory1, pMemory2, numBytes) ? false : true;
-}
-
-/**
- * cdf_mem_compare2() - memory compare
- * @pMemory1: pointer to one location in memory to compare.
- * @pMemory2:   pointer to second location in memory to compare.
- * @numBytes:   the number of bytes to compare.
- *
- * Function to compare two pieces of memory, similar to memcmp function
- * in standard C.
- * Return:
- *       int32_t - result of the comparison:
- *       0 -- equal
- *       < 0 -- *pMemory1 is less than *pMemory2
- *       > 0 -- *pMemory1 is greater than *pMemory2
- */
-int32_t cdf_mem_compare2(const void *pMemory1, const void *pMemory2,
-			 uint32_t numBytes)
-{
-	return (int32_t) memcmp(pMemory1, pMemory2, numBytes);
-}
-
-/**
- * cdf_os_mem_alloc_consistent() - allocates consistent cdf memory
- * @osdev: OS device handle
- * @size: Size to be allocated
- * @paddr: Physical address
- * @memctx: Pointer to DMA context
- *
- * Return: pointer of allocated memory or null if memory alloc fails
- */
-inline void *cdf_os_mem_alloc_consistent(cdf_device_t osdev, cdf_size_t size,
-					 cdf_dma_addr_t *paddr,
-					 cdf_dma_context_t memctx)
-{
-#if defined(A_SIMOS_DEVHOST)
-	static int first = 1;
-	void *vaddr;
-
-	if (first) {
-		first = 0;
-		pr_err("Warning: bypassing %s\n", __func__);
-	}
-	vaddr = cdf_mem_malloc(size);
-	*paddr = ((cdf_dma_addr_t) vaddr);
-	return vaddr;
-#else
-	int flags = GFP_KERNEL;
-	void *alloc_mem = NULL;
-
-	if (in_interrupt() || irqs_disabled() || in_atomic())
-		flags = GFP_ATOMIC;
-
-	alloc_mem = dma_alloc_coherent(osdev->dev, size, paddr, flags);
-	if (alloc_mem == NULL)
-		pr_err("%s Warning: unable to alloc consistent memory of size %zu!\n",
-			__func__, size);
-	return alloc_mem;
-#endif
-}
-
-/**
- * cdf_os_mem_free_consistent() - free consistent cdf memory
- * @osdev: OS device handle
- * @size: Size of the memory to free
- * @vaddr: Virtual address of the memory to free
- * @paddr: Physical address
- * @memctx: Pointer to DMA context
- *
- * Return: none
- */
-inline void
-cdf_os_mem_free_consistent(cdf_device_t osdev,
-			   cdf_size_t size,
-			   void *vaddr,
-			   cdf_dma_addr_t paddr, cdf_dma_context_t memctx)
-{
-#if defined(A_SIMOS_DEVHOST)
-	static int first = 1;
-
-	if (first) {
-		first = 0;
-		pr_err("Warning: bypassing %s\n", __func__);
-	}
-	cdf_mem_free(vaddr);
-	return;
-#else
-	dma_free_coherent(osdev->dev, size, vaddr, paddr);
-#endif
-}
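A hedged sketch of how the consistent-memory pair above is typically used: the virtual address is for the driver, the physical address is handed to the device, and the free call must receive the same size and both addresses. The function name is illustrative and the DMA context is passed as 0 for simplicity:

/* Illustrative only: allocate a 4 KB DMA-coherent block, use it, free it. */
static void example_coherent_block(cdf_device_t osdev)
{
	cdf_dma_addr_t paddr;
	void *vaddr;

	vaddr = cdf_os_mem_alloc_consistent(osdev, 4096, &paddr, 0);
	if (!vaddr)
		return;

	/* ... program 'paddr' into the hardware, access 'vaddr' from the CPU ... */

	cdf_os_mem_free_consistent(osdev, 4096, vaddr, paddr, 0);
}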
-
-
-/**
- * cdf_os_mem_dma_sync_single_for_device() - assign memory to device
- * @osdev: OS device handle
- * @bus_addr: dma address to give to the device
- * @size: Size of the memory block
- * @direction: direction data will be dma'ed
- *
- * Assign memory to the remote device.
- * The cache lines are flushed to ram or invalidated as needed.
- *
- * Return: none
- */
-
-inline void
-cdf_os_mem_dma_sync_single_for_device(cdf_device_t osdev,
-				      cdf_dma_addr_t bus_addr,
-				      cdf_size_t size,
-				      enum dma_data_direction direction)
-{
-	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
-}
-

+ 0 - 1017
qdf/src/qdf_nbuf.c

@@ -1,1017 +0,0 @@
-/*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/**
- * DOC: cdf_nbuf.c
- *
- * Connectivity driver framework (CDF) network buffer management APIs
- */
-
-#include <linux/kernel.h>
-#include <linux/version.h>
-#include <linux/skbuff.h>
-#include <linux/module.h>
-#include <cdf_types.h>
-#include <cdf_nbuf.h>
-#include <cdf_memory.h>
-#include <cdf_trace.h>
-#include <cdf_status.h>
-#include <cdf_lock.h>
-
-#if defined(FEATURE_TSO)
-#include <net/ipv6.h>
-#include <linux/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/if_vlan.h>
-#include <linux/ip.h>
-#endif /* FEATURE_TSO */
-
-/* Packet Counter */
-static uint32_t nbuf_tx_mgmt[NBUF_TX_PKT_STATE_MAX];
-static uint32_t nbuf_tx_data[NBUF_TX_PKT_STATE_MAX];
-
-/**
- * cdf_nbuf_tx_desc_count_display() - Displays the packet counter
- *
- * Return: none
- */
-void cdf_nbuf_tx_desc_count_display(void)
-{
-	cdf_print("Current Snapshot of the Driver:\n");
-	cdf_print("Data Packets:\n");
-	cdf_print("HDD %d TXRX_Q %d TXRX %d HTT %d",
-		nbuf_tx_data[NBUF_TX_PKT_HDD] -
-		(nbuf_tx_data[NBUF_TX_PKT_TXRX] +
-		nbuf_tx_data[NBUF_TX_PKT_TXRX_ENQUEUE] -
-		nbuf_tx_data[NBUF_TX_PKT_TXRX_DEQUEUE]),
-		nbuf_tx_data[NBUF_TX_PKT_TXRX_ENQUEUE] -
-		nbuf_tx_data[NBUF_TX_PKT_TXRX_DEQUEUE],
-		nbuf_tx_data[NBUF_TX_PKT_TXRX] - nbuf_tx_data[NBUF_TX_PKT_HTT],
-		nbuf_tx_data[NBUF_TX_PKT_HTT]  - nbuf_tx_data[NBUF_TX_PKT_HTC]);
-	cdf_print(" HTC %d  HIF %d CE %d TX_COMP %d\n",
-		nbuf_tx_data[NBUF_TX_PKT_HTC]  - nbuf_tx_data[NBUF_TX_PKT_HIF],
-		nbuf_tx_data[NBUF_TX_PKT_HIF]  - nbuf_tx_data[NBUF_TX_PKT_CE],
-		nbuf_tx_data[NBUF_TX_PKT_CE]   - nbuf_tx_data[NBUF_TX_PKT_FREE],
-		nbuf_tx_data[NBUF_TX_PKT_FREE]);
-	cdf_print("Mgmt Packets:\n");
-	cdf_print("TXRX_Q %d TXRX %d HTT %d HTC %d HIF %d CE %d TX_COMP %d\n",
-		nbuf_tx_mgmt[NBUF_TX_PKT_TXRX_ENQUEUE] -
-		nbuf_tx_mgmt[NBUF_TX_PKT_TXRX_DEQUEUE],
-		nbuf_tx_mgmt[NBUF_TX_PKT_TXRX] - nbuf_tx_mgmt[NBUF_TX_PKT_HTT],
-		nbuf_tx_mgmt[NBUF_TX_PKT_HTT]  - nbuf_tx_mgmt[NBUF_TX_PKT_HTC],
-		nbuf_tx_mgmt[NBUF_TX_PKT_HTC]  - nbuf_tx_mgmt[NBUF_TX_PKT_HIF],
-		nbuf_tx_mgmt[NBUF_TX_PKT_HIF]  - nbuf_tx_mgmt[NBUF_TX_PKT_CE],
-		nbuf_tx_mgmt[NBUF_TX_PKT_CE]   - nbuf_tx_mgmt[NBUF_TX_PKT_FREE],
-		nbuf_tx_mgmt[NBUF_TX_PKT_FREE]);
-}
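Each printed value is the difference between two adjacent layer counters, i.e. the number of packets currently in flight between those layers. Illustrative example: if nbuf_tx_data[NBUF_TX_PKT_TXRX] is 98 and nbuf_tx_data[NBUF_TX_PKT_HTT] is 97, the TXRX column prints 98 - 97 = 1, meaning one data packet has entered TXRX but has not yet been handed to HTT.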
-
-/**
- * cdf_nbuf_tx_desc_count_update() - Updates the layer packet counter
- * @packet_type   : packet type, either mgmt or data
- * @current_state : layer at which the packet is currently present
- *
- * Return: none
- */
-static inline void cdf_nbuf_tx_desc_count_update(uint8_t packet_type,
-							uint8_t current_state)
-{
-	switch (packet_type) {
-	case NBUF_TX_PKT_MGMT_TRACK:
-		nbuf_tx_mgmt[current_state]++;
-		break;
-	case NBUF_TX_PKT_DATA_TRACK:
-		nbuf_tx_data[current_state]++;
-		break;
-	default:
-		break;
-	}
-}
-
-/**
- * cdf_nbuf_tx_desc_count_clear() - Clears packet counters for both data and mgmt
- *
- * Return: none
- */
-void cdf_nbuf_tx_desc_count_clear(void)
-{
-	memset(nbuf_tx_mgmt, 0, sizeof(nbuf_tx_mgmt));
-	memset(nbuf_tx_data, 0, sizeof(nbuf_tx_data));
-}
-
-/**
- * cdf_nbuf_set_state() - Updates the packet state
- * @nbuf:            network buffer
- * @current_state :  layer at which the packet currently is
- *
- * This function updates the packet state to the layer at which the packet
- * currently is
- *
- * Return: none
- */
-void cdf_nbuf_set_state(cdf_nbuf_t nbuf, uint8_t current_state)
-{
-	/*
-	 * Only Mgmt, Data Packets are tracked. WMI messages
-	 * such as scan commands are not tracked
-	 */
-	uint8_t packet_type;
-	packet_type = NBUF_GET_PACKET_TRACK(nbuf);
-
-	if ((packet_type != NBUF_TX_PKT_DATA_TRACK) &&
-		(packet_type != NBUF_TX_PKT_MGMT_TRACK)) {
-		return;
-	}
-	NBUF_SET_PACKET_STATE(nbuf, current_state);
-	cdf_nbuf_tx_desc_count_update(packet_type,
-					current_state);
-}
-
-cdf_nbuf_trace_update_t trace_update_cb = NULL;
-
-/**
- * __cdf_nbuf_alloc() - Allocate nbuf
- * @hdl: Device handle
- * @size: Netbuf requested size
- * @reserve: Reserve
- * @align: Align
- * @prio: Priority
- *
- * This allocates an nbuf, aligns it if needed, and reserves some space at the
- * front. Since the reserve is done after alignment, an unaligned reserve
- * value will result in an unaligned data address.
- *
- * Return: nbuf or %NULL if no memory
- */
-struct sk_buff *__cdf_nbuf_alloc(cdf_device_t osdev, size_t size, int reserve,
-				 int align, int prio)
-{
-	struct sk_buff *skb;
-	unsigned long offset;
-
-	if (align)
-		size += (align - 1);
-
-	skb = dev_alloc_skb(size);
-
-	if (!skb) {
-		pr_err("ERROR:NBUF alloc failed\n");
-		return NULL;
-	}
-	memset(skb->cb, 0x0, sizeof(skb->cb));
-
-	/*
-	 * The default is for netbuf fragments to be interpreted
-	 * as wordstreams rather than bytestreams.
-	 * Set the CVG_NBUF_MAX_EXTRA_FRAGS+1 wordstream_flags bits,
-	 * to provide this default.
-	 */
-	NBUF_EXTRA_FRAG_WORDSTREAM_FLAGS(skb) =
-		(1 << (CVG_NBUF_MAX_EXTRA_FRAGS + 1)) - 1;
-
-	/*
-	 * XXX:how about we reserve first then align
-	 * Align & make sure that the tail & data are adjusted properly
-	 */
-
-	if (align) {
-		offset = ((unsigned long)skb->data) % align;
-		if (offset)
-			skb_reserve(skb, align - offset);
-	}
-
-	/*
-	 * NOTE:alloc doesn't take responsibility if reserve unaligns the data
-	 * pointer
-	 */
-	skb_reserve(skb, reserve);
-
-	return skb;
-}
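A hedged usage sketch of the allocator above, with illustrative numbers showing how the size padding and the two skb_reserve() calls interact (the caller function name is hypothetical):

/* Illustrative only: size = 2000, reserve = 64, align = 4.  The request
 * is padded to 2003 bytes, up to 3 bytes are reserved to 4-byte-align
 * skb->data, then the requested 64 bytes are reserved.  A reserve value
 * that is not a multiple of the alignment would leave skb->data
 * unaligned, which is the caveat noted in the comment above. */
static struct sk_buff *example_aligned_nbuf(cdf_device_t osdev)
{
	return __cdf_nbuf_alloc(osdev, 2000, 64, 4, 0);
}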
-
-/**
- * __cdf_nbuf_free() - free the nbuf (interrupt safe)
- * @skb: Pointer to network buffer
- *
- * Return: none
- */
-void __cdf_nbuf_free(struct sk_buff *skb)
-{
-	if ((NBUF_OWNER_ID(skb) == IPA_NBUF_OWNER_ID) && NBUF_CALLBACK_FN(skb))
-		NBUF_CALLBACK_FN_EXEC(skb);
-	else
-		dev_kfree_skb_any(skb);
-}
-
-/**
- * __cdf_nbuf_map() - get the dma map of the nbuf
- * @osdev: OS device
- * @skb: Pointer to network buffer
- * @dir: Direction
- *
- * Return: CDF_STATUS
- */
-CDF_STATUS
-__cdf_nbuf_map(cdf_device_t osdev, struct sk_buff *skb, cdf_dma_dir_t dir)
-{
-#ifdef CDF_OS_DEBUG
-	struct skb_shared_info *sh = skb_shinfo(skb);
-#endif
-	cdf_assert((dir == CDF_DMA_TO_DEVICE)
-		   || (dir == CDF_DMA_FROM_DEVICE));
-
-	/*
-	 * Assume there's only a single fragment.
-	 * To support multiple fragments, it would be necessary to change
-	 * cdf_nbuf_t to be a separate object that stores meta-info
-	 * (including the bus address for each fragment) and a pointer
-	 * to the underlying sk_buff.
-	 */
-	cdf_assert(sh->nr_frags == 0);
-
-	return __cdf_nbuf_map_single(osdev, skb, dir);
-}
-
-/**
- * __cdf_nbuf_unmap() - to unmap a previously mapped buf
- * @osdev: OS device
- * @skb: Pointer to network buffer
- * @dir: Direction
- *
- * Return: none
- */
-void
-__cdf_nbuf_unmap(cdf_device_t osdev, struct sk_buff *skb, cdf_dma_dir_t dir)
-{
-	cdf_assert((dir == CDF_DMA_TO_DEVICE)
-		   || (dir == CDF_DMA_FROM_DEVICE));
-	/*
-	 * Assume there's a single fragment.
-	 * If this is not true, the assertion in __cdf_nbuf_map will catch it.
-	 */
-	__cdf_nbuf_unmap_single(osdev, skb, dir);
-}
-
-/**
- * __cdf_nbuf_map_single() - dma map of the nbuf
- * @osdev: OS device
- * @skb: Pointer to network buffer
- * @dir: Direction
- *
- * Return: CDF_STATUS
- */
-CDF_STATUS
-__cdf_nbuf_map_single(cdf_device_t osdev, cdf_nbuf_t buf, cdf_dma_dir_t dir)
-{
-	uint32_t paddr_lo;
-
-/* temporary hack for simulation */
-#ifdef A_SIMOS_DEVHOST
-	NBUF_MAPPED_PADDR_LO(buf) = paddr_lo = (uint32_t) buf->data;
-	return CDF_STATUS_SUCCESS;
-#else
-	/* assume that the OS only provides a single fragment */
-	NBUF_MAPPED_PADDR_LO(buf) = paddr_lo =
-					dma_map_single(osdev->dev, buf->data,
-					skb_end_pointer(buf) - buf->data, dir);
-	return dma_mapping_error(osdev->dev, paddr_lo) ?
-	       CDF_STATUS_E_FAILURE : CDF_STATUS_SUCCESS;
-#endif /* #ifdef A_SIMOS_DEVHOST */
-}
-
-/**
- * __cdf_nbuf_unmap_single() - dma unmap nbuf
- * @osdev: OS device
- * @skb: Pointer to network buffer
- * @dir: Direction
- *
- * Return: none
- */
-void
-__cdf_nbuf_unmap_single(cdf_device_t osdev, cdf_nbuf_t buf, cdf_dma_dir_t dir)
-{
-#if !defined(A_SIMOS_DEVHOST)
-	dma_unmap_single(osdev->dev, NBUF_MAPPED_PADDR_LO(buf),
-			 skb_end_pointer(buf) - buf->data, dir);
-#endif /* #if !defined(A_SIMOS_DEVHOST) */
-}
-
-/**
- * __cdf_nbuf_set_rx_cksum() - set rx checksum
- * @skb: Pointer to network buffer
- * @cksum: Pointer to checksum value
- *
- * Return: CDF_STATUS
- */
-CDF_STATUS
-__cdf_nbuf_set_rx_cksum(struct sk_buff *skb, cdf_nbuf_rx_cksum_t *cksum)
-{
-	switch (cksum->l4_result) {
-	case CDF_NBUF_RX_CKSUM_NONE:
-		skb->ip_summed = CHECKSUM_NONE;
-		break;
-	case CDF_NBUF_RX_CKSUM_TCP_UDP_UNNECESSARY:
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		break;
-	case CDF_NBUF_RX_CKSUM_TCP_UDP_HW:
-		skb->ip_summed = CHECKSUM_PARTIAL;
-		skb->csum = cksum->val;
-		break;
-	default:
-		pr_err("ADF_NET:Unknown checksum type\n");
-		cdf_assert(0);
-		return CDF_STATUS_E_NOSUPPORT;
-	}
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * __cdf_nbuf_get_tx_cksum() - get tx checksum
- * @skb: Pointer to network buffer
- *
- * Return: TX checksum value
- */
-cdf_nbuf_tx_cksum_t __cdf_nbuf_get_tx_cksum(struct sk_buff *skb)
-{
-	switch (skb->ip_summed) {
-	case CHECKSUM_NONE:
-		return CDF_NBUF_TX_CKSUM_NONE;
-	case CHECKSUM_PARTIAL:
-		/* XXX: ADF and Linux checksums don't map 1-to-1, so this
-		 * is not 100% correct */
-		return CDF_NBUF_TX_CKSUM_TCP_UDP;
-	case CHECKSUM_COMPLETE:
-		return CDF_NBUF_TX_CKSUM_TCP_UDP_IP;
-	default:
-		return CDF_NBUF_TX_CKSUM_NONE;
-	}
-}
-
-/**
- * __cdf_nbuf_get_tid() - get tid
- * @skb: Pointer to network buffer
- *
- * Return: tid
- */
-uint8_t __cdf_nbuf_get_tid(struct sk_buff *skb)
-{
-	return skb->priority;
-}
-
-/**
- * __cdf_nbuf_set_tid() - set tid
- * @skb: Pointer to network buffer
- * @tid: TID value to set
- *
- * Return: none
- */
-void __cdf_nbuf_set_tid(struct sk_buff *skb, uint8_t tid)
-{
-	skb->priority = tid;
-}
-
-/**
- * __cdf_nbuf_get_exemption_type() - get exemption type
- * @skb: Pointer to network buffer
- *
- * Return: exemption type
- */
-uint8_t __cdf_nbuf_get_exemption_type(struct sk_buff *skb)
-{
-	return CDF_NBUF_EXEMPT_NO_EXEMPTION;
-}
-
-/**
- * __cdf_nbuf_reg_trace_cb() - register trace callback
- * @cb_func_ptr: Pointer to trace callback function
- *
- * Return: none
- */
-void __cdf_nbuf_reg_trace_cb(cdf_nbuf_trace_update_t cb_func_ptr)
-{
-	trace_update_cb = cb_func_ptr;
-	return;
-}
-
-#ifdef QCA_PKT_PROTO_TRACE
-/**
- * __cdf_nbuf_trace_update() - update trace event
- * @buf: Pointer to network buffer
- * @event_string: Pointer to the event string
- *
- * Return: none
- */
-void __cdf_nbuf_trace_update(struct sk_buff *buf, char *event_string)
-{
-	char string_buf[NBUF_PKT_TRAC_MAX_STRING];
-
-	if ((!trace_update_cb) || (!event_string))
-		return;
-
-	if (!cdf_nbuf_trace_get_proto_type(buf))
-		return;
-
-	/* Buffer overflow */
-	if (NBUF_PKT_TRAC_MAX_STRING <=
-	    (cdf_str_len(event_string) + NBUF_PKT_TRAC_PROTO_STRING)) {
-		return;
-	}
-
-	cdf_mem_zero(string_buf, NBUF_PKT_TRAC_MAX_STRING);
-	cdf_mem_copy(string_buf, event_string, cdf_str_len(event_string));
-	if (NBUF_PKT_TRAC_TYPE_EAPOL & cdf_nbuf_trace_get_proto_type(buf)) {
-		cdf_mem_copy(string_buf + cdf_str_len(event_string),
-			     "EPL", NBUF_PKT_TRAC_PROTO_STRING);
-	} else if (NBUF_PKT_TRAC_TYPE_DHCP & cdf_nbuf_trace_get_proto_type(buf)) {
-		cdf_mem_copy(string_buf + cdf_str_len(event_string),
-			     "DHC", NBUF_PKT_TRAC_PROTO_STRING);
-	} else if (NBUF_PKT_TRAC_TYPE_MGMT_ACTION &
-		   cdf_nbuf_trace_get_proto_type(buf)) {
-		cdf_mem_copy(string_buf + cdf_str_len(event_string),
-			     "MACT", NBUF_PKT_TRAC_PROTO_STRING);
-	}
-
-	trace_update_cb(string_buf);
-	return;
-}
-#endif /* QCA_PKT_PROTO_TRACE */
-
-#ifdef MEMORY_DEBUG
-#define CDF_NET_BUF_TRACK_MAX_SIZE    (1024)
-
-/**
- * struct cdf_nbuf_track_t - Network buffer track structure
- *
- * @p_next: Pointer to next
- * @net_buf: Pointer to network buffer
- * @file_name: File name
- * @line_num: Line number
- * @size: Size
- */
-struct cdf_nbuf_track_t {
-	struct cdf_nbuf_track_t *p_next;
-	cdf_nbuf_t net_buf;
-	uint8_t *file_name;
-	uint32_t line_num;
-	size_t size;
-};
-
-spinlock_t g_cdf_net_buf_track_lock;
-typedef struct cdf_nbuf_track_t CDF_NBUF_TRACK;
-
-CDF_NBUF_TRACK *gp_cdf_net_buf_track_tbl[CDF_NET_BUF_TRACK_MAX_SIZE];
-
-/**
- * cdf_net_buf_debug_init() - initialize network buffer debug functionality
- *
- * The CDF network buffer debug feature tracks all SKBs allocated by the WLAN
- * driver in a hash table and, when the driver is unloaded, reports any leaked
- * SKBs. WLAN driver modules whose allocated SKBs are freed by the network
- * stack are supposed to call cdf_net_buf_debug_release_skb() so that those
- * SKBs are not reported as memory leaks.
- *
- * Return: none
- */
-void cdf_net_buf_debug_init(void)
-{
-	uint32_t i;
-	unsigned long irq_flag;
-
-	spin_lock_init(&g_cdf_net_buf_track_lock);
-
-	spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
-
-	for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++)
-		gp_cdf_net_buf_track_tbl[i] = NULL;
-
-	spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
-
-	return;
-}
-
-/**
- * cdf_net_buf_debug_exit() - exit network buffer debug functionality
- *
- * Exit network buffer tracking debug functionality and log SKB memory leaks
- *
- * Return: none
- */
-void cdf_net_buf_debug_exit(void)
-{
-	uint32_t i;
-	unsigned long irq_flag;
-	CDF_NBUF_TRACK *p_node;
-	CDF_NBUF_TRACK *p_prev;
-
-	spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
-
-	for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++) {
-		p_node = gp_cdf_net_buf_track_tbl[i];
-		while (p_node) {
-			p_prev = p_node;
-			p_node = p_node->p_next;
-			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
-				  "SKB buf memory Leak@ File %s, @Line %d, size %zu\n",
-				  p_prev->file_name, p_prev->line_num,
-				  p_prev->size);
-		}
-	}
-
-	spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
-
-	return;
-}
-
-/**
- * cdf_net_buf_debug_clean() - clean up network buffer debug functionality
- *
- * Return: none
- */
-void cdf_net_buf_debug_clean(void)
-{
-	uint32_t i;
-	unsigned long irq_flag;
-	CDF_NBUF_TRACK *p_node;
-	CDF_NBUF_TRACK *p_prev;
-
-	spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
-
-	for (i = 0; i < CDF_NET_BUF_TRACK_MAX_SIZE; i++) {
-		p_node = gp_cdf_net_buf_track_tbl[i];
-		while (p_node) {
-			p_prev = p_node;
-			p_node = p_node->p_next;
-			cdf_mem_free(p_prev);
-		}
-	}
-
-	spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
-
-	return;
-}
-
-/**
- * cdf_net_buf_debug_hash() - hash network buffer pointer
- *
- * Return: hash value
- */
-uint32_t cdf_net_buf_debug_hash(cdf_nbuf_t net_buf)
-{
-	uint32_t i;
-
-	i = (uint32_t) ((uintptr_t) net_buf & (CDF_NET_BUF_TRACK_MAX_SIZE - 1));
-
-	return i;
-}
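Worked example: CDF_NET_BUF_TRACK_MAX_SIZE is 1024, so the hash simply keeps the low 10 bits of the pointer. An nbuf at the (illustrative) address 0x...12345678 therefore lands in bucket 0x12345678 & 0x3ff = 0x278.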
-
-/**
- * cdf_net_buf_debug_look_up() - look up network buffer in debug hash table
- *
- * Return: If the skb is found in the hash table, return a pointer to its
- *	tracking node, else return %NULL
- */
-CDF_NBUF_TRACK *cdf_net_buf_debug_look_up(cdf_nbuf_t net_buf)
-{
-	uint32_t i;
-	CDF_NBUF_TRACK *p_node;
-
-	i = cdf_net_buf_debug_hash(net_buf);
-	p_node = gp_cdf_net_buf_track_tbl[i];
-
-	while (p_node) {
-		if (p_node->net_buf == net_buf)
-			return p_node;
-		p_node = p_node->p_next;
-	}
-
-	return NULL;
-
-}
-
-/**
- * cdf_net_buf_debug_add_node() - store skb in debug hash table
- *
- * Return: none
- */
-void cdf_net_buf_debug_add_node(cdf_nbuf_t net_buf, size_t size,
-				uint8_t *file_name, uint32_t line_num)
-{
-	uint32_t i;
-	unsigned long irq_flag;
-	CDF_NBUF_TRACK *p_node;
-
-	spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
-
-	i = cdf_net_buf_debug_hash(net_buf);
-	p_node = cdf_net_buf_debug_look_up(net_buf);
-
-	if (p_node) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "Double allocation of skb ! Already allocated from %s %d",
-			  p_node->file_name, p_node->line_num);
-		CDF_ASSERT(0);
-		goto done;
-	} else {
-		p_node = (CDF_NBUF_TRACK *) cdf_mem_malloc(sizeof(*p_node));
-		if (p_node) {
-			p_node->net_buf = net_buf;
-			p_node->file_name = file_name;
-			p_node->line_num = line_num;
-			p_node->size = size;
-			p_node->p_next = gp_cdf_net_buf_track_tbl[i];
-			gp_cdf_net_buf_track_tbl[i] = p_node;
-		} else {
-			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-				  "Mem alloc failed ! Could not track skb from %s %d of size %zu",
-				  file_name, line_num, size);
-			CDF_ASSERT(0);
-		}
-	}
-
-done:
-	spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
-
-	return;
-}
-
-/**
- * cdf_net_buf_debug_delete_node() - remove skb from debug hash table
- *
- * Return: none
- */
-void cdf_net_buf_debug_delete_node(cdf_nbuf_t net_buf)
-{
-	uint32_t i;
-	bool found = false;
-	CDF_NBUF_TRACK *p_head;
-	CDF_NBUF_TRACK *p_node;
-	unsigned long irq_flag;
-	CDF_NBUF_TRACK *p_prev;
-
-	spin_lock_irqsave(&g_cdf_net_buf_track_lock, irq_flag);
-
-	i = cdf_net_buf_debug_hash(net_buf);
-	p_head = gp_cdf_net_buf_track_tbl[i];
-
-	/* Unallocated SKB */
-	if (!p_head)
-		goto done;
-
-	p_node = p_head;
-	/* Found at head of the table */
-	if (p_head->net_buf == net_buf) {
-		gp_cdf_net_buf_track_tbl[i] = p_node->p_next;
-		cdf_mem_free((void *)p_node);
-		found = true;
-		goto done;
-	}
-
-	/* Search in collision list */
-	while (p_node) {
-		p_prev = p_node;
-		p_node = p_node->p_next;
-		if ((NULL != p_node) && (p_node->net_buf == net_buf)) {
-			p_prev->p_next = p_node->p_next;
-			cdf_mem_free((void *)p_node);
-			found = true;
-			break;
-		}
-	}
-
-done:
-	if (!found) {
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-			  "Unallocated buffer ! Double free of net_buf %p ?",
-			  net_buf);
-		CDF_ASSERT(0);
-	}
-
-	spin_unlock_irqrestore(&g_cdf_net_buf_track_lock, irq_flag);
-
-	return;
-}
-
-/**
- * cdf_net_buf_debug_release_skb() - release skb to avoid memory leak
- *
- * WLAN driver modules whose allocated SKBs are freed by the network stack
- * are supposed to call this API before returning an SKB to the network
- * stack so that the SKB is not reported as a memory leak.
- *
- * Return: none
- */
-void cdf_net_buf_debug_release_skb(cdf_nbuf_t net_buf)
-{
-	cdf_net_buf_debug_delete_node(net_buf);
-}
-
-#endif /*MEMORY_DEBUG */
-#if defined(FEATURE_TSO)
-
-struct cdf_tso_cmn_seg_info_t {
-	uint16_t ethproto;
-	uint16_t ip_tcp_hdr_len;
-	uint16_t l2_len;
-	unsigned char *eit_hdr;
-	unsigned int eit_hdr_len;
-	struct tcphdr *tcphdr;
-	uint16_t ipv4_csum_en;
-	uint16_t tcp_ipv4_csum_en;
-	uint16_t tcp_ipv6_csum_en;
-	uint16_t ip_id;
-	uint32_t tcp_seq_num;
-};
-
-/**
- * __cdf_nbuf_get_tso_cmn_seg_info() - get TSO common
- * information
- * @skb: network buffer
- * @tso_info: output; TSO information common to all segments
- *
- * Get the TSO information that is common across all the TCP
- * segments of the jumbo packet
- *
- * Return: 0 - success 1 - failure
- */
-uint8_t __cdf_nbuf_get_tso_cmn_seg_info(struct sk_buff *skb,
-	struct cdf_tso_cmn_seg_info_t *tso_info)
-{
-	/* Get ethernet type and ethernet header length */
-	tso_info->ethproto = vlan_get_protocol(skb);
-
-	/* Determine whether this is an IPv4 or IPv6 packet */
-	if (tso_info->ethproto == htons(ETH_P_IP)) { /* IPv4 */
-		/* for IPv4, get the IP ID and enable TCP and IP csum */
-		struct iphdr *ipv4_hdr = ip_hdr(skb);
-		tso_info->ip_id = ntohs(ipv4_hdr->id);
-		tso_info->ipv4_csum_en = 1;
-		tso_info->tcp_ipv4_csum_en = 1;
-		if (cdf_unlikely(ipv4_hdr->protocol != IPPROTO_TCP)) {
-			cdf_print("TSO IPV4 proto 0x%x not TCP\n",
-				 ipv4_hdr->protocol);
-			return 1;
-		}
-	} else if (tso_info->ethproto == htons(ETH_P_IPV6)) { /* IPv6 */
-		/* for IPv6, enable TCP csum. No IP ID or IP csum */
-		tso_info->tcp_ipv6_csum_en = 1;
-	} else {
-		cdf_print("TSO: ethertype 0x%x is not supported!\n",
-			 tso_info->ethproto);
-		return 1;
-	}
-
-	tso_info->l2_len = (skb_network_header(skb) - skb_mac_header(skb));
-	tso_info->tcphdr = tcp_hdr(skb);
-	tso_info->tcp_seq_num = ntohl(tcp_hdr(skb)->seq);
-	/* get pointer to the ethernet + IP + TCP header and their length */
-	tso_info->eit_hdr = skb->data;
-	tso_info->eit_hdr_len = (skb_transport_header(skb)
-		 - skb_mac_header(skb)) + tcp_hdrlen(skb);
-	tso_info->ip_tcp_hdr_len = tso_info->eit_hdr_len - tso_info->l2_len;
-	return 0;
-}
-
-/**
- * __cdf_nbuf_get_tso_info() - function to divide a TSO nbuf
- * into segments
- * @osdev:  OS device handle
- * @skb:    network buffer to be segmented
- * @tso_info:  This is the output. The information about the
- *      TSO segments will be populated within this.
- *
- * This function fragments a TCP jumbo packet into smaller
- * segments to be transmitted by the driver. It chains the TSO
- * segments created into a list.
- *
- * Return: number of TSO segments
- */
-uint32_t __cdf_nbuf_get_tso_info(cdf_device_t osdev, struct sk_buff *skb,
-		struct cdf_tso_info_t *tso_info)
-{
-	/* common across all segments */
-	struct cdf_tso_cmn_seg_info_t tso_cmn_info;
-
-	/* segment specific */
-	char *tso_frag_vaddr;
-	uint32_t tso_frag_paddr_32 = 0;
-	uint32_t num_seg = 0;
-	struct cdf_tso_seg_elem_t *curr_seg;
-	const struct skb_frag_struct *frag = NULL;
-	uint32_t tso_frag_len = 0; /* tso segment's fragment length*/
-	uint32_t skb_frag_len = 0; /* skb's fragment length (contiguous memory) */
-	uint32_t foffset = 0; /* offset into the skb's fragment */
-	uint32_t skb_proc = 0; /* bytes of the skb that have been processed*/
-	uint32_t tso_seg_size = skb_shinfo(skb)->gso_size;
-
-	memset(&tso_cmn_info, 0x0, sizeof(tso_cmn_info));
-
-	if (cdf_unlikely(__cdf_nbuf_get_tso_cmn_seg_info(skb, &tso_cmn_info))) {
-		cdf_print("TSO: error getting common segment info\n");
-		return 0;
-	}
-	curr_seg = tso_info->tso_seg_list;
-
-	/* length of the first chunk of data in the skb */
-	skb_proc = skb_frag_len = skb->len - skb->data_len;
-
-	/* the 0th tso segment's 0th fragment always contains the EIT header */
-	/* update the remaining skb fragment length and TSO segment length */
-	skb_frag_len -= tso_cmn_info.eit_hdr_len;
-	skb_proc -= tso_cmn_info.eit_hdr_len;
-
-	/* get the address to the next tso fragment */
-	tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
-	/* get the length of the next tso fragment */
-	tso_frag_len = min(skb_frag_len, tso_seg_size);
-	tso_frag_paddr_32 = dma_map_single(osdev->dev,
-		 tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
-
-	num_seg = tso_info->num_segs;
-	tso_info->num_segs = 0;
-	tso_info->is_tso = 1;
-
-	while (num_seg && curr_seg) {
-		int i = 1; /* tso fragment index */
-		int j = 0; /* skb fragment index */
-		uint8_t more_tso_frags = 1;
-		uint8_t from_frag_table = 0;
-
-		/* Initialize the flags to 0 */
-		memset(&curr_seg->seg, 0x0, sizeof(curr_seg->seg));
-		tso_info->num_segs++;
-
-		/* The following fields remain the same across all segments of
-		 a jumbo packet */
-		curr_seg->seg.tso_flags.tso_enable = 1;
-		curr_seg->seg.tso_flags.partial_checksum_en = 0;
-		curr_seg->seg.tso_flags.ipv4_checksum_en =
-			tso_cmn_info.ipv4_csum_en;
-		curr_seg->seg.tso_flags.tcp_ipv6_checksum_en =
-			tso_cmn_info.tcp_ipv6_csum_en;
-		curr_seg->seg.tso_flags.tcp_ipv4_checksum_en =
-			tso_cmn_info.tcp_ipv4_csum_en;
-		curr_seg->seg.tso_flags.l2_len = 0;
-		curr_seg->seg.tso_flags.tcp_flags_mask = 0x1FF;
-		curr_seg->seg.num_frags = 0;
-
-		/* The following fields change for the segments */
-		curr_seg->seg.tso_flags.ip_id = tso_cmn_info.ip_id;
-		tso_cmn_info.ip_id++;
-
-		curr_seg->seg.tso_flags.syn = tso_cmn_info.tcphdr->syn;
-		curr_seg->seg.tso_flags.rst = tso_cmn_info.tcphdr->rst;
-		curr_seg->seg.tso_flags.psh = tso_cmn_info.tcphdr->psh;
-		curr_seg->seg.tso_flags.ack = tso_cmn_info.tcphdr->ack;
-		curr_seg->seg.tso_flags.urg = tso_cmn_info.tcphdr->urg;
-		curr_seg->seg.tso_flags.ece = tso_cmn_info.tcphdr->ece;
-		curr_seg->seg.tso_flags.cwr = tso_cmn_info.tcphdr->cwr;
-
-		curr_seg->seg.tso_flags.tcp_seq_num = tso_cmn_info.tcp_seq_num;
-
-		/* First fragment for each segment always contains the ethernet,
-		IP and TCP header */
-		curr_seg->seg.tso_frags[0].vaddr = tso_cmn_info.eit_hdr;
-		curr_seg->seg.tso_frags[0].length = tso_cmn_info.eit_hdr_len;
-		tso_info->total_len = curr_seg->seg.tso_frags[0].length;
-		curr_seg->seg.tso_frags[0].paddr_low_32 =
-			 dma_map_single(osdev->dev, tso_cmn_info.eit_hdr,
-				tso_cmn_info.eit_hdr_len, DMA_TO_DEVICE);
-		curr_seg->seg.tso_flags.ip_len = tso_cmn_info.ip_tcp_hdr_len;
-		curr_seg->seg.num_frags++;
-
-		while (more_tso_frags) {
-			curr_seg->seg.tso_frags[i].vaddr = tso_frag_vaddr;
-			curr_seg->seg.tso_frags[i].length = tso_frag_len;
-			tso_info->total_len +=
-				 curr_seg->seg.tso_frags[i].length;
-			curr_seg->seg.tso_flags.ip_len +=
-				 curr_seg->seg.tso_frags[i].length;
-			curr_seg->seg.num_frags++;
-			skb_proc = skb_proc - curr_seg->seg.tso_frags[i].length;
-
-			/* increment the TCP sequence number */
-			tso_cmn_info.tcp_seq_num += tso_frag_len;
-			curr_seg->seg.tso_frags[i].paddr_upper_16 = 0;
-			curr_seg->seg.tso_frags[i].paddr_low_32 =
-				 tso_frag_paddr_32;
-
-			/* if there is no more data left in the skb */
-			if (!skb_proc)
-				return tso_info->num_segs;
-
-			/* get the next payload fragment information */
-			/* check if there are more fragments in this segment */
-			if ((tso_seg_size - tso_frag_len)) {
-				more_tso_frags = 1;
-				i++;
-			} else {
-				more_tso_frags = 0;
-				/* reset i and the tso payload size */
-				i = 1;
-				tso_seg_size = skb_shinfo(skb)->gso_size;
-			}
-
-			/* if the next fragment is contiguous */
-			if (tso_frag_len < skb_frag_len) {
-				skb_frag_len = skb_frag_len - tso_frag_len;
-				tso_frag_len = min(skb_frag_len, tso_seg_size);
-				tso_frag_vaddr = tso_frag_vaddr + tso_frag_len;
-				if (from_frag_table) {
-					tso_frag_paddr_32 =
-						 skb_frag_dma_map(osdev->dev,
-							 frag, foffset,
-							 tso_frag_len,
-							 DMA_TO_DEVICE);
-				} else {
-					tso_frag_paddr_32 =
-						 dma_map_single(osdev->dev,
-							 tso_frag_vaddr,
-							 tso_frag_len,
-							 DMA_TO_DEVICE);
-				}
-			} else { /* the next fragment is not contiguous */
-				tso_frag_len = min(skb_frag_len, tso_seg_size);
-				frag = &skb_shinfo(skb)->frags[j];
-				skb_frag_len = skb_frag_size(frag);
-
-				tso_frag_vaddr = skb_frag_address(frag);
-				tso_frag_paddr_32 = skb_frag_dma_map(osdev->dev,
-					 frag, 0, tso_frag_len,
-					 DMA_TO_DEVICE);
-				foffset += tso_frag_len;
-				from_frag_table = 1;
-				j++;
-			}
-		}
-		num_seg--;
-		/* if TCP FIN flag was set, set it in the last segment */
-		if (!num_seg)
-			curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
-
-		curr_seg = curr_seg->next;
-	}
-	return tso_info->num_segs;
-}
-
-/**
- * __cdf_nbuf_get_tso_num_seg() - count the TSO segments of an nbuf
- * @skb: network buffer whose segments are to be counted
- *
- * This function calculates how many TSO segments a TCP jumbo packet will
- * be split into, based on its payload length and the gso_size.
- *
- * Return: number of TSO segments
- */
-uint32_t __cdf_nbuf_get_tso_num_seg(struct sk_buff *skb)
-{
-	uint32_t gso_size, tmp_len, num_segs = 0;
-
-	gso_size = skb_shinfo(skb)->gso_size;
-	tmp_len = skb->len - ((skb_transport_header(skb) - skb_mac_header(skb))
-		+ tcp_hdrlen(skb));
-	while (tmp_len) {
-		num_segs++;
-		if (tmp_len > gso_size)
-			tmp_len -= gso_size;
-		else
-			break;
-	}
-	return num_segs;
-}
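Worked example: for a jumbo frame whose TCP payload (skb->len minus the Ethernet, IP and TCP headers) is 4000 bytes and whose gso_size is 1460, the loop above counts 4000 -> 2540 -> 1080 and then stops, i.e. num_segs = ceil(4000 / 1460) = 3.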
-
-struct sk_buff *__cdf_nbuf_inc_users(struct sk_buff *skb)
-{
-	atomic_inc(&skb->users);
-	return skb;
-}
-
-#endif /* FEATURE_TSO */

+ 0 - 1012
qdf/src/qdf_trace.c

@@ -1,1012 +0,0 @@
-/*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/**
- *  DOC:  cdf_trace
- *
- *  Connectivity driver framework (CDF) trace APIs
- *
- *  Trace, logging, and debugging definitions and APIs
- *
- */
-
-/* Include Files */
-#include <cdf_trace.h>
-#include <ani_global.h>
-#include <wlan_logging_sock_svc.h>
-#include "cdf_time.h"
-/* Preprocessor definitions and constants */
-
-#define CDF_TRACE_BUFFER_SIZE (512)
-
-enum cdf_timestamp_unit cdf_log_timestamp_type = CDF_LOG_TIMESTAMP_UNIT;
-
-/* macro to map cdf trace levels into the bitmask */
-#define CDF_TRACE_LEVEL_TO_MODULE_BITMASK(_level) ((1 << (_level)))
-
-typedef struct {
-	/* Trace level for a module, as a bitmask.  The bits in this mask
-	 * are ordered by CDF_TRACE_LEVEL.  For example, each bit represents
-	 * one of the bits in CDF_TRACE_LEVEL that may be turned on to have
-	 * traces at that level logged, i.e. if CDF_TRACE_LEVEL_ERROR is
-	 * == 2, then if bit 2 (low order) is turned ON, then ERROR traces
-	 * will be printed to the trace log.
-	 * Note that all bits turned OFF means no traces
-	 */
-	uint16_t moduleTraceLevel;
-
-	/* 3 character string name for the module */
-	unsigned char moduleNameStr[4]; /* 3 chars plus the NULL */
-} moduleTraceInfo;
-
-#define CDF_DEFAULT_TRACE_LEVEL	\
-	((1 << CDF_TRACE_LEVEL_FATAL) | (1 << CDF_TRACE_LEVEL_ERROR))
-
-/* Array of static data that contains all of the per module trace
- * information.  This includes the trace level for the module and
- * the 3 character 'name' of the module for marking the trace logs
- */
-moduleTraceInfo g_cdf_trace_info[CDF_MODULE_ID_MAX] = {
-	[CDF_MODULE_ID_TLSHIM] = {CDF_DEFAULT_TRACE_LEVEL, "DP"},
-	[CDF_MODULE_ID_WMI] = {CDF_DEFAULT_TRACE_LEVEL, "WMI"},
-	[CDF_MODULE_ID_HDD] = {CDF_DEFAULT_TRACE_LEVEL, "HDD"},
-	[CDF_MODULE_ID_SME] = {CDF_DEFAULT_TRACE_LEVEL, "SME"},
-	[CDF_MODULE_ID_PE] = {CDF_DEFAULT_TRACE_LEVEL, "PE "},
-	[CDF_MODULE_ID_WMA] = {CDF_DEFAULT_TRACE_LEVEL, "WMA"},
-	[CDF_MODULE_ID_SYS] = {CDF_DEFAULT_TRACE_LEVEL, "SYS"},
-	[CDF_MODULE_ID_CDF] = {CDF_DEFAULT_TRACE_LEVEL, "CDF"},
-	[CDF_MODULE_ID_SAP] = {CDF_DEFAULT_TRACE_LEVEL, "SAP"},
-	[CDF_MODULE_ID_HDD_SOFTAP] = {CDF_DEFAULT_TRACE_LEVEL, "HSP"},
-	[CDF_MODULE_ID_HDD_DATA] = {CDF_DEFAULT_TRACE_LEVEL, "HDP"},
-	[CDF_MODULE_ID_HDD_SAP_DATA] = {CDF_DEFAULT_TRACE_LEVEL, "SDP"},
-	[CDF_MODULE_ID_BMI] = {CDF_DEFAULT_TRACE_LEVEL, "BMI"},
-	[CDF_MODULE_ID_HIF] = {CDF_DEFAULT_TRACE_LEVEL, "HIF"},
-	[CDF_MODULE_ID_TXRX] = {CDF_DEFAULT_TRACE_LEVEL, "TRX"},
-	[CDF_MODULE_ID_HTT] = {CDF_DEFAULT_TRACE_LEVEL, "HTT"},
-};
-
-/* Static and Global variables */
-static spinlock_t ltrace_lock;
-
-static cdf_trace_record_t g_cdf_trace_tbl[MAX_CDF_TRACE_RECORDS];
-/* global cdf trace data */
-static t_cdf_trace_data g_cdf_trace_data;
-/*
- * All the callback functions for dumping MTRACE messages from the ring buffer
- * are stored in cdf_trace_cb_table. These callbacks are initialized only
- * during init, so we keep a copy of them in cdf_trace_restore_cb_table.
- * If cdf_trace_cb_table is modified, all the callback functions can be
- * retrieved from the restore table.
- */
-static tp_cdf_trace_cb cdf_trace_cb_table[CDF_MODULE_ID_MAX];
-static tp_cdf_trace_cb cdf_trace_restore_cb_table[CDF_MODULE_ID_MAX];
-
-/* Static and Global variables */
-static spinlock_t l_dp_trace_lock;
-
-static struct cdf_dp_trace_record_s
-			g_cdf_dp_trace_tbl[MAX_CDF_DP_TRACE_RECORDS];
-
-/*
- * all the options to configure/control DP trace are
- * defined in this structure
- */
-static struct s_cdf_dp_trace_data g_cdf_dp_trace_data;
-/*
- * All the callback functions for dumping DPTRACE messages from the ring
- * buffer are stored in cdf_dp_trace_cb_table; callbacks are initialized
- * during init
- */
-static tp_cdf_dp_trace_cb cdf_dp_trace_cb_table[CDF_DP_TRACE_MAX];
-
-/**
- * cdf_trace_set_level() - Set the trace level for a particular module
- * @module : module id
- * @level : trace level
- *
- * Trace level is a member of the CDF_TRACE_LEVEL enumeration indicating
- * the severity of the condition causing the trace message to be issued.
- * More severe conditions are more likely to be logged.
- *
- * This is an external API that allows trace levels to be set for each module.
- *
- * Return:  nothing
- */
-void cdf_trace_set_level(CDF_MODULE_ID module, CDF_TRACE_LEVEL level)
-{
-	/* make sure the caller is passing in a valid LEVEL */
-	if (level >= CDF_TRACE_LEVEL_MAX) {
-		pr_err("%s: Invalid trace level %d passed in!\n", __func__,
-		       level);
-		return;
-	}
-
-	/* Treat 'none' differently.  NONE means we have to turn off all
-	 * the bits in the bit mask so none of the traces appear. Anything
-	 * other than 'none' means we need to turn ON a bit in the bitmask
-	 */
-	if (CDF_TRACE_LEVEL_NONE == level)
-		g_cdf_trace_info[module].moduleTraceLevel =
-			CDF_TRACE_LEVEL_NONE;
-	else
-		/* set the desired bit in the bit mask for the module trace
-		 * level */
-		g_cdf_trace_info[module].moduleTraceLevel |=
-			CDF_TRACE_LEVEL_TO_MODULE_BITMASK(level);
-}
-
-/**
- * cdf_trace_set_module_trace_level() - Set module trace level
- * @module: Module id
- * @level: Trace level for a module, as a bitmask as per 'moduleTraceInfo'
- *
- * Sets the module trace level where the trace level is given as a bit mask
- *
- * Return: None
- */
-void cdf_trace_set_module_trace_level(CDF_MODULE_ID module, uint32_t level)
-{
-	if (module < 0 || module >= CDF_MODULE_ID_MAX) {
-		pr_err("%s: Invalid module id %d passed\n", __func__, module);
-		return;
-	}
-	g_cdf_trace_info[module].moduleTraceLevel = level;
-}
-
-void cdf_trace_set_value(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
-			 uint8_t on)
-{
-	/* make sure the caller is passing in a valid LEVEL */
-	if (level < 0 || level >= CDF_TRACE_LEVEL_MAX) {
-		pr_err("%s: Invalid trace level %d passed in!\n", __func__,
-		       level);
-		return;
-	}
-
-	/* make sure the caller is passing in a valid module */
-	if (module < 0 || module >= CDF_MODULE_ID_MAX) {
-		pr_err("%s: Invalid module id %d passed in!\n", __func__,
-		       module);
-		return;
-	}
-
-	/* Treat 'none' differently.  NONE means we have to turn off all
-	   the bits in the bit mask so none of the traces appear */
-	if (CDF_TRACE_LEVEL_NONE == level) {
-		g_cdf_trace_info[module].moduleTraceLevel =
-			CDF_TRACE_LEVEL_NONE;
-	}
-	/* Treat 'All' differently.  All means we have to turn on all
-	   the bits in the bit mask so all of the traces appear */
-	else if (CDF_TRACE_LEVEL_ALL == level) {
-		g_cdf_trace_info[module].moduleTraceLevel = 0xFFFF;
-	} else {
-		if (on)
-			/* set the desired bit in the bit mask for the module
-			   trace level */
-			g_cdf_trace_info[module].moduleTraceLevel |=
-				CDF_TRACE_LEVEL_TO_MODULE_BITMASK(level);
-		else
-			/* clear the desired bit in the bit mask for the module
-			   trace level */
-			g_cdf_trace_info[module].moduleTraceLevel &=
-				~(CDF_TRACE_LEVEL_TO_MODULE_BITMASK(level));
-	}
-}
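Worked example, using the illustrative numbering from the moduleTraceInfo comment earlier in this file, where CDF_TRACE_LEVEL_ERROR == 2: cdf_trace_set_value(module, CDF_TRACE_LEVEL_ERROR, 1) ORs (1 << 2) = 0x4 into moduleTraceLevel, while the same call with on == 0 ANDs with ~0x4, clearing only the ERROR bit and leaving the other level bits untouched.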
-
-/**
- * cdf_trace_get_level() - get the trace level
- * @module : module id
- * @level : trace level
- *
- * This is an external API that returns a bool value to signify if a
- * particular trace level is set for the specified module.
- * A member of the CDF_TRACE_LEVEL enumeration indicating the severity
- * of the condition causing the trace message to be issued.
- *
- * Note that individual trace levels are the only valid values
- * for this API.  CDF_TRACE_LEVEL_NONE and CDF_TRACE_LEVEL_ALL
- * are not valid input and will return false
- *
- * Return:
- *      false - the specified trace level for the specified module is OFF
- *      true - the specified trace level for the specified module is ON
- */
-bool cdf_trace_get_level(CDF_MODULE_ID module, CDF_TRACE_LEVEL level)
-{
-	bool traceOn = false;
-
-	if ((CDF_TRACE_LEVEL_NONE == level) ||
-	    (CDF_TRACE_LEVEL_ALL == level) || (level >= CDF_TRACE_LEVEL_MAX)) {
-		traceOn = false;
-	} else {
-		traceOn = (level & g_cdf_trace_info[module].moduleTraceLevel)
-			  ? true : false;
-	}
-
-	return traceOn;
-}
-
-void cdf_snprintf(char *strBuffer, unsigned int size, char *strFormat, ...)
-{
-	va_list val;
-
-	va_start(val, strFormat);
-	vsnprintf(strBuffer, size, strFormat, val);
-	va_end(val);
-}
-
-#ifdef CDF_ENABLE_TRACING
-
-/**
- * cdf_trace_msg() - externally called trace function
- * @module : Module identifier; a member of the CDF_MODULE_ID
- *	enumeration that identifies the module issuing the trace message.
- * @level : Trace level; a member of the CDF_TRACE_LEVEL enumeration
- *	indicating the severity of the condition causing the trace message
- *	to be issued. More severe conditions are more likely to be logged.
- * @strFormat : Format string for the message to be logged. This format
- *	string contains printf-like replacement parameters, which follow
- *	this parameter in the variable argument list.
- *
- *  Checks the level of severity and accordingly prints the trace messages
- *
- *  Return:  nothing
- *
- */
-void cdf_trace_msg(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
-		   char *strFormat, ...)
-{
-	char strBuffer[CDF_TRACE_BUFFER_SIZE];
-	int n;
-
-	/* Print the trace message when the desired level bit is set in
-	   the module trace level mask */
-	if (g_cdf_trace_info[module].moduleTraceLevel &
-	    CDF_TRACE_LEVEL_TO_MODULE_BITMASK(level)) {
-		/* The trace level strings are kept in an array ordered the
-		 * same way as the levels in the CDF_TRACE_LEVEL enum, so the
-		 * level value indexes directly to the right string. The cdf
-		 * trace levels are: none, Fatal, Error, Warning, Info,
-		 * InfoHigh, InfoMed, InfoLow, Debug.
-		 */
-		static const char *TRACE_LEVEL_STR[] = { "  ", "F ", "E ", "W ",
-						"I ", "IH", "IM", "IL", "D" };
-		va_list val;
-		va_start(val, strFormat);
-
-		/* print the prefix string into the string buffer... */
-		n = snprintf(strBuffer, CDF_TRACE_BUFFER_SIZE,
-			     "wlan: [%d:%2s:%3s] ",
-			     in_interrupt() ? 0 : current->pid,
-			     (char *)TRACE_LEVEL_STR[level],
-			     (char *)g_cdf_trace_info[module].moduleNameStr);
-
-		/* print the formatted log message after the prefix string */
-		if ((n >= 0) && (n < CDF_TRACE_BUFFER_SIZE)) {
-			vsnprintf(strBuffer + n, CDF_TRACE_BUFFER_SIZE - n,
-				  strFormat, val);
-#if defined(WLAN_LOGGING_SOCK_SVC_ENABLE)
-			wlan_log_to_user(level, (char *)strBuffer,
-					 strlen(strBuffer));
-#else
-			pr_err("%s\n", strBuffer);
-#endif
-		}
-		va_end(val);
-	}
-}
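/*
 * A standalone sketch (illustrative only) of the two-stage formatting done in
 * cdf_trace_msg() above: a fixed prefix is written with snprintf(), then the
 * caller's printf-style message is appended with vsnprintf() into whatever
 * space remains. The names below are hypothetical, not driver APIs.
 */
#include <stdarg.h>
#include <stdio.h>

#define TRACE_BUF_SIZE 128

static void trace_msg(const char *module, const char *fmt, ...)
{
	char buf[TRACE_BUF_SIZE];
	va_list ap;
	int n;

	/* stage 1: the "wlan: [pid:level:module]" style prefix */
	n = snprintf(buf, sizeof(buf), "wlan: [%s] ", module);

	/* stage 2: append the formatted message if the prefix fit */
	if (n >= 0 && n < (int)sizeof(buf)) {
		va_start(ap, fmt);
		vsnprintf(buf + n, sizeof(buf) - n, fmt, ap);
		va_end(ap);
	}
	printf("%s\n", buf);
}

int main(void)
{
	trace_msg("HDD", "scan complete, %d results", 7);
	return 0;
}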
-
-void cdf_trace_display(void)
-{
-	CDF_MODULE_ID moduleId;
-
-	pr_err
-		("     1)FATAL  2)ERROR  3)WARN  4)INFO  5)INFO_H  6)INFO_M  7)INFO_L 8)DEBUG\n");
-	for (moduleId = 0; moduleId < CDF_MODULE_ID_MAX; ++moduleId) {
-		pr_err
-			("%2d)%s    %s        %s       %s       %s        %s         %s         %s        %s\n",
-			(int)moduleId, g_cdf_trace_info[moduleId].moduleNameStr,
-			(g_cdf_trace_info[moduleId].
-			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_FATAL)) ? "X" :
-			" ",
-			(g_cdf_trace_info[moduleId].
-			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_ERROR)) ? "X" :
-			" ",
-			(g_cdf_trace_info[moduleId].
-			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_WARN)) ? "X" :
-			" ",
-			(g_cdf_trace_info[moduleId].
-			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_INFO)) ? "X" :
-			" ",
-			(g_cdf_trace_info[moduleId].
-			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_INFO_HIGH)) ? "X"
-			: " ",
-			(g_cdf_trace_info[moduleId].
-			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_INFO_MED)) ? "X"
-			: " ",
-			(g_cdf_trace_info[moduleId].
-			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_INFO_LOW)) ? "X"
-			: " ",
-			(g_cdf_trace_info[moduleId].
-			 moduleTraceLevel & (1 << CDF_TRACE_LEVEL_DEBUG)) ? "X" :
-			" ");
-	}
-}
-
-#define ROW_SIZE 16
-/* Buffer size = data bytes(2 hex chars plus space) + NULL */
-#define BUFFER_SIZE ((ROW_SIZE * 3) + 1)
-
-/**
- * cdf_trace_hex_dump() - externally called hex dump function
- * @module : Module identifier a member of the CDF_MODULE_ID enumeration that
- *	     identifies the module issuing the trace message.
- * @level : Trace level a member of the CDF_TRACE_LEVEL enumeration indicating
- *	    the severity of the condition causing the trace message to be
- *	    issued. More severe conditions are more likely to be logged.
- * @data : The base address of the buffer to be logged.
- * @buf_len : The size of the buffer to be logged.
- *
- *  Checks the level of severity and accordingly prints the trace messages
- *
- *  Return :  nothing
- */
-void cdf_trace_hex_dump(CDF_MODULE_ID module, CDF_TRACE_LEVEL level,
-			void *data, int buf_len)
-{
-	const u8 *ptr = data;
-	int i, linelen, remaining = buf_len;
-	unsigned char linebuf[BUFFER_SIZE];
-
-	if (!(g_cdf_trace_info[module].moduleTraceLevel &
-		CDF_TRACE_LEVEL_TO_MODULE_BITMASK(level)))
-		return;
-
-	for (i = 0; i < buf_len; i += ROW_SIZE) {
-		linelen = min(remaining, ROW_SIZE);
-		remaining -= ROW_SIZE;
-
-		hex_dump_to_buffer(ptr + i, linelen, ROW_SIZE, 1,
-				linebuf, sizeof(linebuf), false);
-
-		cdf_trace_msg(module, level, "%.8x: %s", i, linebuf);
-	}
-}
-
-#endif
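/*
 * A standalone sketch (illustrative only) of the row-by-row hex dump pattern
 * used by cdf_trace_hex_dump() above: the buffer is consumed ROW_SIZE bytes
 * at a time and each row is rendered into a small line buffer before being
 * printed with its offset.
 */
#include <stdint.h>
#include <stdio.h>

#define ROW_SIZE 16
/* 3 chars per byte ("xx ") plus the NUL terminator */
#define LINE_SIZE ((ROW_SIZE * 3) + 1)

static void hex_dump(const uint8_t *data, int len)
{
	char line[LINE_SIZE];
	int i, j, row;

	for (i = 0; i < len; i += ROW_SIZE) {
		row = (len - i < ROW_SIZE) ? (len - i) : ROW_SIZE;
		for (j = 0; j < row; j++)
			sprintf(line + (j * 3), "%02x ", data[i + j]);
		printf("%.8x: %s\n", i, line);
	}
}

int main(void)
{
	uint8_t buf[20];
	int i;

	for (i = 0; i < (int)sizeof(buf); i++)
		buf[i] = (uint8_t)i;
	hex_dump(buf, (int)sizeof(buf));
	return 0;
}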
-
-/**
- * cdf_trace_enable() - Enable MTRACE for specific modules
- * @bitmask_of_moduleId : Bitmask according to enum of the modules.
- *  32 [dec]  = 0010 0000 [bin] <enum of HDD is 5>
- *  64 [dec]  = 0100 0000 [bin] <enum of SME is 6>
- *  128 [dec] = 1000 0000 [bin] <enum of PE is 7>
- * @enable : true enables MTRACE for the selected modules, false disables it.
- *
- * Enables MTRACE for the modules whose bits are set in the bitmask when
- * enable is true; when enable is false, MTRACE is disabled for those modules.
- * Set the bitmask according to the enum value of the modules.
- * This function is called when the following ioctl is issued:
- * [iwpriv wlan0 setdumplog <value> <enable>].
- * <value> - Decimal number, e.g. 64 selects only the SME module,
- * 128 selects only the PE module, 192 selects both PE and SME.
- *
- * Return : nothing
- */
-void cdf_trace_enable(uint32_t bitmask_of_moduleId, uint8_t enable)
-{
-	int i;
-	if (bitmask_of_moduleId) {
-		for (i = 0; i < CDF_MODULE_ID_MAX; i++) {
-			if (((bitmask_of_moduleId >> i) & 1)) {
-				if (enable) {
-					if (NULL !=
-					    cdf_trace_restore_cb_table[i]) {
-						cdf_trace_cb_table[i] =
-						cdf_trace_restore_cb_table[i];
-					}
-				} else {
-					cdf_trace_restore_cb_table[i] =
-						cdf_trace_cb_table[i];
-					cdf_trace_cb_table[i] = NULL;
-				}
-			}
-		}
-	} else {
-		if (enable) {
-			for (i = 0; i < CDF_MODULE_ID_MAX; i++) {
-				if (NULL != cdf_trace_restore_cb_table[i]) {
-					cdf_trace_cb_table[i] =
-						cdf_trace_restore_cb_table[i];
-				}
-			}
-		} else {
-			for (i = 0; i < CDF_MODULE_ID_MAX; i++) {
-				cdf_trace_restore_cb_table[i] =
-					cdf_trace_cb_table[i];
-				cdf_trace_cb_table[i] = NULL;
-			}
-		}
-	}
-}
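/*
 * A standalone sketch (illustrative only) of how the setdumplog bitmask
 * described above maps module enum values to bits, assuming HDD=5, SME=6 and
 * PE=7 as noted in the comment.
 */
#include <stdio.h>

enum { MODULE_HDD = 5, MODULE_SME = 6, MODULE_PE = 7 };

int main(void)
{
	unsigned int sme_only = 1U << MODULE_SME;
	unsigned int pe_and_sme = (1U << MODULE_PE) | (1U << MODULE_SME);

	printf("SME only : %u\n", sme_only);     /*  64 */
	printf("PE + SME : %u\n", pe_and_sme);   /* 192 */
	return 0;
}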
-
-/**
- * cdf_trace_init() - initializes cdf trace structures and variables
- *
- * Called immediately after cds_preopen, so that we can start recording HDD
- * events ASAP.
- *
- * Return : nothing
- */
-void cdf_trace_init(void)
-{
-	uint8_t i;
-	g_cdf_trace_data.head = INVALID_CDF_TRACE_ADDR;
-	g_cdf_trace_data.tail = INVALID_CDF_TRACE_ADDR;
-	g_cdf_trace_data.num = 0;
-	g_cdf_trace_data.enable = true;
-	g_cdf_trace_data.dumpCount = DEFAULT_CDF_TRACE_DUMP_COUNT;
-	g_cdf_trace_data.numSinceLastDump = 0;
-
-	for (i = 0; i < CDF_MODULE_ID_MAX; i++) {
-		cdf_trace_cb_table[i] = NULL;
-		cdf_trace_restore_cb_table[i] = NULL;
-	}
-}
-
-/**
- * cdf_trace() - put a message into the ring buffer
- * @module : Enum of module, basically module id.
- * @code : Code to be recorded
- * @session : Session ID of the log
- * @data : Actual message contents
- *
- * This function is called by every module that wants to record messages in
- * the circular queue. Before calling this function make sure the module has
- * been registered with cdf through cdf_trace_register().
- *
- * Return : nothing
- */
-void cdf_trace(uint8_t module, uint8_t code, uint16_t session, uint32_t data)
-{
-	tp_cdf_trace_record rec = NULL;
-	unsigned long flags;
-
-	if (!g_cdf_trace_data.enable)
-		return;
-
-	/* if module is not registered, don't record for that module */
-	if (NULL == cdf_trace_cb_table[module])
-		return;
-
-	/* Acquire the lock so that only one thread at a time can fill the ring
-	 * buffer
-	 */
-	spin_lock_irqsave(&ltrace_lock, flags);
-
-	g_cdf_trace_data.num++;
-
-	if (g_cdf_trace_data.num > MAX_CDF_TRACE_RECORDS)
-		g_cdf_trace_data.num = MAX_CDF_TRACE_RECORDS;
-
-	if (INVALID_CDF_TRACE_ADDR == g_cdf_trace_data.head) {
-		/* first record */
-		g_cdf_trace_data.head = 0;
-		g_cdf_trace_data.tail = 0;
-	} else {
-		/* queue is not empty */
-		uint32_t tail = g_cdf_trace_data.tail + 1;
-
-		if (MAX_CDF_TRACE_RECORDS == tail)
-			tail = 0;
-
-		if (g_cdf_trace_data.head == tail) {
-			/* full */
-			if (MAX_CDF_TRACE_RECORDS == ++g_cdf_trace_data.head)
-				g_cdf_trace_data.head = 0;
-		}
-		g_cdf_trace_data.tail = tail;
-	}
-
-	rec = &g_cdf_trace_tbl[g_cdf_trace_data.tail];
-	rec->code = code;
-	rec->session = session;
-	rec->data = data;
-	rec->time = cdf_get_log_timestamp();
-	rec->module = module;
-	rec->pid = (in_interrupt() ? 0 : current->pid);
-	g_cdf_trace_data.numSinceLastDump++;
-	spin_unlock_irqrestore(&ltrace_lock, flags);
-}
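/*
 * A standalone sketch (illustrative only) of the ring-buffer bookkeeping used
 * by cdf_trace() above: tail advances for every record and, once the buffer
 * is full, head is pushed forward so it always points at the oldest record.
 */
#include <stdio.h>

#define MAX_RECORDS 4
#define INVALID_ADDR 0xffffffffU

static unsigned int head = INVALID_ADDR, tail = INVALID_ADDR, num;

static void add_record(void)
{
	if (num < MAX_RECORDS)
		num++;

	if (head == INVALID_ADDR) {
		head = tail = 0;                  /* first record */
		return;
	}

	tail = (tail + 1) % MAX_RECORDS;          /* advance with wrap-around */
	if (head == tail)                         /* full: drop the oldest */
		head = (head + 1) % MAX_RECORDS;
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++) {
		add_record();
		printf("after %d records: head=%u tail=%u num=%u\n",
		       i + 1, head, tail, num);
	}
	return 0;
}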
-
-/**
- * cdf_trace_spin_lock_init() - initializes the lock variable before use
- *
- * This function is called from cds_alloc_global_context so that the lock is
- * available for use as early as possible.
- *
- * Return : nothing
- */
-CDF_STATUS cdf_trace_spin_lock_init(void)
-{
-	spin_lock_init(&ltrace_lock);
-
-	return CDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_trace_register() - registers a callback function
- * @moduleID : enum value of the module
- * @cdf_trace_callback : callback function used to display the messages in a
- *  particular format.
- *
- * Registers the callback function that displays the recorded messages in the
- * format implemented by that callback. Interested modules should call this
- * from their init path, since registration is available as soon as the
- * modules are up.
- *
- * Return : nothing
- */
-void cdf_trace_register(CDF_MODULE_ID moduleID,
-			tp_cdf_trace_cb cdf_trace_callback)
-{
-	cdf_trace_cb_table[moduleID] = cdf_trace_callback;
-}
-
-/**
- * cdf_trace_dump_all() - Dump data from ring buffer via call back functions
- *			  registered with CDF
- * @pMac : Context of particular module
- * @code : Reason code
- * @session : Session id of log
- * @count : Number of records to dump starting from tail to head
- * @bitmask_of_module : Optional bitmask selecting which modules to dump
- *
- * This function is called when the following ioctl is issued:
- * [iwpriv wlan0 dumplog 0 0 <n> <bitmask_of_module>]
- *
- *  <n> - number of records to dump, starting from tail towards head.
- *
- *  <bitmask_of_module> - restricts the dump to messages recorded for the
- *  module(s) whose bits are set in the bitmask, out of the last <n> records.
- *  It is optional; if not provided, everything in the buffer is dumped.
- *
- * Return : nothing
- */
-void cdf_trace_dump_all(void *pMac, uint8_t code, uint8_t session,
-			uint32_t count, uint32_t bitmask_of_module)
-{
-	cdf_trace_record_t pRecord;
-	int32_t i, tail;
-
-	if (!g_cdf_trace_data.enable) {
-		CDF_TRACE(CDF_MODULE_ID_SYS,
-			  CDF_TRACE_LEVEL_ERROR, "Tracing Disabled");
-		return;
-	}
-
-	CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_INFO,
-		  "Total Records: %d, Head: %d, Tail: %d",
-		  g_cdf_trace_data.num, g_cdf_trace_data.head,
-		  g_cdf_trace_data.tail);
-
-	/* acquire the lock so that only one thread at a time can read
-	 * the ring buffer
-	 */
-	spin_lock(&ltrace_lock);
-
-	if (g_cdf_trace_data.head != INVALID_CDF_TRACE_ADDR) {
-		i = g_cdf_trace_data.head;
-		tail = g_cdf_trace_data.tail;
-
-		if (count) {
-			if (count > g_cdf_trace_data.num)
-				count = g_cdf_trace_data.num;
-			if (tail >= (count - 1))
-				i = tail - count + 1;
-			else if (count != MAX_CDF_TRACE_RECORDS)
-				i = MAX_CDF_TRACE_RECORDS - ((count - 1) -
-							     tail);
-		}
-
-		pRecord = g_cdf_trace_tbl[i];
-		/* right now we are not using numSinceLastDump member but
-		 * in future we might re-visit and use this member to track
-		 * how many latest messages got added while we were dumping
-		 * from ring buffer
-		 */
-		g_cdf_trace_data.numSinceLastDump = 0;
-		spin_unlock(&ltrace_lock);
-		for (;; ) {
-			if ((code == 0 || (code == pRecord.code)) &&
-			    (cdf_trace_cb_table[pRecord.module] != NULL)) {
-				if (0 == bitmask_of_module) {
-					cdf_trace_cb_table[pRecord.
-							   module] (pMac,
-								    &pRecord,
-								    (uint16_t)
-								    i);
-				} else {
-					if (bitmask_of_module &
-					    (1 << pRecord.module)) {
-						cdf_trace_cb_table[pRecord.
-								   module]
-							(pMac, &pRecord,
-							(uint16_t) i);
-					}
-				}
-			}
-
-			if (i == tail)
-				break;
-			i += 1;
-
-			spin_lock(&ltrace_lock);
-			if (MAX_CDF_TRACE_RECORDS == i) {
-				i = 0;
-				pRecord = g_cdf_trace_tbl[0];
-			} else {
-				pRecord = g_cdf_trace_tbl[i];
-			}
-			spin_unlock(&ltrace_lock);
-		}
-	} else {
-		spin_unlock(&ltrace_lock);
-	}
-}
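/*
 * A standalone sketch (illustrative only) of the dump-window arithmetic in
 * cdf_trace_dump_all() above: given the tail index and the number of records
 * to show, derive the starting index, wrapping backwards through the ring.
 * The count == MAX_RECORDS corner case handled in the original is omitted.
 */
#include <stdio.h>

#define MAX_RECORDS 8

static int dump_start(int tail, int count)
{
	if (tail >= count - 1)
		return tail - count + 1;  /* window fits without wrapping */
	/* window wraps: back off from the end of the ring */
	return MAX_RECORDS - ((count - 1) - tail);
}

int main(void)
{
	/* the last 3 records ending at tail=5 start at index 3 */
	printf("start = %d\n", dump_start(5, 3));
	/* the last 3 records ending at tail=1 wrap back to index 7 */
	printf("start = %d\n", dump_start(1, 3));
	return 0;
}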
-
-/**
- * cdf_dp_trace_init() - initialize and enable DP trace
- * Called during driver load; initializes the DP trace data and enables it
- *
- * Return: None
- */
-void cdf_dp_trace_init(void)
-{
-	uint8_t i;
-
-	cdf_dp_trace_spin_lock_init();
-	g_cdf_dp_trace_data.head = INVALID_CDF_DP_TRACE_ADDR;
-	g_cdf_dp_trace_data.tail = INVALID_CDF_DP_TRACE_ADDR;
-	g_cdf_dp_trace_data.num = 0;
-	g_cdf_dp_trace_data.proto_bitmap = 0;
-	g_cdf_dp_trace_data.no_of_record = 0;
-	g_cdf_dp_trace_data.verbosity    = CDF_DP_TRACE_VERBOSITY_DEFAULT;
-	g_cdf_dp_trace_data.enable = true;
-
-	for (i = 0; i < CDF_DP_TRACE_MAX; i++)
-		cdf_dp_trace_cb_table[i] = cdf_dp_display_record;
-}
-
-/**
- * cdf_dp_trace_set_value() - Configure the value to control DP trace
- * @proto_bitmap  : defines the protocols to be tracked
- * @no_of_record  : every nth packet is traced
- * @verbosity     : defines the verbosity level
- *
- * Return: None
- */
-void cdf_dp_trace_set_value(uint8_t proto_bitmap, uint8_t no_of_record,
-			 uint8_t verbosity)
-{
-	g_cdf_dp_trace_data.proto_bitmap = proto_bitmap;
-	g_cdf_dp_trace_data.no_of_record = no_of_record;
-	g_cdf_dp_trace_data.verbosity    = verbosity;
-	return;
-}
-
-/**
- * cdf_dp_trace_enable_track() - check whether an event should be traced
- * @code : defines the event
- *
- * Return: true if the event is traced at the current verbosity level,
- *	   false otherwise
- */
-static bool cdf_dp_trace_enable_track(enum CDF_DP_TRACE_ID code)
-{
-	if (g_cdf_dp_trace_data.verbosity == CDF_DP_TRACE_VERBOSITY_HIGH)
-		return true;
-	if (g_cdf_dp_trace_data.verbosity == CDF_DP_TRACE_VERBOSITY_MEDIUM
-		&& (code <= CDF_DP_TRACE_HIF_PACKET_PTR_RECORD))
-		return true;
-	if (g_cdf_dp_trace_data.verbosity == CDF_DP_TRACE_VERBOSITY_LOW
-		&& (code <= CDF_DP_TRACE_CE_PACKET_RECORD))
-		return true;
-	if (g_cdf_dp_trace_data.verbosity == CDF_DP_TRACE_VERBOSITY_DEFAULT
-		&& (code == CDF_DP_TRACE_DROP_PACKET_RECORD))
-		return true;
-	return false;
-}
-
-/**
- * cdf_dp_trace_set_track() - Marks whether the packet needs to be traced
- * @nbuf  : defines the netbuf
- *
- * Return: None
- */
-void cdf_dp_trace_set_track(cdf_nbuf_t nbuf)
-{
-	spin_lock_bh(&l_dp_trace_lock);
-	g_cdf_dp_trace_data.count++;
-	if (g_cdf_dp_trace_data.proto_bitmap != 0) {
-		if (cds_pkt_get_proto_type(nbuf,
-			g_cdf_dp_trace_data.proto_bitmap, 0)) {
-			CDF_NBUF_SET_DP_TRACE(nbuf, 1);
-		}
-	}
-	if ((g_cdf_dp_trace_data.no_of_record != 0) &&
-		(g_cdf_dp_trace_data.count %
-			g_cdf_dp_trace_data.no_of_record == 0)) {
-		CDF_NBUF_SET_DP_TRACE(nbuf, 1);
-	}
-	spin_unlock_bh(&l_dp_trace_lock);
-	return;
-}
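/*
 * A standalone sketch (illustrative only) of the "trace every Nth packet"
 * sampling rule applied by cdf_dp_trace_set_track() above when no protocol
 * bitmap is configured. The names below are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int pkt_count;

static bool should_mark_for_trace(unsigned int no_of_record)
{
	pkt_count++;
	/* no_of_record == 0 disables sampling; otherwise mark every Nth */
	return no_of_record && (pkt_count % no_of_record == 0);
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 10; i++)
		printf("packet %u marked: %d\n", i + 1,
		       should_mark_for_trace(3));
	return 0;
}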
-
-/**
- * dump_hex_trace() - Display the data in buffer
- * @buf:     buffer which contains data to be displayed
- * @buf_len: defines the size of the data to be displayed
- *
- * Return: None
- */
-static void dump_hex_trace(uint8_t *buf, uint8_t buf_len)
-{
-	uint8_t i = 0;
-	/* Dump all the bytes on a single line */
-	cdf_print("DATA: ");
-	for (i = 0; i < buf_len; i++)
-		cdf_print("%02x ", buf[i]);
-	cdf_print("\n");
-}
-
-/**
- * cdf_dp_display_record() - Displays a record in DP trace
- * @pRecord  : pointer to a record in DP trace
- * @recIndex : record index
- *
- * Return: None
- */
-void cdf_dp_display_record(struct cdf_dp_trace_record_s *pRecord ,
-				uint16_t recIndex)
-{
-	cdf_print("INDEX: %04d TIME: %012llu CODE: %02d\n", recIndex,
-						pRecord->time, pRecord->code);
-	switch (pRecord->code) {
-	case  CDF_DP_TRACE_HDD_TX_TIMEOUT:
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-						"HDD TX Timeout\n");
-		break;
-	case  CDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT:
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-						"HDD SoftAP TX Timeout\n");
-		break;
-	case  CDF_DP_TRACE_VDEV_PAUSE:
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-						"VDEV Pause\n");
-		break;
-	case  CDF_DP_TRACE_VDEV_UNPAUSE:
-		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
-						"VDEV UnPause\n");
-		break;
-	default:
-		dump_hex_trace(pRecord->data, pRecord->size);
-	}
-	return;
-}
-
-/**
- * cdf_dp_trace() - Stores the data in buffer
- * @nbuf  : defines the netbuf
- * @code : defines the event
- * @data : defines the data to be stored
- * @size : defines the size of the data record
- *
- * Return: None
- */
-void cdf_dp_trace(cdf_nbuf_t nbuf, enum CDF_DP_TRACE_ID code,
-			uint8_t *data, uint8_t size)
-{
-	struct cdf_dp_trace_record_s *rec = NULL;
-
-	/* Return when Dp trace is not enabled */
-	if (!g_cdf_dp_trace_data.enable)
-		return;
-
-	/* If nbuf is NULL, check for VDEV PAUSE, UNPAUSE, TIMEOUT */
-	if (!nbuf) {
-		switch (code) {
-		case CDF_DP_TRACE_HDD_TX_TIMEOUT:
-		case CDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT:
-		case CDF_DP_TRACE_VDEV_PAUSE:
-		case CDF_DP_TRACE_VDEV_UNPAUSE:
-			if (cdf_dp_trace_enable_track(code))
-				goto  register_record;
-			else
-				return;
-
-		default:
-			return;
-		}
-	}
-
-	/* Return when the packet is not a data packet */
-	if (NBUF_GET_PACKET_TRACK(nbuf) != NBUF_TX_PKT_DATA_TRACK)
-		return;
-
-	/* Return when nbuf is not marked for dp tracing or
-	 * verbosity does not allow
-	 */
-	if (cdf_dp_trace_enable_track(code) == false ||
-			!CDF_NBUF_GET_DP_TRACE(nbuf))
-		return;
-
-	/* Acquire the lock so that only one thread at a time can fill the ring
-	 * buffer
-	 */
-
-register_record:
-
-	spin_lock_bh(&l_dp_trace_lock);
-
-	g_cdf_dp_trace_data.num++;
-
-	if (g_cdf_dp_trace_data.num > MAX_CDF_DP_TRACE_RECORDS)
-		g_cdf_dp_trace_data.num = MAX_CDF_DP_TRACE_RECORDS;
-
-	if (INVALID_CDF_DP_TRACE_ADDR == g_cdf_dp_trace_data.head) {
-		/* first record */
-		g_cdf_dp_trace_data.head = 0;
-		g_cdf_dp_trace_data.tail = 0;
-	} else {
-		/* queue is not empty */
-		g_cdf_dp_trace_data.tail++;
-
-		if (MAX_CDF_DP_TRACE_RECORDS == g_cdf_dp_trace_data.tail)
-			g_cdf_dp_trace_data.tail = 0;
-
-		if (g_cdf_dp_trace_data.head == g_cdf_dp_trace_data.tail) {
-			/* full */
-			if (MAX_CDF_DP_TRACE_RECORDS ==
-				++g_cdf_dp_trace_data.head)
-				g_cdf_dp_trace_data.head = 0;
-		}
-	}
-
-	rec = &g_cdf_dp_trace_tbl[g_cdf_dp_trace_data.tail];
-	rec->code = code;
-	rec->size = 0;
-	if (data != NULL && size > 0) {
-		if (size > CDF_DP_TRACE_RECORD_SIZE)
-			size = CDF_DP_TRACE_RECORD_SIZE;
-
-		rec->size = size;
-		switch (code) {
-		case CDF_DP_TRACE_HDD_PACKET_PTR_RECORD:
-		case CDF_DP_TRACE_CE_PACKET_PTR_RECORD:
-		case CDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD:
-		case CDF_DP_TRACE_TXRX_PACKET_PTR_RECORD:
-		case CDF_DP_TRACE_HTT_PACKET_PTR_RECORD:
-		case CDF_DP_TRACE_HTC_PACKET_PTR_RECORD:
-		case CDF_DP_TRACE_HIF_PACKET_PTR_RECORD:
-			cdf_mem_copy(rec->data, (uint8_t *)(&data), size);
-			break;
-
-		case CDF_DP_TRACE_DROP_PACKET_RECORD:
-		case CDF_DP_TRACE_HDD_PACKET_RECORD:
-		case CDF_DP_TRACE_CE_PACKET_RECORD:
-			cdf_mem_copy(rec->data, data, size);
-			break;
-		default:
-			break;
-		}
-	}
-	rec->time = cdf_get_log_timestamp();
-	rec->pid = (in_interrupt() ? 0 : current->pid);
-	spin_unlock_bh(&l_dp_trace_lock);
-}
-
-/**
- * cdf_dp_trace_spin_lock_init() - initializes the lock variable before use
- * This function is called from cds_alloc_global_context so that the lock is
- * available for use as early as possible.
- *
- * Return : nothing
- */
-void cdf_dp_trace_spin_lock_init(void)
-{
-	spin_lock_init(&l_dp_trace_lock);
-
-	return;
-}
-
-/**
- * cdf_dp_trace_dump_all() - Dump data from the ring buffer via the callbacks
- *			  registered with CDF
- * @count : Number of records to dump starting from tail to head
- *
- * Return : nothing
- */
-void cdf_dp_trace_dump_all(uint32_t count)
-{
-	struct cdf_dp_trace_record_s pRecord;
-	int32_t i, tail;
-
-	if (!g_cdf_dp_trace_data.enable) {
-		CDF_TRACE(CDF_MODULE_ID_SYS,
-			  CDF_TRACE_LEVEL_ERROR, "Tracing Disabled");
-		return;
-	}
-
-	CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_ERROR,
-		  "Total Records: %d, Head: %d, Tail: %d",
-		  g_cdf_dp_trace_data.num, g_cdf_dp_trace_data.head,
-		  g_cdf_dp_trace_data.tail);
-
-	/* acquire the lock so that only one thread at a time can read
-	 * the ring buffer
-	 */
-	spin_lock_bh(&l_dp_trace_lock);
-
-	if (g_cdf_dp_trace_data.head != INVALID_CDF_DP_TRACE_ADDR) {
-		i = g_cdf_dp_trace_data.head;
-		tail = g_cdf_dp_trace_data.tail;
-
-		if (count) {
-			if (count > g_cdf_dp_trace_data.num)
-				count = g_cdf_dp_trace_data.num;
-			if (tail >= (count - 1))
-				i = tail - count + 1;
-			else if (count != MAX_CDF_DP_TRACE_RECORDS)
-				i = MAX_CDF_DP_TRACE_RECORDS - ((count - 1) -
-							     tail);
-		}
-
-		pRecord = g_cdf_dp_trace_tbl[i];
-		spin_unlock_bh(&l_dp_trace_lock);
-		for (;; ) {
-
-			cdf_dp_trace_cb_table[pRecord.
-					   code] (&pRecord, (uint16_t)i);
-			if (i == tail)
-				break;
-			i += 1;
-
-			spin_lock_bh(&l_dp_trace_lock);
-			if (MAX_CDF_DP_TRACE_RECORDS == i)
-				i = 0;
-
-			pRecord = g_cdf_dp_trace_tbl[i];
-			spin_unlock_bh(&l_dp_trace_lock);
-		}
-	} else {
-		spin_unlock_bh(&l_dp_trace_lock);
-	}
-}

Some files were not shown because too many files changed in this diff