Parcourir la source

qcacld-3.0: Add QDF lock API's

Replace CDF lock API's with QDF lock API's.

Change-Id: I7c4a59920e17915f077c87457c513e763738c062
CRs-Fixed: 981188
Anurag Chouhan il y a 9 ans
Parent
commit
a37b5b783a
87 fichiers modifiés avec 810 ajouts et 2020 suppressions
  1. 1 1
      core/bmi/inc/ol_if_athvar.h
  2. 0 306
      core/cdf/inc/cdf_lock.h
  3. 2 2
      core/cdf/inc/cdf_mc_timer.h
  4. 1 1
      core/cdf/inc/cdf_nbuf.h
  5. 1 1
      core/cdf/inc/osdep.h
  6. 0 648
      core/cdf/src/cdf_lock.c
  7. 13 13
      core/cdf/src/cdf_mc_timer.c
  8. 9 9
      core/cdf/src/cdf_memory.c
  9. 1 1
      core/cdf/src/cdf_nbuf.c
  10. 0 255
      core/cdf/src/i_cdf_lock.h
  11. 1 1
      core/cds/inc/cds_api.h
  12. 1 1
      core/cds/inc/cds_crypto.h
  13. 3 3
      core/cds/inc/cds_sched.h
  14. 6 6
      core/cds/src/cds_api.c
  15. 33 33
      core/cds/src/cds_concurrency.c
  16. 7 7
      core/cds/src/cds_packet.c
  17. 5 5
      core/dp/htt/htt_internal.h
  18. 4 4
      core/dp/htt/htt_t2h.c
  19. 2 2
      core/dp/htt/htt_types.h
  20. 2 2
      core/dp/ol/inc/ol_txrx_dbg.h
  21. 2 2
      core/dp/txrx/ol_rx_reorder_timeout.c
  22. 3 3
      core/dp/txrx/ol_rx_reorder_timeout.h
  23. 16 16
      core/dp/txrx/ol_tx.c
  24. 1 1
      core/dp/txrx/ol_tx.h
  25. 16 16
      core/dp/txrx/ol_tx_desc.c
  26. 9 9
      core/dp/txrx/ol_tx_queue.c
  27. 15 15
      core/dp/txrx/ol_tx_send.c
  28. 90 91
      core/dp/txrx/ol_txrx.c
  29. 46 46
      core/dp/txrx/ol_txrx_flow_control.c
  30. 4 4
      core/dp/txrx/ol_txrx_internal.h
  31. 12 12
      core/dp/txrx/ol_txrx_peer_find.c
  32. 14 14
      core/dp/txrx/ol_txrx_types.h
  33. 3 3
      core/hdd/inc/wlan_hdd_lro.h
  34. 8 8
      core/hdd/inc/wlan_hdd_main.h
  35. 1 1
      core/hdd/src/wlan_hdd_driver_ops.c
  36. 17 17
      core/hdd/src/wlan_hdd_ftm.c
  37. 6 6
      core/hdd/src/wlan_hdd_hostapd.c
  38. 107 107
      core/hdd/src/wlan_hdd_ipa.c
  39. 16 16
      core/hdd/src/wlan_hdd_lro.c
  40. 28 28
      core/hdd/src/wlan_hdd_main.c
  41. 10 10
      core/hdd/src/wlan_hdd_p2p.c
  42. 3 3
      core/hdd/src/wlan_hdd_power.c
  43. 15 15
      core/hdd/src/wlan_hdd_scan.c
  44. 1 1
      core/hdd/src/wlan_hdd_softap_tx_rx.c
  45. 1 1
      core/hdd/src/wlan_hdd_tx_rx.c
  46. 2 2
      core/mac/inc/ani_global.h
  47. 7 7
      core/mac/src/pe/lim/lim_api.c
  48. 4 4
      core/mac/src/pe/lim/lim_process_message_queue.c
  49. 8 8
      core/mac/src/pe/lim/lim_process_sme_req_messages.c
  50. 11 11
      core/sap/dfs/inc/dfs.h
  51. 2 2
      core/sap/dfs/src/dfs.c
  52. 12 12
      core/sap/dfs/src/dfs_fcc_bin5.c
  53. 12 12
      core/sap/dfs/src/dfs_misc.c
  54. 4 4
      core/sap/dfs/src/dfs_phyerr_tlv.c
  55. 17 17
      core/sap/dfs/src/dfs_process_phyerr.c
  56. 9 9
      core/sap/dfs/src/dfs_process_radarevent.c
  57. 1 1
      core/sap/src/sap_internal.h
  58. 4 4
      core/sap/src/sap_module.c
  59. 1 1
      core/sme/inc/csr_internal.h
  60. 2 2
      core/sme/inc/csr_link_list.h
  61. 1 1
      core/sme/inc/p2p_api.h
  62. 1 1
      core/sme/inc/sme_api.h
  63. 1 1
      core/sme/inc/sme_inside.h
  64. 2 2
      core/sme/inc/sme_internal.h
  65. 1 1
      core/sme/inc/sme_power_save.h
  66. 1 1
      core/sme/inc/sme_qos_api.h
  67. 1 1
      core/sme/inc/sme_qos_internal.h
  68. 1 1
      core/sme/inc/sme_rrm_api.h
  69. 1 1
      core/sme/inc/sme_rrm_internal.h
  70. 6 6
      core/sme/src/common/sme_api.c
  71. 5 5
      core/sme/src/csr/csr_link_list.c
  72. 2 2
      core/utils/epping/inc/epping_internal.h
  73. 2 2
      core/utils/epping/inc/epping_main.h
  74. 7 7
      core/utils/epping/src/epping_helper.c
  75. 6 6
      core/utils/epping/src/epping_tx.c
  76. 2 2
      core/utils/epping/src/epping_txrx.c
  77. 9 9
      core/wma/inc/wma.h
  78. 4 4
      core/wma/inc/wma_dfs_interface.h
  79. 8 8
      core/wma/src/wma_data.c
  80. 41 41
      core/wma/src/wma_dev_if.c
  81. 12 12
      core/wma/src/wma_features.c
  82. 27 27
      core/wma/src/wma_main.c
  83. 7 7
      core/wma/src/wma_mgmt.c
  84. 5 5
      core/wma/src/wma_scan_roam.c
  85. 3 3
      core/wma/src/wma_utils.c
  86. 20 20
      core/wmi/wmi_unified.c
  87. 2 2
      core/wmi/wmi_unified_priv.h

+ 1 - 1
core/bmi/inc/ol_if_athvar.h

@@ -33,7 +33,7 @@
 
 #include <osapi_linux.h>
 #include "qdf_types.h"
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 #include "wmi_unified_api.h"
 #include "htc_api.h"
 #include "bmi_msg.h"

+ 0 - 306
core/cdf/inc/cdf_lock.h

@@ -1,306 +0,0 @@
-/*
- * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-#if !defined(__CDF_LOCK_H)
-#define __CDF_LOCK_H
-
-/**
- *
- * @file  cdf_lock.h
- *
- * @brief Connectivity driver framework (CDF) lock APIs
- *
- * Definitions for CDF locks
- *
- */
-
-/* Include Files */
-#include "qdf_status.h"
-#include "i_cdf_lock.h"
-
-/* Preprocessor definitions and constants */
-
-/* Type declarations */
-/**
- * @brief Platform spinlock object
- */
-typedef __cdf_spinlock_t cdf_spinlock_t;
-/**
- * @brief Platform mutex object
- */
-typedef __cdf_semaphore_t cdf_semaphore_t;
-
-/* Function declarations and documentation */
-
-/**
- * cdf_semaphore_init() - initialize a semaphore
- * @m:  Semaphore to initialize
- *
- * Return: None
- */
-
-static inline void cdf_semaphore_init(cdf_semaphore_t *m)
-{
-	__cdf_semaphore_init(m);
-}
-
-/**
- * cdf_semaphore_acquire() - take the semaphore
- * @m:  Semaphore to take
- *
- * Return: None
- */
-static inline int cdf_semaphore_acquire(qdf_device_t osdev, cdf_semaphore_t *m)
-{
-	return __cdf_semaphore_acquire(osdev, m);
-}
-
-/**
- * cdf_semaphore_release () - give the semaphore
- * @m:  Semaphore to give
- *
- * Return: None
- */
-static inline void
-cdf_semaphore_release(qdf_device_t osdev, cdf_semaphore_t *m)
-{
-	__cdf_semaphore_release(osdev, m);
-}
-
-/**
- * cdf_mutex_init() - initialize a CDF lock
- * @lock:	 Pointer to the opaque lock object to initialize
- *
- * cdf_mutex_init() function initializes the specified lock. Upon
- * successful initialization, the state of the lock becomes initialized
- * and unlocked.
- *
- * A lock must be initialized by calling cdf_mutex_init() before it
- * may be used in any other lock functions.
- *
- * Attempting to initialize an already initialized lock results in
- * a failure.
- *
- * Return:
- *	QDF_STATUS_SUCCESS:	lock was successfully initialized
- *	CDF failure reason codes: lock is not initialized and can't be used
- */
-QDF_STATUS cdf_mutex_init(cdf_mutex_t *lock);
-
-/**
- * cdf_mutex_acquire () - acquire a CDF lock
- * @lock:	 Pointer to the opaque lock object to acquire
- *
- * A lock object is acquired by calling cdf_mutex_acquire().  If the lock
- * is already locked, the calling thread shall block until the lock becomes
- * available. This operation shall return with the lock object referenced by
- * lock in the locked state with the calling thread as its owner.
- *
- * Return:
- *	QDF_STATUS_SUCCESS:	lock was successfully initialized
- *	CDF failure reason codes: lock is not initialized and can't be used
- */
-QDF_STATUS cdf_mutex_acquire(cdf_mutex_t *lock);
-
-/**
- * cdf_mutex_release() - release a CDF lock
- * @lock:	 Pointer to the opaque lock object to be released
- *
- * cdf_mutex_release() function shall release the lock object
- * referenced by 'lock'.
- *
- * If a thread attempts to release a lock that it did not lock or is not
- * initialized, an error is returned.
- *
- * Return:
- *	QDF_STATUS_SUCCESS:	lock was successfully initialized
- *	CDF failure reason codes: lock is not initialized and can't be used
- */
-QDF_STATUS cdf_mutex_release(cdf_mutex_t *lock);
-
-/**
- * cdf_mutex_destroy() - destroy a CDF lock
- * @lock:	 Pointer to the opaque lock object to be destroyed
- *
- * cdf_mutex_destroy() function shall destroy the lock object
- * referenced by lock.  After a successful return from cdf_mutex_destroy()
- * the lock object becomes, in effect, uninitialized.
- *
- * A destroyed lock object can be reinitialized using cdf_mutex_init();
- * the results of otherwise referencing the object after it has been destroyed
- * are undefined.  Calls to CDF lock functions to manipulate the lock such
- * as cdf_mutex_acquire() will fail if the lock is destroyed.  Therefore,
- * don't use the lock after it has been destroyed until it has
- * been re-initialized.
- *
- * Return:
- *	QDF_STATUS_SUCCESS:	lock was successfully initialized
- *	CDF failure reason codes: lock is not initialized and can't be used
- */
-QDF_STATUS cdf_mutex_destroy(cdf_mutex_t *lock);
-
-/**
- * cdf_spinlock_init() - initialize a spinlock
- * @lock: Spinlock object pointer
- *
- * Return: None
- */
-static inline void cdf_spinlock_init(cdf_spinlock_t *lock)
-{
-	__cdf_spinlock_init(lock);
-}
-
-/**
- * cdf_spinlock_destroy() - delete a spinlock
- * @lock: Spinlock object pointer
- *
- * Return: None
- */
-static inline void cdf_spinlock_destroy(cdf_spinlock_t *lock)
-{
-	__cdf_spinlock_destroy(lock);
-}
-
-/**
- * cdf_spin_lock_bh() - locks the spinlock semaphore in soft irq context
- * @lock: Spinlock object pointer
- *
- * Return: None
- */
-static inline void cdf_spin_lock_bh(cdf_spinlock_t *lock)
-{
-	__cdf_spin_lock_bh(lock);
-}
-
-/**
- * cdf_spin_lock_bh() - unlocks the spinlock semaphore in soft irq context
- * @lock: Spinlock object pointer
- *
- * Return: None
- */
-static inline void cdf_spin_unlock_bh(cdf_spinlock_t *lock)
-{
-	__cdf_spin_unlock_bh(lock);
-}
-
-/**
- * cdf_wake_lock_init() - initializes a CDF wake lock
- * @lock: The wake lock to initialize
- * @name: Name of wake lock
- *
- * Return:
- *    CDF status success : if wake lock is initialized
- *    CDF status failure : if wake lock was not initialized
- */
-QDF_STATUS cdf_wake_lock_init(cdf_wake_lock_t *lock, const char *name);
-
-/**
- * cdf_wake_lock_acquire() - acquires a wake lock
- * @lock:	The wake lock to acquire
- * @reason:	Reason for taking wakelock
- *
- * Return:
- *    CDF status success : if wake lock is acquired
- *    CDF status failure : if wake lock was not acquired
- */
-QDF_STATUS cdf_wake_lock_acquire(cdf_wake_lock_t *pLock, uint32_t reason);
-
-/**
- * cdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout
- * @lock:	The wake lock to acquire
- * @reason:	Reason for taking wakelock
- *
- * Return:
- *   CDF status success : if wake lock is acquired
- *   CDF status failure : if wake lock was not acquired
- */
-QDF_STATUS cdf_wake_lock_timeout_acquire(cdf_wake_lock_t *pLock,
-					 uint32_t msec, uint32_t reason);
-
-/**
- * cdf_wake_lock_release() - releases a wake lock
- * @lock:	the wake lock to release
- * @reason:	Reason for taking wakelock
- *
- * Return:
- *    CDF status success : if wake lock is acquired
- *    CDF status failure : if wake lock was not acquired
- */
-QDF_STATUS cdf_wake_lock_release(cdf_wake_lock_t *pLock, uint32_t reason);
-
-/**
- * cdf_wake_lock_destroy() - destroys a wake lock
- * @lock:	The wake lock to destroy
- *
- * Return:
- * CDF status success :	if wake lock is acquired
- * CDF status failure :	if wake lock was not acquired
- */
-QDF_STATUS cdf_wake_lock_destroy(cdf_wake_lock_t *pLock);
-
-struct hif_pm_runtime_lock;
-typedef struct hif_pm_runtime_lock *cdf_runtime_lock_t;
-
-QDF_STATUS cdf_runtime_pm_get(void);
-QDF_STATUS cdf_runtime_pm_put(void);
-QDF_STATUS cdf_runtime_pm_prevent_suspend(cdf_runtime_lock_t lock);
-QDF_STATUS cdf_runtime_pm_allow_suspend(cdf_runtime_lock_t lock);
-cdf_runtime_lock_t cdf_runtime_lock_init(const char *name);
-void cdf_runtime_lock_deinit(cdf_runtime_lock_t lock);
-
-/**
- * cdf_spinlock_acquire() - acquires a spin lock
- * @lock:	Spin lock to acquire
- *
- * Return:
- *    CDF status success : if wake lock is acquired
- *    CDF status failure : if wake lock was not acquired
- */
-QDF_STATUS cdf_spinlock_acquire(cdf_spinlock_t *pLock);
-
-/**
- * cdf_spinlock_release() - release a spin lock
- * @lock:	Spin lock to release
- *
- * Return:
- * CDF status success :	if wake lock is acquired
- * CDF status failure :	if wake lock was not acquired
- */
-QDF_STATUS cdf_spinlock_release(cdf_spinlock_t *pLock);
-
-#define cdf_spin_lock(_lock) __cdf_spin_lock(_lock)
-#define cdf_spin_unlock(_lock) __cdf_spin_unlock(_lock)
-#define cdf_spin_lock_irqsave(_lock) __cdf_spin_lock_irqsave(_lock)
-#define cdf_spin_unlock_irqrestore(_lock) \
-	__cdf_spin_unlock_irqrestore(_lock)
-#define cdf_spin_lock_irq(_pLock, _flags)   __cdf_spin_lock_irq(_pLock, _flags)
-#define cdf_spin_unlock_irq(_pLock, _flags) \
-	__cdf_spin_unlock_irq(_pLock, _flags)
-
-#define cdf_in_softirq() __cdf_in_softirq()
-
-#endif /* __CDF_LOCK_H */

+ 2 - 2
core/cdf/inc/cdf_mc_timer.h

@@ -37,7 +37,7 @@
 /* Include Files */
 #include <qdf_types.h>
 #include <qdf_status.h>
-#include <cdf_lock.h>
+#include <qdf_lock.h>
 #include <i_cdf_mc_timer.h>
 
 #ifdef TIMER_MANAGER
@@ -79,7 +79,7 @@ typedef struct cdf_mc_timer_s {
 	cdf_mc_timer_platform_t platformInfo;
 	cdf_mc_timer_callback_t callback;
 	void *userData;
-	cdf_mutex_t lock;
+	qdf_mutex_t lock;
 	QDF_TIMER_TYPE type;
 	CDF_TIMER_STATE state;
 } cdf_mc_timer_t;

+ 1 - 1
core/cdf/inc/cdf_nbuf.h

@@ -35,7 +35,7 @@
 #include <cdf_util.h>
 #include <qdf_types.h>
 #include <cdf_net_types.h>
-#include <cdf_lock.h>
+#include <qdf_lock.h>
 #include <i_cdf_nbuf.h>
 #include <asm/cacheflush.h>
 

+ 1 - 1
core/cdf/inc/osdep.h

@@ -30,7 +30,7 @@
 
 #include <qdf_types.h>
 #include <cdf_memory.h>
-#include <cdf_lock.h>
+#include <qdf_lock.h>
 #include <qdf_time.h>
 #include <qdf_timer.h>
 #include <qdf_defer.h>

+ 0 - 648
core/cdf/src/cdf_lock.c

@@ -1,648 +0,0 @@
-/*
- * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-/**
- * DOC: cdf_lock.c
- *
- * OVERVIEW: This source file contains definitions for CDF lock APIs
- *	     The four APIs mentioned in this file are used for
- *	     initializing, acquiring, releasing and destroying a lock.
- *	     the lock are implemented using critical sections
- */
-
-/* Include Files */
-
-#include "cdf_lock.h"
-#include "cdf_memory.h"
-#include "cdf_trace.h"
-#include <qdf_types.h>
-#ifdef CONFIG_CNSS
-#include <net/cnss.h>
-#endif
-#include "i_host_diag_core_event.h"
-#include "cds_api.h"
-#include "ani_global.h"
-#include "hif.h"
-
-/* Preprocessor Definitions and Constants */
-#define LINUX_LOCK_COOKIE 0x12345678
-
-#define WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT 0
-#define WIFI_POWER_EVENT_WAKELOCK_TAKEN 0
-#define WIFI_POWER_EVENT_WAKELOCK_RELEASED 1
-
-/* Type Declarations */
-
-enum {
-	LOCK_RELEASED = 0x11223344,
-	LOCK_ACQUIRED,
-	LOCK_DESTROYED
-};
-
-/* Global Data Definitions */
-
-/* Function Definitions and Documentation */
-
-/**
- * cdf_mutex_init() - initialize a CDF lock
- * @lock:        Pointer to the opaque lock object to initialize
- *
- * cdf_mutex_init() function initializes the specified lock. Upon
- * successful initialization, the state of the lock becomes initialized
- * and unlocked.
- *
- * A lock must be initialized by calling cdf_mutex_init() before it
- * may be used in any other lock functions.
- *
- * Attempting to initialize an already initialized lock results in
- * a failure.
- *
- * Return:
- *      QDF_STATUS_SUCCESS:     lock was successfully initialized
- *      CDF failure reason codes: lock is not initialized and can't be used
- */
-QDF_STATUS cdf_mutex_init(cdf_mutex_t *lock)
-{
-	/* check for invalid pointer */
-	if (lock == NULL) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: NULL pointer passed in", __func__);
-		return QDF_STATUS_E_FAULT;
-	}
-	/* check for 'already initialized' lock */
-	if (LINUX_LOCK_COOKIE == lock->cookie) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: already initialized lock", __func__);
-		return QDF_STATUS_E_BUSY;
-	}
-
-	if (in_interrupt()) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s cannot be called from interrupt context!!!",
-			  __func__);
-		return QDF_STATUS_E_FAULT;
-	}
-
-	/* initialize new lock */
-	mutex_init(&lock->m_lock);
-	lock->cookie = LINUX_LOCK_COOKIE;
-	lock->state = LOCK_RELEASED;
-	lock->processID = 0;
-	lock->refcount = 0;
-
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_mutex_acquire() - acquire a CDF lock
- * @lock:        Pointer to the opaque lock object to acquire
- *
- * A lock object is acquired by calling cdf_mutex_acquire().  If the lock
- * is already locked, the calling thread shall block until the lock becomes
- * available. This operation shall return with the lock object referenced by
- * lock in the locked state with the calling thread as its owner.
- *
- * Return:
- *      QDF_STATUS_SUCCESS:     lock was successfully initialized
- *      CDF failure reason codes: lock is not initialized and can't be used
- */
-QDF_STATUS cdf_mutex_acquire(cdf_mutex_t *lock)
-{
-	int rc;
-	/* check for invalid pointer */
-	if (lock == NULL) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: NULL pointer passed in", __func__);
-		CDF_ASSERT(0);
-		return QDF_STATUS_E_FAULT;
-	}
-	/* check if lock refers to an initialized object */
-	if (LINUX_LOCK_COOKIE != lock->cookie) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: uninitialized lock", __func__);
-		CDF_ASSERT(0);
-		return QDF_STATUS_E_INVAL;
-	}
-
-	if (in_interrupt()) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s cannot be called from interrupt context!!!",
-			  __func__);
-		CDF_ASSERT(0);
-		return QDF_STATUS_E_FAULT;
-	}
-	if ((lock->processID == current->pid) &&
-		(lock->state == LOCK_ACQUIRED)) {
-		lock->refcount++;
-#ifdef CDF_NESTED_LOCK_DEBUG
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_INFO,
-			  "%s: %x %d %d", __func__, lock, current->pid,
-			  lock->refcount);
-#endif
-		return QDF_STATUS_SUCCESS;
-	}
-	/* acquire a Lock */
-	mutex_lock(&lock->m_lock);
-	rc = mutex_is_locked(&lock->m_lock);
-	if (rc == 0) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: unable to lock mutex (rc = %d)", __func__, rc);
-		CDF_ASSERT(0);
-		return QDF_STATUS_E_FAILURE;
-	}
-#ifdef CDF_NESTED_LOCK_DEBUG
-	CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_INFO,
-		  "%s: %x %d", __func__, lock, current->pid);
-#endif
-	if (LOCK_DESTROYED != lock->state) {
-		lock->processID = current->pid;
-		lock->refcount++;
-		lock->state = LOCK_ACQUIRED;
-		return QDF_STATUS_SUCCESS;
-	} else {
-		/* lock is already destroyed */
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: Lock is already destroyed", __func__);
-		mutex_unlock(&lock->m_lock);
-		CDF_ASSERT(0);
-		return QDF_STATUS_E_FAILURE;
-	}
-}
-
-/**
- * cdf_mutex_release() - release a CDF lock
- * @lock:        Pointer to the opaque lock object to be released
- *
- * cdf_mutex_release() function shall release the lock object
- * referenced by 'lock'.
- *
- * If a thread attempts to release a lock that it did not lock or is not
- * initialized, an error is returned.
- *
- * Return:
- *      QDF_STATUS_SUCCESS:     lock was successfully initialized
- *      CDF failure reason codes: lock is not initialized and can't be used
- */
-QDF_STATUS cdf_mutex_release(cdf_mutex_t *lock)
-{
-	/* check for invalid pointer */
-	if (lock == NULL) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: NULL pointer passed in", __func__);
-		CDF_ASSERT(0);
-		return QDF_STATUS_E_FAULT;
-	}
-
-	/* check if lock refers to an uninitialized object */
-	if (LINUX_LOCK_COOKIE != lock->cookie) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: uninitialized lock", __func__);
-		CDF_ASSERT(0);
-		return QDF_STATUS_E_INVAL;
-	}
-
-	if (in_interrupt()) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s cannot be called from interrupt context!!!",
-			  __func__);
-		CDF_ASSERT(0);
-		return QDF_STATUS_E_FAULT;
-	}
-
-	/* CurrentThread = GetCurrentThreadId();
-	 * Check thread ID of caller against thread ID
-	 * of the thread which acquire the lock
-	 */
-	if (lock->processID != current->pid) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: current task pid does not match original task pid!!",
-			  __func__);
-#ifdef CDF_NESTED_LOCK_DEBUG
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_INFO,
-			  "%s: Lock held by=%d being released by=%d",
-			  __func__, lock->processID, current->pid);
-#endif
-		CDF_ASSERT(0);
-		return QDF_STATUS_E_PERM;
-	}
-	if ((lock->processID == current->pid) &&
-		(lock->state == LOCK_ACQUIRED)) {
-		if (lock->refcount > 0)
-			lock->refcount--;
-	}
-#ifdef CDF_NESTED_LOCK_DEBUG
-	CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_INFO,
-		  "%s: %x %d %d", __func__, lock, lock->processID,
-		  lock->refcount);
-#endif
-	if (lock->refcount)
-		return QDF_STATUS_SUCCESS;
-
-	lock->processID = 0;
-	lock->refcount = 0;
-	lock->state = LOCK_RELEASED;
-	/* release a Lock */
-	mutex_unlock(&lock->m_lock);
-#ifdef CDF_NESTED_LOCK_DEBUG
-	CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_INFO,
-		  "%s: Freeing lock %x %d %d", lock, lock->processID,
-		  lock->refcount);
-#endif
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_mutex_destroy() - destroy a CDF lock
- * @lock:        Pointer to the opaque lock object to be destroyed
- *
- * cdf_mutex_destroy() function shall destroy the lock object
- * referenced by lock.  After a successful return from cdf_mutex_destroy()
- * the lock object becomes, in effect, uninitialized.
- *
- * A destroyed lock object can be reinitialized using cdf_mutex_init();
- * the results of otherwise referencing the object after it has been destroyed
- * are undefined.  Calls to CDF lock functions to manipulate the lock such
- * as cdf_mutex_acquire() will fail if the lock is destroyed.  Therefore,
- * don't use the lock after it has been destroyed until it has
- * been re-initialized.
- *
- * Return:
- *      QDF_STATUS_SUCCESS:     lock was successfully initialized
- *      CDF failure reason codes: lock is not initialized and can't be used
- */
-QDF_STATUS cdf_mutex_destroy(cdf_mutex_t *lock)
-{
-	/* check for invalid pointer */
-	if (NULL == lock) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: NULL pointer passed in", __func__);
-		return QDF_STATUS_E_FAULT;
-	}
-
-	if (LINUX_LOCK_COOKIE != lock->cookie) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: uninitialized lock", __func__);
-		return QDF_STATUS_E_INVAL;
-	}
-
-	if (in_interrupt()) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s cannot be called from interrupt context!!!",
-			  __func__);
-		return QDF_STATUS_E_FAULT;
-	}
-
-	/* check if lock is released */
-	if (!mutex_trylock(&lock->m_lock)) {
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-			  "%s: lock is not released", __func__);
-		return QDF_STATUS_E_BUSY;
-	}
-	lock->cookie = 0;
-	lock->state = LOCK_DESTROYED;
-	lock->processID = 0;
-	lock->refcount = 0;
-
-	mutex_unlock(&lock->m_lock);
-
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_spinlock_acquire() - acquires a spin lock
- * @pLock:       Spin lock to acquire
- *
- * Return:
- *    CDF status success : if wake lock is acquired
- *    CDF status failure : if wake lock was not acquired
- */
-QDF_STATUS cdf_spinlock_acquire(cdf_spinlock_t *pLock)
-{
-	spin_lock(&pLock->spinlock);
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_spinlock_release() - release a spin lock
- * @pLock:       Spin lock to release
- *
- * Return:
- * CDF status success : if wake lock is acquired
- * CDF status failure : if wake lock was not acquired
- */
-QDF_STATUS cdf_spinlock_release(cdf_spinlock_t *pLock)
-{
-	spin_unlock(&pLock->spinlock);
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_wake_lock_name() - This function returns the name of the wakelock
- * @pLock: Pointer to the wakelock
- *
- * This function returns the name of the wakelock
- *
- * Return: Pointer to the name if it is valid or a default string
- *
- */
-static const char *cdf_wake_lock_name(cdf_wake_lock_t *pLock)
-{
-#if defined CONFIG_CNSS
-	if (pLock->name)
-		return pLock->name;
-#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-	if (pLock->ws.name)
-		return pLock->ws.name;
-#endif
-	return "UNNAMED_WAKELOCK";
-}
-
-/**
- * cdf_wake_lock_init() - initializes a CDF wake lock
- * @pLock: The wake lock to initialize
- * @name: Name of wake lock
- *
- * Return:
- *    CDF status success : if wake lock is initialized
- *    CDF status failure : if wake lock was not initialized
- */
-QDF_STATUS cdf_wake_lock_init(cdf_wake_lock_t *pLock, const char *name)
-{
-#if defined CONFIG_CNSS
-	cnss_pm_wake_lock_init(pLock, name);
-#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-	wake_lock_init(pLock, WAKE_LOCK_SUSPEND, name);
-#endif
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_wake_lock_acquire() - acquires a wake lock
- * @pLock:       The wake lock to acquire
- * @reason:      Reason for wakelock
- *
- * Return:
- *    CDF status success : if wake lock is acquired
- *    CDF status failure : if wake lock was not acquired
- */
-QDF_STATUS cdf_wake_lock_acquire(cdf_wake_lock_t *pLock, uint32_t reason)
-{
-	host_diag_log_wlock(reason, cdf_wake_lock_name(pLock),
-			WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
-			WIFI_POWER_EVENT_WAKELOCK_TAKEN);
-#if defined CONFIG_CNSS
-	cnss_pm_wake_lock(pLock);
-#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-	wake_lock(pLock);
-#endif
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_wake_lock_timeout_acquire() - acquires a wake lock with a timeout
- * @pLock:       The wake lock to acquire
- * @reason:      Reason for wakelock
- *
- * Return:
- *   CDF status success : if wake lock is acquired
- *   CDF status failure : if wake lock was not acquired
- */
-QDF_STATUS cdf_wake_lock_timeout_acquire(cdf_wake_lock_t *pLock, uint32_t msec,
-					 uint32_t reason)
-{
-	/* Wakelock for Rx is frequent.
-	 * It is reported only during active debug
-	 */
-	if (((cds_get_ring_log_level(RING_ID_WAKELOCK) >= WLAN_LOG_LEVEL_ACTIVE)
-			&& (WIFI_POWER_EVENT_WAKELOCK_HOLD_RX == reason)) ||
-			(WIFI_POWER_EVENT_WAKELOCK_HOLD_RX != reason)) {
-		host_diag_log_wlock(reason, cdf_wake_lock_name(pLock), msec,
-				WIFI_POWER_EVENT_WAKELOCK_TAKEN);
-	}
-#if defined CONFIG_CNSS
-	cnss_pm_wake_lock_timeout(pLock, msec);
-#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-	wake_lock_timeout(pLock, msecs_to_jiffies(msec));
-#endif
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_wake_lock_release() - releases a wake lock
- * @pLock:       the wake lock to release
- * @reason:      Reason for wakelock
- *
- * Return:
- *    CDF status success : if wake lock is acquired
- *    CDF status failure : if wake lock was not acquired
- */
-QDF_STATUS cdf_wake_lock_release(cdf_wake_lock_t *pLock, uint32_t reason)
-{
-	host_diag_log_wlock(reason, cdf_wake_lock_name(pLock),
-			WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
-			WIFI_POWER_EVENT_WAKELOCK_RELEASED);
-#if defined CONFIG_CNSS
-	cnss_pm_wake_lock_release(pLock);
-#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-	wake_unlock(pLock);
-#endif
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_wake_lock_destroy() - destroys a wake lock
- * @pLock:       The wake lock to destroy
- *
- * Return:
- * CDF status success : if wake lock is acquired
- * CDF status failure : if wake lock was not acquired
- */
-QDF_STATUS cdf_wake_lock_destroy(cdf_wake_lock_t *pLock)
-{
-#if defined CONFIG_CNSS
-	cnss_pm_wake_lock_destroy(pLock);
-#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-	wake_lock_destroy(pLock);
-#endif
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_runtime_pm_get() - do a get operation on the device
- *
- * A get operation will prevent a runtime suspend until a
- * corresponding put is done.  This api should be used when sending
- * data.
- *
- * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
- * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
- *
- * return: success if the bus is up and a get has been issued
- *   otherwise an error code.
- */
-QDF_STATUS cdf_runtime_pm_get(void)
-{
-	void *ol_sc;
-	int ret;
-
-	ol_sc = cds_get_context(QDF_MODULE_ID_HIF);
-
-	if (ol_sc == NULL) {
-		CDF_ASSERT(0);
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-				"%s: HIF context is null!", __func__);
-		return QDF_STATUS_E_INVAL;
-	}
-
-	ret = hif_pm_runtime_get(ol_sc);
-
-	if (ret)
-		return QDF_STATUS_E_FAILURE;
-
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_runtime_pm_put() - do a put operation on the device
- *
- * A put operation will allow a runtime suspend after a corresponding
- * get was done.  This api should be used when sending data.
- *
- * This api will return a failure if the hif module hasn't been initialized
- *
- * return: QDF_STATUS_SUCCESS if the put is performed
- */
-QDF_STATUS cdf_runtime_pm_put(void)
-{
-	void *ol_sc;
-	int ret;
-
-	ol_sc = cds_get_context(QDF_MODULE_ID_HIF);
-
-	if (ol_sc == NULL) {
-		CDF_ASSERT(0);
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-				"%s: HIF context is null!", __func__);
-		return QDF_STATUS_E_INVAL;
-	}
-
-	ret = hif_pm_runtime_put(ol_sc);
-
-	if (ret)
-		return QDF_STATUS_E_FAILURE;
-
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_runtime_pm_prevent_suspend() - prevent a runtime bus suspend
- * @lock: an opaque context for tracking
- *
- * The lock can only be acquired once per lock context and is tracked.
- *
- * return: QDF_STATUS_SUCCESS or failure code.
- */
-QDF_STATUS cdf_runtime_pm_prevent_suspend(cdf_runtime_lock_t lock)
-{
-	void *ol_sc;
-	int ret;
-
-	ol_sc = cds_get_context(QDF_MODULE_ID_HIF);
-
-	if (ol_sc == NULL) {
-		CDF_ASSERT(0);
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-				"%s: HIF context is null!", __func__);
-		return QDF_STATUS_E_INVAL;
-	}
-
-	ret = hif_pm_runtime_prevent_suspend(ol_sc, lock);
-
-	if (ret)
-		return QDF_STATUS_E_FAILURE;
-
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_runtime_pm_allow_suspend() - allow a previously prevented runtime bus suspend
- * @lock: an opaque context for tracking
- *
- * The lock can only be acquired once per lock context and is tracked.
- *
- * return: QDF_STATUS_SUCCESS or failure code.
- */
-QDF_STATUS cdf_runtime_pm_allow_suspend(cdf_runtime_lock_t lock)
-{
-	void *ol_sc;
-	int ret;
-
-	ol_sc = cds_get_context(QDF_MODULE_ID_HIF);
-
-	if (ol_sc == NULL) {
-		CDF_ASSERT(0);
-		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
-				"%s: HIF context is null!", __func__);
-		return QDF_STATUS_E_INVAL;
-	}
-
-	ret = hif_pm_runtime_allow_suspend(ol_sc, lock);
-
-	if (ret)
-		return QDF_STATUS_E_FAILURE;
-
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * cdf_runtime_lock_init() - initialize runtime lock
- * @name: name of the runtime lock
- *
- * Initialize a runtime pm lock.  This lock can be used
- * to prevent the runtime pm system from putting the bus
- * to sleep.
- *
- * Return: runtime_pm_lock_t
- */
-cdf_runtime_lock_t cdf_runtime_lock_init(const char *name)
-{
-	return hif_runtime_lock_init(name);
-}
-
-/**
- * cdf_runtime_lock_deinit() - deinitialize runtime pm lock
- * @lock: the lock to deinitialize
- *
- * Ensures the lock is released. Frees the runtime lock.
- *
- * Return: void
- */
-void cdf_runtime_lock_deinit(cdf_runtime_lock_t lock)
-{
-	void *hif_ctx = cds_get_context(CDF_MODULE_ID_HIF);
-	hif_runtime_lock_deinit(hif_ctx, lock);
-}

+ 13 - 13
core/cdf/src/cdf_mc_timer.c

@@ -33,7 +33,7 @@
 
 /* Include Files */
 #include <cdf_mc_timer.h>
-#include <cdf_lock.h>
+#include <qdf_lock.h>
 #include <cds_api.h>
 #include "wlan_qct_sys.h"
 #include "cds_sched.h"
@@ -48,7 +48,7 @@
 
 /* Static Variable Definitions */
 static unsigned int persistent_timer_count;
-static cdf_mutex_t persistent_timer_count_lock;
+static qdf_mutex_t persistent_timer_count_lock;
 
 /* Function declarations and documenation */
 
@@ -64,14 +64,14 @@ static cdf_mutex_t persistent_timer_count_lock;
 static void try_allowing_sleep(QDF_TIMER_TYPE type)
 {
 	if (QDF_TIMER_TYPE_WAKE_APPS == type) {
-		/* cdf_mutex_acquire(&persistent_timer_count_lock); */
+		/* qdf_mutex_acquire(&persistent_timer_count_lock); */
 		persistent_timer_count--;
 		if (0 == persistent_timer_count) {
 			/* since the number of persistent timers has
 			   decreased from 1 to 0, the timer should allow
 			   sleep sleep_assert_okts( sleepClientHandle ); */
 		}
-		/* cdf_mutex_release(&persistent_timer_count_lock); */
+		/* qdf_mutex_release(&persistent_timer_count_lock); */
 	}
 }
 
@@ -228,13 +228,13 @@ void cdf_timer_module_init(void)
 {
 	CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_INFO,
 		  "Initializing the CDF timer module");
-	cdf_mutex_init(&persistent_timer_count_lock);
+	qdf_mutex_create(&persistent_timer_count_lock);
 }
 
 #ifdef TIMER_MANAGER
 
 qdf_list_t cdf_timer_list;
-cdf_spinlock_t cdf_timer_list_lock;
+qdf_spinlock_t cdf_timer_list_lock;
 
 static void cdf_timer_clean(void);
 
@@ -248,7 +248,7 @@ static void cdf_timer_clean(void);
 void cdf_mc_timer_manager_init(void)
 {
 	qdf_list_create(&cdf_timer_list, 1000);
-	cdf_spinlock_init(&cdf_timer_list_lock);
+	qdf_spinlock_create(&cdf_timer_list_lock);
 	return;
 }
 
@@ -276,10 +276,10 @@ static void cdf_timer_clean(void)
 			  __func__, (int)listSize);
 
 		do {
-			cdf_spin_lock_irqsave(&cdf_timer_list_lock);
+			qdf_spin_lock_irqsave(&cdf_timer_list_lock);
 			qdf_status =
 				qdf_list_remove_front(&cdf_timer_list, &pNode);
-			cdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
+			qdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
 			if (QDF_STATUS_SUCCESS == qdf_status) {
 				ptimerNode = (cdf_mc_timer_node_t *) pNode;
 				CDF_TRACE(QDF_MODULE_ID_QDF,
@@ -371,10 +371,10 @@ QDF_STATUS cdf_mc_timer_init_debug(cdf_mc_timer_t *timer,
 	timer->ptimerNode->lineNum = lineNum;
 	timer->ptimerNode->cdf_timer = timer;
 
-	cdf_spin_lock_irqsave(&cdf_timer_list_lock);
+	qdf_spin_lock_irqsave(&cdf_timer_list_lock);
 	qdf_status = qdf_list_insert_front(&cdf_timer_list,
 					   &timer->ptimerNode->pNode);
-	cdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
+	qdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
 	if (QDF_STATUS_SUCCESS != qdf_status) {
 		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
 			  "%s: Unable to insert node into List qdf_status %d",
@@ -477,10 +477,10 @@ QDF_STATUS cdf_mc_timer_destroy(cdf_mc_timer_t *timer)
 		return QDF_STATUS_E_INVAL;
 	}
 
-	cdf_spin_lock_irqsave(&cdf_timer_list_lock);
+	qdf_spin_lock_irqsave(&cdf_timer_list_lock);
 	status = qdf_list_remove_node(&cdf_timer_list,
 				       &timer->ptimerNode->pNode);
-	cdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
+	qdf_spin_unlock_irqrestore(&cdf_timer_list_lock);
 	if (status != QDF_STATUS_SUCCESS) {
 		CDF_ASSERT(0);
 		return QDF_STATUS_E_INVAL;

+ 9 - 9
core/cdf/src/cdf_memory.c

@@ -35,7 +35,7 @@
 #include "cdf_memory.h"
 #include "cdf_nbuf.h"
 #include "cdf_trace.h"
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 #include "cdf_mc_timer.h"
 
 #if defined(CONFIG_CNSS)
@@ -51,7 +51,7 @@
 #include <linux/stacktrace.h>
 
 qdf_list_t cdf_mem_list;
-cdf_spinlock_t cdf_mem_list_lock;
+qdf_spinlock_t cdf_mem_list_lock;
 
 static uint8_t WLAN_MEM_HEADER[] = { 0x61, 0x62, 0x63, 0x64, 0x65, 0x66,
 					0x67, 0x68 };
@@ -137,7 +137,7 @@ void cdf_mem_init(void)
 {
 	/* Initalizing the list with maximum size of 60000 */
 	qdf_list_create(&cdf_mem_list, 60000);
-	cdf_spinlock_init(&cdf_mem_list_lock);
+	qdf_spinlock_create(&cdf_mem_list_lock);
 	cdf_net_buf_debug_init();
 	return;
 }
@@ -169,10 +169,10 @@ void cdf_mem_clean(void)
 			  __func__, (int)listSize);
 
 		do {
-			cdf_spin_lock(&cdf_mem_list_lock);
+			qdf_spin_lock(&cdf_mem_list_lock);
 			qdf_status =
 				qdf_list_remove_front(&cdf_mem_list, &pNode);
-			cdf_spin_unlock(&cdf_mem_list_lock);
+			qdf_spin_unlock(&cdf_mem_list_lock);
 			if (QDF_STATUS_SUCCESS == qdf_status) {
 				memStruct = (struct s_cdf_mem_struct *)pNode;
 				/* Take care to log only once multiple memory
@@ -301,10 +301,10 @@ void *cdf_mem_malloc_debug(size_t size, char *fileName, uint32_t lineNum)
 		cdf_mem_copy((uint8_t *) (memStruct + 1) + size,
 			     &WLAN_MEM_TAIL[0], sizeof(WLAN_MEM_TAIL));
 
-		cdf_spin_lock_irqsave(&cdf_mem_list_lock);
+		qdf_spin_lock_irqsave(&cdf_mem_list_lock);
 		qdf_status = qdf_list_insert_front(&cdf_mem_list,
 						   &memStruct->pNode);
-		cdf_spin_unlock_irqrestore(&cdf_mem_list_lock);
+		qdf_spin_unlock_irqrestore(&cdf_mem_list_lock);
 		if (QDF_STATUS_SUCCESS != qdf_status) {
 			CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
 				  "%s: Unable to insert node into List qdf_status %d",
@@ -338,10 +338,10 @@ void cdf_mem_free(void *ptr)
 			return;
 #endif
 
-		cdf_spin_lock_irqsave(&cdf_mem_list_lock);
+		qdf_spin_lock_irqsave(&cdf_mem_list_lock);
 		qdf_status =
 			qdf_list_remove_node(&cdf_mem_list, &memStruct->pNode);
-		cdf_spin_unlock_irqrestore(&cdf_mem_list_lock);
+		qdf_spin_unlock_irqrestore(&cdf_mem_list_lock);
 
 		if (QDF_STATUS_SUCCESS == qdf_status) {
 			if (0 == cdf_mem_compare(memStruct->header,

+ 1 - 1
core/cdf/src/cdf_nbuf.c

@@ -45,7 +45,7 @@
 #include <cdf_memory.h>
 #include <cdf_trace.h>
 #include <qdf_status.h>
-#include <cdf_lock.h>
+#include <qdf_lock.h>
 
 /* Packet Counter */
 static uint32_t nbuf_tx_mgmt[NBUF_TX_PKT_STATE_MAX];

+ 0 - 255
core/cdf/src/i_cdf_lock.h

@@ -1,255 +0,0 @@
-/*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
-
-#if !defined(__I_CDF_LOCK_H)
-#define __I_CDF_LOCK_H
-
-/**
- * DOC: i_cdf_lock.h
- *
- * Linux-specific definitions for CDF Locks
- *
- */
-
-/* Include Files */
-#include <qdf_types.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/device.h>
-#include <linux/semaphore.h>
-#include <linux/interrupt.h>
-#if defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-#include <linux/wakelock.h>
-#endif
-
-/* Preprocessor definitions and constants */
-
-/* define for flag */
-#define ADF_OS_LINUX_UNLOCK_BH  1
-
-#ifdef __cplusplus
-extern "C" {
-#endif /* __cplusplus */
-
-/**
- * typedef struct - cdf_mutex_t
- * @m_lock: Mutex lock
- * @cookie: Lock cookie
- * @processID: Process ID to track lock
- * @state: Lock status
- * @refcount: Reference count for recursive lock
- */
-typedef struct cdf_lock_s {
-	struct mutex m_lock;
-	uint32_t cookie;
-	int processID;
-	uint32_t state;
-	uint8_t refcount;
-} cdf_mutex_t;
-
-/**
- * typedef struct - cdf_spinlock_t
- * @spinlock: Spin lock
- * @flags: Lock flag
- * @_flags: Internal lock flag
- */
-typedef struct __cdf_spinlock {
-	spinlock_t spinlock;
-	unsigned int flags;
-	unsigned long _flags;
-} cdf_spinlock_t;
-
-typedef cdf_spinlock_t __cdf_spinlock_t;
-typedef struct semaphore __cdf_semaphore_t;
-
-#if defined CONFIG_CNSS
-typedef struct wakeup_source cdf_wake_lock_t;
-#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
-typedef struct wake_lock cdf_wake_lock_t;
-#else
-typedef int cdf_wake_lock_t;
-#endif
-
-/* Function declarations and documenation */
-
-/**
- * __cdf_semaphore_init() - initialize the semaphore
- * @m: Semaphore object
- *
- * Return: QDF_STATUS_SUCCESS
- */
-static inline QDF_STATUS __cdf_semaphore_init(struct semaphore *m)
-{
-	sema_init(m, 1);
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * __cdf_semaphore_acquire() - acquire semaphore
- * @m: Semaphore object
- *
- * Return: 0
- */
-static inline int
-__cdf_semaphore_acquire(qdf_device_t osdev, struct semaphore *m)
-{
-	down(m);
-	return 0;
-}
-
-/**
- * __cdf_semaphore_release() - release semaphore
- * @m: Semaphore object
- *
- * Return: result of UP operation in integer
- */
-static inline void
-__cdf_semaphore_release(qdf_device_t osdev, struct semaphore *m)
-{
-	up(m);
-}
-
-/**
- * __cdf_spinlock_init() - initialize spin lock
- * @lock: Spin lock object
- *
- * Return: QDF_STATUS_SUCCESS
- */
-static inline QDF_STATUS __cdf_spinlock_init(__cdf_spinlock_t *lock)
-{
-	spin_lock_init(&lock->spinlock);
-	lock->flags = 0;
-
-	return QDF_STATUS_SUCCESS;
-}
-
-#define __cdf_spinlock_destroy(lock)
-/**
- * __cdf_spin_lock() - Acquire a Spinlock(SMP) & disable Preemption (Preemptive)
- * @lock: Lock object
- *
- * Return: none
- */
-static inline void
-__cdf_spin_lock(__cdf_spinlock_t *lock)
-{
-	spin_lock(&lock->spinlock);
-}
-
-/**
- * __cdf_spin_unlock() - Unlock the spinlock and enables the Preemption
- * @lock: Lock object
- *
- * Return: none
- */
-static inline void
-__cdf_spin_unlock(__cdf_spinlock_t *lock)
-{
-	spin_unlock(&lock->spinlock);
-}
-
-/**
- * __cdf_spin_lock_irqsave() - Acquire a Spinlock (SMP) & disable Preemption
- *				(Preemptive) and disable IRQs
- * @lock: Lock object
- *
- * Return: none
- */
-static inline void
-__cdf_spin_lock_irqsave(__cdf_spinlock_t *lock)
-{
-	spin_lock_irqsave(&lock->spinlock, lock->_flags);
-}
-/**
- * __cdf_spin_unlock_irqrestore() - Unlock the spinlock and enables the
- *					Preemption and enable IRQ
- * @lock: Lock object
- *
- * Return: none
- */
-static inline void
-__cdf_spin_unlock_irqrestore(__cdf_spinlock_t *lock)
-{
-	spin_unlock_irqrestore(&lock->spinlock, lock->_flags);
-}
-
-/*
- * Synchronous versions - only for OS' that have interrupt disable
- */
-#define __cdf_spin_lock_irq(_pLock, _flags)    spin_lock_irqsave(_pLock, _flags)
-#define __cdf_spin_unlock_irq(_pLock, _flags)  spin_unlock_irqrestore(_pLock, _flags)
-
-/**
- * __cdf_spin_lock_bh() - Acquire the spinlock and disable bottom halves
- * @lock: Lock object
- *
- * Return: none
- */
-static inline void
-__cdf_spin_lock_bh(__cdf_spinlock_t *lock)
-{
-	if (likely(irqs_disabled() || in_softirq())) {
-		spin_lock(&lock->spinlock);
-	} else {
-		spin_lock_bh(&lock->spinlock);
-		lock->flags |= ADF_OS_LINUX_UNLOCK_BH;
-	}
-
-}
-
-/**
- * __cdf_spin_unlock_bh() - Release the spinlock and enable bottom halves
- * @lock: Lock object
- *
- * Return: none
- */
-static inline void
-__cdf_spin_unlock_bh(__cdf_spinlock_t *lock)
-{
-	if (unlikely(lock->flags & ADF_OS_LINUX_UNLOCK_BH)) {
-		lock->flags &= ~ADF_OS_LINUX_UNLOCK_BH;
-		spin_unlock_bh(&lock->spinlock);
-	} else
-		spin_unlock(&lock->spinlock);
-}
-
-/**
- * __cdf_in_softirq() - in soft irq context
- *
- * Return: true if in softirs context else false
- */
-static inline bool __cdf_in_softirq(void)
-{
-	return in_softirq();
-}
-
-#ifdef __cplusplus
-}
-#endif /* __cplusplus */
-
-#endif /* __I_CDF_LOCK_H */

+ 1 - 1
core/cds/inc/cds_api.h

@@ -40,7 +40,7 @@
 #include <qdf_list.h>
 #include <cdf_trace.h>
 #include <qdf_event.h>
-#include <cdf_lock.h>
+#include <qdf_lock.h>
 #include <cds_reg_service.h>
 #include <cds_mq.h>
 #include <cds_packet.h>

+ 1 - 1
core/cds/inc/cds_crypto.h

@@ -40,7 +40,7 @@
 #include <qdf_list.h>
 #include <cdf_trace.h>
 #include <qdf_event.h>
-#include <cdf_lock.h>
+#include <qdf_lock.h>
 #include <cds_reg_service.h>
 #include <cds_mq.h>
 #include <cds_packet.h>

+ 3 - 3
core/cds/inc/cds_sched.h

@@ -47,7 +47,7 @@
 #endif
 #include <cds_mq.h>
 #include <qdf_types.h>
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 
 #define TX_POST_EVENT_MASK               0x001
 #define TX_SUSPEND_EVENT_MASK            0x002
@@ -283,9 +283,9 @@ typedef struct _cds_context_type {
 	uint32_t driver_debug_log_level;
 	uint32_t fw_debug_log_level;
 	struct cds_log_complete log_complete;
-	cdf_spinlock_t bug_report_lock;
+	qdf_spinlock_t bug_report_lock;
 	qdf_event_t connection_update_done_evt;
-	cdf_mutex_t cdf_conc_list_lock;
+	qdf_mutex_t cdf_conc_list_lock;
 
 } cds_context_type, *p_cds_contextType;
 

+ 6 - 6
core/cds/src/cds_api.c

@@ -1855,7 +1855,7 @@ void cds_init_log_completion(void)
 	/* Attempting to initialize an already initialized lock
 	 * results in a failure. This must be ok here.
 	 */
-	cdf_spinlock_init(&p_cds_context->bug_report_lock);
+	qdf_spinlock_create(&p_cds_context->bug_report_lock);
 }
 
 /**
@@ -1877,7 +1877,7 @@ void cds_deinit_log_completion(void)
 		return;
 	}
 
-	cdf_spinlock_destroy(&p_cds_context->bug_report_lock);
+	qdf_spinlock_destroy(&p_cds_context->bug_report_lock);
 }
 
 /**
@@ -1904,12 +1904,12 @@ QDF_STATUS cds_set_log_completion(uint32_t is_fatal,
 		return QDF_STATUS_E_FAILURE;
 	}
 
-	cdf_spinlock_acquire(&p_cds_context->bug_report_lock);
+	qdf_spinlock_acquire(&p_cds_context->bug_report_lock);
 	p_cds_context->log_complete.is_fatal = is_fatal;
 	p_cds_context->log_complete.indicator = indicator;
 	p_cds_context->log_complete.reason_code = reason_code;
 	p_cds_context->log_complete.is_report_in_progress = true;
-	cdf_spinlock_release(&p_cds_context->bug_report_lock);
+	qdf_spinlock_release(&p_cds_context->bug_report_lock);
 	return QDF_STATUS_SUCCESS;
 }
 
@@ -1936,12 +1936,12 @@ void cds_get_log_completion(uint32_t *is_fatal,
 		return;
 	}
 
-	cdf_spinlock_acquire(&p_cds_context->bug_report_lock);
+	qdf_spinlock_acquire(&p_cds_context->bug_report_lock);
 	*is_fatal =  p_cds_context->log_complete.is_fatal;
 	*indicator = p_cds_context->log_complete.indicator;
 	*reason_code = p_cds_context->log_complete.reason_code;
 	p_cds_context->log_complete.is_report_in_progress = false;
-	cdf_spinlock_release(&p_cds_context->bug_report_lock);
+	qdf_spinlock_release(&p_cds_context->bug_report_lock);
 }
 
 /**

+ 33 - 33
core/cds/src/cds_concurrency.c

@@ -2026,7 +2026,7 @@ bool cds_set_connection_in_progress(bool value)
 		return false;
 	}
 
-	cdf_spin_lock(&hdd_ctx->connection_status_lock);
+	qdf_spin_lock(&hdd_ctx->connection_status_lock);
 	/*
 	 * if the value is set to true previously and if someone is
 	 * trying to make it true again then it could be some race
@@ -2037,7 +2037,7 @@ bool cds_set_connection_in_progress(bool value)
 		status = false;
 	else
 		hdd_ctx->connection_in_progress = value;
-	cdf_spin_unlock(&hdd_ctx->connection_status_lock);
+	qdf_spin_unlock(&hdd_ctx->connection_status_lock);
 	return status;
 }
 
@@ -2203,7 +2203,7 @@ static void cds_update_hw_mode_conn_info(uint32_t num_vdev_mac_entries,
 		return;
 	}
 
-	cdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
 	for (i = 0; i < num_vdev_mac_entries; i++) {
 		conn_index = 0;
 		found = 0;
@@ -2236,7 +2236,7 @@ static void cds_update_hw_mode_conn_info(uint32_t num_vdev_mac_entries,
 			  conc_connection_list[conn_index].rx_spatial_stream);
 		}
 	}
-	cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 }
 
 /**
@@ -3533,7 +3533,7 @@ void cds_incr_active_session(enum tQDF_ADAPTER_MODE mode,
 	 * Need to aquire mutex as entire functionality in this function
 	 * is in critical section
 	 */
-	cdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
 	switch (mode) {
 	case QDF_STA_MODE:
 	case QDF_P2P_CLIENT_MODE:
@@ -3558,7 +3558,7 @@ void cds_incr_active_session(enum tQDF_ADAPTER_MODE mode,
 		cds_info("Set PCL of STA to FW");
 	}
 	cds_incr_connection_count(session_id);
-	cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 }
 
 /**
@@ -3716,7 +3716,7 @@ void cds_decr_session_set_pcl(enum tQDF_ADAPTER_MODE mode,
 	 * given to the FW. After setting the PCL, we need to restore
 	 * the entry that we have saved before.
 	 */
-	cdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
 	cds_set_pcl_for_existing_combo(CDS_STA_MODE);
 	/* do we need to change the HW mode */
 	if (cds_need_opportunistic_upgrade()) {
@@ -3729,7 +3729,7 @@ void cds_decr_session_set_pcl(enum tQDF_ADAPTER_MODE mode,
 		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
 			cds_err("Failed to start dbs opportunistic timer");
 	}
-	cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 
 	return;
 }
@@ -3768,7 +3768,7 @@ void cds_decr_active_session(enum tQDF_ADAPTER_MODE mode,
 	 * Need to aquire mutex as entire functionality in this function
 	 * is in critical section
 	 */
-	cdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
 	switch (mode) {
 	case QDF_STA_MODE:
 	case QDF_P2P_CLIENT_MODE:
@@ -3784,7 +3784,7 @@ void cds_decr_active_session(enum tQDF_ADAPTER_MODE mode,
 	cds_info("No.# of active sessions for mode %d = %d",
 		mode, hdd_ctx->no_of_active_sessions[mode]);
 	cds_decr_connection_count(session_id);
-	cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 }
 
 /**
@@ -3807,7 +3807,7 @@ void cds_dbs_opportunistic_timer_handler(void *data)
 		return;
 	}
 
-	cdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
 	/* if we still need it */
 	action = cds_need_opportunistic_upgrade();
 	if (action) {
@@ -3819,7 +3819,7 @@ void cds_dbs_opportunistic_timer_handler(void *data)
 		cds_next_actions(0, action,
 				CDS_UPDATE_REASON_OPPORTUNISTIC);
 	}
-	cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 
 }
 
@@ -3840,7 +3840,7 @@ QDF_STATUS cds_deinit_policy_mgr(void)
 		cds_err("Invalid CDS Context");
 		return QDF_STATUS_E_FAILURE;
 	}
-	if (!QDF_IS_STATUS_SUCCESS(cdf_mutex_destroy(
+	if (!QDF_IS_STATUS_SUCCESS(qdf_mutex_destroy(
 					&cds_ctx->cdf_conc_list_lock))) {
 		cds_err("Failed to destroy cdf_conc_list_lock");
 		return QDF_STATUS_E_FAILURE;
@@ -3879,7 +3879,7 @@ QDF_STATUS cds_init_policy_mgr(void)
 	/* init conc_connection_list */
 	cdf_mem_zero(conc_connection_list, sizeof(conc_connection_list));
 
-	if (!QDF_IS_STATUS_SUCCESS(cdf_mutex_init(
+	if (!QDF_IS_STATUS_SUCCESS(qdf_mutex_create(
 					&cds_ctx->cdf_conc_list_lock))) {
 		cds_err("Failed to init cdf_conc_list_lock");
 		/* Lets us not proceed further */
@@ -4093,7 +4093,7 @@ QDF_STATUS cds_update_connection_info(uint32_t vdev_id)
 		return status;
 	}
 
-	cdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
 	while (CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) {
 		if (vdev_id == conc_connection_list[conn_index].vdev_id) {
 			/* debug msg */
@@ -4104,7 +4104,7 @@ QDF_STATUS cds_update_connection_info(uint32_t vdev_id)
 	}
 	if (!found) {
 		/* err msg */
-		cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+		qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 		cds_err("can't find vdev_id %d in conc_connection_list",
 			vdev_id);
 		return status;
@@ -4114,7 +4114,7 @@ QDF_STATUS cds_update_connection_info(uint32_t vdev_id)
 
 	if (NULL == wma_conn_table_entry) {
 		/* err msg*/
-		cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+		qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 		cds_err("can't find vdev_id %d in WMA table", vdev_id);
 		return status;
 	}
@@ -4129,7 +4129,7 @@ QDF_STATUS cds_update_connection_info(uint32_t vdev_id)
 			wma_conn_table_entry->tx_streams,
 			wma_conn_table_entry->rx_streams,
 			wma_conn_table_entry->nss, vdev_id, true);
-	cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 	return QDF_STATUS_SUCCESS;
 }
 
@@ -4878,7 +4878,7 @@ bool cds_allow_concurrency(enum cds_con_mode mode,
 		return status;
 	}
 
-	cdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
 	/* find the current connection state from conc_connection_list*/
 	num_connections = cds_get_connection_count();
 
@@ -5065,7 +5065,7 @@ bool cds_allow_concurrency(enum cds_con_mode mode,
 	status = true;
 
 done:
-	cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 	return status;
 }
 
@@ -5572,7 +5572,7 @@ QDF_STATUS cds_current_connections_update(uint32_t session_id,
 	else
 		band = CDS_BAND_5;
 
-	cdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
 	num_connections = cds_get_connection_count();
 
 	cds_debug("num_connections=%d channel=%d",
@@ -5629,7 +5629,7 @@ QDF_STATUS cds_current_connections_update(uint32_t session_id,
 		reason, session_id);
 
 done:
-	cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 	return status;
 }
 
@@ -5721,10 +5721,10 @@ void cds_nss_update_cb(void *context, uint8_t tx_status, uint8_t vdev_id,
 	/*
 	 * Check if we are ok to request for HW mode change now
 	 */
-	cdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
 	conn_index = cds_get_connection_for_vdev_id(vdev_id);
 	if (MAX_NUMBER_OF_CONC_CONNECTIONS == conn_index) {
-		cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+		qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 		cds_err("connection not found for vdev %d", vdev_id);
 		return;
 	}
@@ -5747,7 +5747,7 @@ void cds_nss_update_cb(void *context, uint8_t tx_status, uint8_t vdev_id,
 		cds_next_actions(vdev_id,
 				next_action,
 				CDS_UPDATE_REASON_NSS_UPDATE);
-	cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 	return;
 }
 
@@ -7404,7 +7404,7 @@ QDF_STATUS cds_update_connection_info_utfw(
 		return status;
 	}
 
-	cdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
 	while (CONC_CONNECTION_LIST_VALID_INDEX(conn_index)) {
 		if (vdev_id == conc_connection_list[conn_index].vdev_id) {
 			/* debug msg */
@@ -7415,7 +7415,7 @@ QDF_STATUS cds_update_connection_info_utfw(
 	}
 	if (!found) {
 		/* err msg */
-		cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+		qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 		cds_err("can't find vdev_id %d in conc_connection_list",
 			vdev_id);
 		return status;
@@ -7426,7 +7426,7 @@ QDF_STATUS cds_update_connection_info_utfw(
 			cds_get_mode(type, sub_type),
 			channelid, mac_id, chain_mask, tx_streams,
 			rx_streams, 0, vdev_id, true);
-	cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 
 	return QDF_STATUS_SUCCESS;
 }
@@ -7453,11 +7453,11 @@ QDF_STATUS cds_incr_connection_count_utfw(
 		return status;
 	}
 
-	cdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
 	conn_index = cds_get_connection_count();
 	if (MAX_NUMBER_OF_CONC_CONNECTIONS <= conn_index) {
 		/* err msg */
-		cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+		qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 		cds_err("exceeded max connection limit %d",
 			MAX_NUMBER_OF_CONC_CONNECTIONS);
 		return status;
@@ -7468,7 +7468,7 @@ QDF_STATUS cds_incr_connection_count_utfw(
 			     cds_get_mode(type, sub_type),
 			     channelid, mac_id, chain_mask, tx_streams,
 			     rx_streams, 0, vdev_id, true);
-	cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+	qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 
 	return QDF_STATUS_SUCCESS;
 }
@@ -7492,9 +7492,9 @@ QDF_STATUS cds_decr_connection_count_utfw(uint32_t del_all,
 			return QDF_STATUS_E_FAILURE;
 		}
 	} else {
-		cdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
+		qdf_mutex_acquire(&cds_ctx->cdf_conc_list_lock);
 		cds_decr_connection_count(vdev_id);
-		cdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
+		qdf_mutex_release(&cds_ctx->cdf_conc_list_lock);
 	}
 
 	return QDF_STATUS_SUCCESS;

+ 7 - 7
core/cds/src/cds_packet.c

@@ -69,7 +69,7 @@ typedef struct {
 
 cds_pkt_proto_trace_t *trace_buffer = NULL;
 unsigned int trace_buffer_order = 0;
-cdf_spinlock_t trace_buffer_lock;
+qdf_spinlock_t trace_buffer_lock;
 #endif /* QCA_PKT_PROTO_TRACE */
 
 /**
@@ -193,7 +193,7 @@ void cds_pkt_trace_buf_update(char *event_string)
 
 	CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_INFO,
 		  "%s %d, %s", __func__, __LINE__, event_string);
-	cdf_spinlock_acquire(&trace_buffer_lock);
+	qdf_spinlock_acquire(&trace_buffer_lock);
 	slot = trace_buffer_order % CDS_PKT_TRAC_MAX_TRACE_BUF;
 	trace_buffer[slot].order = trace_buffer_order;
 	trace_buffer[slot].event_time = cdf_mc_timer_get_system_time();
@@ -204,7 +204,7 @@ void cds_pkt_trace_buf_update(char *event_string)
 		     (CDS_PKT_TRAC_MAX_STRING_LEN < strlen(event_string)) ?
 		     CDS_PKT_TRAC_MAX_STRING_LEN : strlen(event_string));
 	trace_buffer_order++;
-	cdf_spinlock_release(&trace_buffer_lock);
+	qdf_spinlock_release(&trace_buffer_lock);
 
 	return;
 }
@@ -217,7 +217,7 @@ void cds_pkt_trace_buf_dump(void)
 {
 	uint32_t slot, idx;
 
-	cdf_spinlock_acquire(&trace_buffer_lock);
+	qdf_spinlock_acquire(&trace_buffer_lock);
 	CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
 		  "PACKET TRACE DUMP START Current Timestamp %u",
 		  (unsigned int)cdf_mc_timer_get_system_time());
@@ -245,7 +245,7 @@ void cds_pkt_trace_buf_dump(void)
 
 	CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
 		  "PACKET TRACE DUMP END");
-	cdf_spinlock_release(&trace_buffer_lock);
+	qdf_spinlock_release(&trace_buffer_lock);
 
 	return;
 }
@@ -257,7 +257,7 @@ void cds_pkt_trace_buf_dump(void)
 void cds_pkt_proto_trace_init(void)
 {
 	/* Init spin lock to protect global memory */
-	cdf_spinlock_init(&trace_buffer_lock);
+	qdf_spinlock_create(&trace_buffer_lock);
 	trace_buffer_order = 0;
 
 	trace_buffer = cdf_mem_malloc(CDS_PKT_TRAC_MAX_TRACE_BUF *
@@ -278,7 +278,7 @@ void cds_pkt_proto_trace_close(void)
 	CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_ERROR,
 		  "%s %d", __func__, __LINE__);
 	cdf_mem_free(trace_buffer);
-	cdf_spinlock_destroy(&trace_buffer_lock);
+	qdf_spinlock_destroy(&trace_buffer_lock);
 
 	return;
 }

+ 5 - 5
core/dp/htt/htt_internal.h

@@ -362,16 +362,16 @@ static inline void htt_print_rx_desc(struct htt_host_rx_desc_base *rx_desc)
 #endif
 
 #define HTT_TX_MUTEX_INIT(_mutex)			\
-	cdf_spinlock_init(_mutex)
+	qdf_spinlock_create(_mutex)
 
 #define HTT_TX_MUTEX_ACQUIRE(_mutex)			\
-	cdf_spin_lock_bh(_mutex)
+	qdf_spin_lock_bh(_mutex)
 
 #define HTT_TX_MUTEX_RELEASE(_mutex)			\
-	cdf_spin_unlock_bh(_mutex)
+	qdf_spin_unlock_bh(_mutex)
 
 #define HTT_TX_MUTEX_DESTROY(_mutex)			\
-	cdf_spinlock_destroy(_mutex)
+	qdf_spinlock_destroy(_mutex)
 
 #define HTT_TX_DESC_PADDR(_pdev, _tx_desc_vaddr)       \
 	((_pdev)->tx_descs.pool_paddr +  (uint32_t)	  \
@@ -381,7 +381,7 @@ static inline void htt_print_rx_desc(struct htt_host_rx_desc_base *rx_desc)
 #ifdef ATH_11AC_TXCOMPACT
 
 #define HTT_TX_NBUF_QUEUE_MUTEX_INIT(_pdev)		\
-	cdf_spinlock_init(&_pdev->txnbufq_mutex)
+	qdf_spinlock_create(&_pdev->txnbufq_mutex)
 
 #define HTT_TX_NBUF_QUEUE_MUTEX_DESTROY(_pdev)	       \
 	HTT_TX_MUTEX_DESTROY(&_pdev->txnbufq_mutex)

+ 4 - 4
core/dp/htt/htt_t2h.c

@@ -143,7 +143,7 @@ void htt_t2h_lp_msg_handler(void *context, cdf_nbuf_t htt_t2h_msg)
 	switch (msg_type) {
 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
 	{
-		cdf_runtime_pm_put();
+		qdf_runtime_pm_put();
 		pdev->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
 		pdev->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
 		qdf_print
@@ -290,7 +290,7 @@ void htt_t2h_lp_msg_handler(void *context, cdf_nbuf_t htt_t2h_msg)
 			ol_tx_single_completion_handler(pdev->txrx_pdev,
 							compl_msg->status,
 							compl_msg->desc_id);
-			cdf_runtime_pm_put();
+			qdf_runtime_pm_put();
 			HTT_TX_SCHED(pdev);
 		} else {
 			qdf_print("Ignoring HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND indication");
@@ -306,7 +306,7 @@ void htt_t2h_lp_msg_handler(void *context, cdf_nbuf_t htt_t2h_msg)
 		cookie |= ((uint64_t) (*(msg_word + 2))) << 32;
 
 		stats_info_list = (uint8_t *) (msg_word + 3);
-		cdf_runtime_pm_put();
+		qdf_runtime_pm_put();
 		ol_txrx_fw_stats_handler(pdev->txrx_pdev, cookie,
 					 stats_info_list);
 		break;
@@ -362,7 +362,7 @@ void htt_t2h_lp_msg_handler(void *context, cdf_nbuf_t htt_t2h_msg)
 		uint8_t *op_msg_buffer;
 		uint8_t *msg_start_ptr;
 
-		cdf_runtime_pm_put();
+		qdf_runtime_pm_put();
 		msg_start_ptr = (uint8_t *) msg_word;
 		op_code =
 			HTT_WDI_IPA_OP_RESPONSE_OP_CODE_GET(*msg_word);

+ 2 - 2
core/dp/htt/htt_types.h

@@ -30,7 +30,7 @@
 
 #include <osdep.h>              /* uint16_t, dma_addr_t */
 #include <qdf_types.h>          /* qdf_device_t */
-#include <cdf_lock.h>           /* cdf_spinlock_t */
+#include <qdf_lock.h>           /* qdf_spinlock_t */
 #include <qdf_timer.h>		/* qdf_timer_t */
 #include <qdf_atomic.h>         /* qdf_atomic_inc */
 #include <cdf_nbuf.h>           /* cdf_nbuf_t */
@@ -41,7 +41,7 @@
 
 #define DEBUG_DMA_DONE
 
-#define HTT_TX_MUTEX_TYPE cdf_spinlock_t
+#define HTT_TX_MUTEX_TYPE qdf_spinlock_t
 
 #ifdef QCA_TX_HTT2_SUPPORT
 #ifndef HTC_TX_HTT2_MAX_SIZE

+ 2 - 2
core/dp/ol/inc/ol_txrx_dbg.h

@@ -33,7 +33,7 @@
 #define _OL_TXRX_DBG__H_
 
 #include <athdefs.h>            /* A_STATUS, uint64_t */
-#include <cdf_lock.h>           /* cdf_semaphore_t */
+#include <qdf_lock.h>           /* qdf_semaphore_t */
 #include <htt.h>                /* htt_dbg_stats_type */
 #include <ol_txrx_stats.h>      /* ol_txrx_stats */
 
@@ -70,7 +70,7 @@ struct ol_txrx_stats_req {
 	 */
 	struct {
 		int blocking;
-		cdf_semaphore_t *sem_ptr;
+		qdf_semaphore_t *sem_ptr;
 	} wait;
 };
 

+ 2 - 2
core/dp/txrx/ol_rx_reorder_timeout.c

@@ -136,7 +136,7 @@ static void ol_rx_reorder_timeout(void *arg)
 	time_now_ms = qdf_system_ticks_to_msecs(qdf_system_ticks());
 
 	pdev = rx_reorder_timeout_ac->pdev;
-	cdf_spin_lock(&pdev->rx.mutex);
+	qdf_spin_lock(&pdev->rx.mutex);
 /* TODO: conditionally take mutex lock during regular rx */
 	TAILQ_FOREACH_SAFE(list_elem,
 			   &rx_reorder_timeout_ac->virtual_timer_list,
@@ -165,7 +165,7 @@ static void ol_rx_reorder_timeout(void *arg)
 	if (!TAILQ_EMPTY(&rx_reorder_timeout_ac->virtual_timer_list))
 		ol_rx_reorder_timeout_start(rx_reorder_timeout_ac, time_now_ms);
 
-	cdf_spin_unlock(&pdev->rx.mutex);
+	qdf_spin_unlock(&pdev->rx.mutex);
 }
 
 void ol_rx_reorder_timeout_init(struct ol_txrx_pdev_t *pdev)

+ 3 - 3
core/dp/txrx/ol_rx_reorder_timeout.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -46,9 +46,9 @@ void ol_rx_reorder_timeout_peer_cleanup(struct ol_txrx_peer_t *peer);
 #define OL_RX_REORDER_TIMEOUT_PEER_TID_INIT(peer, tid) \
 	(peer)->tids_rx_reorder[(tid)].timeout.active = 0
 #define OL_RX_REORDER_TIMEOUT_MUTEX_LOCK(pdev) \
-	cdf_spin_lock(&(pdev)->rx.mutex)
+	qdf_spin_lock(&(pdev)->rx.mutex)
 #define OL_RX_REORDER_TIMEOUT_MUTEX_UNLOCK(pdev) \
-	cdf_spin_unlock(&(pdev)->rx.mutex)
+	qdf_spin_unlock(&(pdev)->rx.mutex)
 
 #else
 

+ 16 - 16
core/dp/txrx/ol_tx.c

@@ -757,9 +757,9 @@ static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
 {
 	int max_to_accept;
 
-	cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
 	if (vdev->ll_pause.paused_reason) {
-		cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
 		return;
 	}
 
@@ -814,14 +814,14 @@ static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
 			vdev->ll_pause.q_overflow_cnt++;
 	}
 
-	cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
 }
 
 static cdf_nbuf_t
 ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
 			      cdf_nbuf_t msdu_list, uint8_t start_timer)
 {
-	cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
 	while (msdu_list &&
 	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
 		cdf_nbuf_t next = cdf_nbuf_next(msdu_list);
@@ -851,7 +851,7 @@ ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
 					OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
 		vdev->ll_pause.is_q_timer_on = true;
 	}
-	cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
 
 	return msdu_list;
 }
@@ -942,17 +942,17 @@ void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
 		more = 0;
 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
 
-			cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+			qdf_spin_lock_bh(&vdev->ll_pause.mutex);
 			if (vdev->ll_pause.txq.depth) {
 				if (vdev->ll_pause.paused_reason) {
-					cdf_spin_unlock_bh(&vdev->ll_pause.
+					qdf_spin_unlock_bh(&vdev->ll_pause.
 							   mutex);
 					continue;
 				}
 
 				tx_msdu = vdev->ll_pause.txq.head;
 				if (NULL == tx_msdu) {
-					cdf_spin_unlock_bh(&vdev->ll_pause.
+					qdf_spin_unlock_bh(&vdev->ll_pause.
 							   mutex);
 					continue;
 				}
@@ -987,22 +987,22 @@ void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
 			/*check if there are more msdus to transmit */
 			if (vdev->ll_pause.txq.depth)
 				more = 1;
-			cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
 		}
 	} while (more && max_to_send);
 
 	vdev = NULL;
 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
-		cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+		qdf_spin_lock_bh(&vdev->ll_pause.mutex);
 		if (vdev->ll_pause.txq.depth) {
 			qdf_timer_stop(&pdev->tx_throttle.tx_timer);
 			qdf_timer_start(
 				&pdev->tx_throttle.tx_timer,
 				OL_TX_VDEV_PAUSE_QUEUE_SEND_PERIOD_MS);
-			cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
 			return;
 		}
-		cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
 	}
 }
 
@@ -1376,7 +1376,7 @@ void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
 		c_element->next = NULL;
 	}
 	pdev->tso_seg_pool.pool_size = num_seg;
-	cdf_spinlock_init(&pdev->tso_seg_pool.tso_mutex);
+	qdf_spinlock_create(&pdev->tso_seg_pool.tso_mutex);
 }
 
 void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
@@ -1385,7 +1385,7 @@ void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
 	struct cdf_tso_seg_elem_t *c_element;
 	struct cdf_tso_seg_elem_t *temp;
 
-	cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
+	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
 	c_element = pdev->tso_seg_pool.freelist;
 	for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
 		temp = c_element->next;
@@ -1398,7 +1398,7 @@ void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
 	pdev->tso_seg_pool.freelist = NULL;
 	pdev->tso_seg_pool.num_free = 0;
 	pdev->tso_seg_pool.pool_size = 0;
-	cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
-	cdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
+	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
+	qdf_spinlock_destroy(&pdev->tso_seg_pool.tso_mutex);
 }
 #endif /* FEATURE_TSO */

+ 1 - 1
core/dp/txrx/ol_tx.h

@@ -33,7 +33,7 @@
 #define _OL_TX__H_
 
 #include <cdf_nbuf.h>           /* cdf_nbuf_t */
-#include <cdf_lock.h>
+#include <qdf_lock.h>
 #include <ol_txrx_api.h>        /* ol_txrx_vdev_handle */
 
 #include <ol_txrx_types.h>      /* ol_tx_desc_t, ol_txrx_msdu_info_t */

+ 16 - 16
core/dp/txrx/ol_tx_desc.c

@@ -28,7 +28,7 @@
 #include <cdf_net_types.h>      /* CDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */
 #include <cdf_nbuf.h>           /* cdf_nbuf_t, etc. */
 #include <cdf_util.h>           /* cdf_assert */
-#include <cdf_lock.h>           /* cdf_spinlock */
+#include <qdf_lock.h>           /* qdf_spinlock */
 #ifdef QCA_COMPUTE_TX_DELAY
 #include <qdf_time.h>           /* qdf_system_ticks */
 #endif
@@ -120,13 +120,13 @@ struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
 {
 	struct ol_tx_desc_t *tx_desc = NULL;
 
-	cdf_spin_lock_bh(&pdev->tx_mutex);
+	qdf_spin_lock_bh(&pdev->tx_mutex);
 	if (pdev->tx_desc.freelist) {
 		tx_desc = ol_tx_get_desc_global_pool(pdev);
 		ol_tx_desc_sanity_checks(pdev, tx_desc);
 		ol_tx_desc_compute_delay(tx_desc);
 	}
-	cdf_spin_unlock_bh(&pdev->tx_mutex);
+	qdf_spin_unlock_bh(&pdev->tx_mutex);
 	return tx_desc;
 }
 
@@ -163,23 +163,23 @@ struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
 	struct ol_tx_desc_t *tx_desc = NULL;
 
 	if (pool) {
-		cdf_spin_lock_bh(&pool->flow_pool_lock);
+		qdf_spin_lock_bh(&pool->flow_pool_lock);
 		if (pool->avail_desc) {
 			tx_desc = ol_tx_get_desc_flow_pool(pool);
 			if (cdf_unlikely(pool->avail_desc < pool->stop_th)) {
 				pool->status = FLOW_POOL_ACTIVE_PAUSED;
-				cdf_spin_unlock_bh(&pool->flow_pool_lock);
+				qdf_spin_unlock_bh(&pool->flow_pool_lock);
 				/* pause network queues */
 				pdev->pause_cb(vdev->vdev_id,
 					       WLAN_STOP_ALL_NETIF_QUEUE,
 					       WLAN_DATA_FLOW_CONTROL);
 			} else {
-				cdf_spin_unlock_bh(&pool->flow_pool_lock);
+				qdf_spin_unlock_bh(&pool->flow_pool_lock);
 			}
 			ol_tx_desc_sanity_checks(pdev, tx_desc);
 			ol_tx_desc_compute_delay(tx_desc);
 		} else {
-			cdf_spin_unlock_bh(&pool->flow_pool_lock);
+			qdf_spin_unlock_bh(&pool->flow_pool_lock);
 			pdev->pool_stats.pkt_drop_no_desc++;
 		}
 	} else {
@@ -229,7 +229,7 @@ ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
  */
 void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 {
-	cdf_spin_lock_bh(&pdev->tx_mutex);
+	qdf_spin_lock_bh(&pdev->tx_mutex);
 
 	if (tx_desc->pkt_type == ol_tx_frm_tso) {
 		if (cdf_unlikely(tx_desc->tso_desc == NULL)) {
@@ -244,7 +244,7 @@ void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 	ol_tx_desc_reset_timestamp(tx_desc);
 
 	ol_tx_put_desc_global_pool(pdev, tx_desc);
-	cdf_spin_unlock_bh(&pdev->tx_mutex);
+	qdf_spin_unlock_bh(&pdev->tx_mutex);
 }
 
 #else
@@ -271,7 +271,7 @@ void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 	ol_tx_desc_reset_pkt_type(tx_desc);
 	ol_tx_desc_reset_timestamp(tx_desc);
 
-	cdf_spin_lock_bh(&pool->flow_pool_lock);
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
 	ol_tx_put_desc_flow_pool(pool, tx_desc);
 	switch (pool->status) {
 	case FLOW_POOL_ACTIVE_PAUSED:
@@ -284,7 +284,7 @@ void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 		break;
 	case FLOW_POOL_INVALID:
 		if (pool->avail_desc == pool->flow_pool_size) {
-			cdf_spin_unlock_bh(&pool->flow_pool_lock);
+			qdf_spin_unlock_bh(&pool->flow_pool_lock);
 			ol_tx_free_invalid_flow_pool(pool);
 			qdf_print("%s %d pool is INVALID State!!\n",
 				 __func__, __LINE__);
@@ -298,7 +298,7 @@ void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 				 __func__, __LINE__);
 		break;
 	};
-	cdf_spin_unlock_bh(&pool->flow_pool_lock);
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 
 }
 #endif
@@ -550,13 +550,13 @@ struct cdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
 {
 	struct cdf_tso_seg_elem_t *tso_seg = NULL;
 
-	cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
+	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
 	if (pdev->tso_seg_pool.freelist) {
 		pdev->tso_seg_pool.num_free--;
 		tso_seg = pdev->tso_seg_pool.freelist;
 		pdev->tso_seg_pool.freelist = pdev->tso_seg_pool.freelist->next;
 	}
-	cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
+	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
 
 	return tso_seg;
 }
@@ -576,10 +576,10 @@ struct cdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
 void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
 	 struct cdf_tso_seg_elem_t *tso_seg)
 {
-	cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
+	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
 	tso_seg->next = pdev->tso_seg_pool.freelist;
 	pdev->tso_seg_pool.freelist = tso_seg;
 	pdev->tso_seg_pool.num_free++;
-	cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
+	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
 }
 #endif

+ 9 - 9
core/dp/txrx/ol_tx_queue.c

@@ -49,11 +49,11 @@ void ol_txrx_vdev_pause(ol_txrx_vdev_handle vdev, uint32_t reason)
 	/* acquire the mutex lock, since we'll be modifying the queues */
 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
 
-	cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
 	vdev->ll_pause.paused_reason |= reason;
 	vdev->ll_pause.q_pause_cnt++;
 	vdev->ll_pause.is_q_paused = true;
-	cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
 
 	DPTRACE(cdf_dp_trace(NULL, CDF_DP_TRACE_VDEV_PAUSE,
 				NULL, 0));
@@ -66,19 +66,19 @@ void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
 	/* acquire the mutex lock, since we'll be modifying the queues */
 	TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
 
-	cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
 	if (vdev->ll_pause.paused_reason & reason) {
 		vdev->ll_pause.paused_reason &= ~reason;
 		if (!vdev->ll_pause.paused_reason) {
 			vdev->ll_pause.is_q_paused = false;
 			vdev->ll_pause.q_unpause_cnt++;
-			cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
 			ol_tx_vdev_ll_pause_queue_send(vdev);
 		} else {
-			cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+			qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
 		}
 	} else {
-		cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+		qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
 	}
 	DPTRACE(cdf_dp_trace(NULL, CDF_DP_TRACE_VDEV_UNPAUSE,
 				NULL, 0));
@@ -87,7 +87,7 @@ void ol_txrx_vdev_unpause(ol_txrx_vdev_handle vdev, uint32_t reason)
 
 void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
 {
-	cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
 	qdf_timer_stop(&vdev->ll_pause.timer);
 	vdev->ll_pause.is_q_timer_on = false;
 	while (vdev->ll_pause.txq.head) {
@@ -103,7 +103,7 @@ void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
 	}
 	vdev->ll_pause.txq.tail = NULL;
 	vdev->ll_pause.txq.depth = 0;
-	cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
 }
 
 #endif /* defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) */
@@ -405,7 +405,7 @@ void ol_tx_throttle_init(struct ol_txrx_pdev_t *pdev)
 
 	pdev->tx_throttle.current_throttle_level = THROTTLE_LEVEL_0;
 	pdev->tx_throttle.current_throttle_phase = THROTTLE_PHASE_OFF;
-	cdf_spinlock_init(&pdev->tx_throttle.mutex);
+	qdf_spinlock_create(&pdev->tx_throttle.mutex);
 
 	throttle_period = ol_cfg_throttle_period_ms(pdev->ctrl_pdev);
 

+ 15 - 15
core/dp/txrx/ol_tx_send.c

@@ -26,7 +26,7 @@
  */
 
 #include <qdf_atomic.h>         /* qdf_atomic_inc, etc. */
-#include <cdf_lock.h>           /* cdf_os_spinlock */
+#include <qdf_lock.h>           /* qdf_spinlock */
 #include <qdf_time.h>           /* qdf_system_ticks, etc. */
 #include <cdf_nbuf.h>           /* cdf_nbuf_t */
 #include <cdf_net_types.h>      /* ADF_NBUF_TX_EXT_TID_INVALID */
@@ -114,15 +114,15 @@
 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {	\
 			if (qdf_atomic_read(&vdev->os_q_paused) &&	\
 			    (vdev->tx_fl_hwm != 0)) {			\
-				cdf_spin_lock(&pdev->tx_mutex);		\
+				qdf_spin_lock(&pdev->tx_mutex);		\
 				if (pdev->tx_desc.num_free >		\
 				    vdev->tx_fl_hwm) {			\
 					qdf_atomic_set(&vdev->os_q_paused, 0); \
-					cdf_spin_unlock(&pdev->tx_mutex); \
+					qdf_spin_unlock(&pdev->tx_mutex); \
 					ol_txrx_flow_control_cb(vdev, true);\
 				}					\
 				else {					\
-					cdf_spin_unlock(&pdev->tx_mutex); \
+					qdf_spin_unlock(&pdev->tx_mutex); \
 				}					\
 			}						\
 		}							\
@@ -504,7 +504,7 @@ ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
 		tx_desc->status = status;
 		netbuf = tx_desc->netbuf;
 
-		cdf_runtime_pm_put();
+		qdf_runtime_pm_put();
 		cdf_nbuf_trace_update(netbuf, trace_str);
 		/* Per SDU update of byte count */
 		byte_cnt += cdf_nbuf_len(netbuf);
@@ -529,11 +529,11 @@ ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
 
 	/* One shot protected access to pdev freelist, when setup */
 	if (lcl_freelist) {
-		cdf_spin_lock(&pdev->tx_mutex);
+		qdf_spin_lock(&pdev->tx_mutex);
 		tx_desc_last->next = pdev->tx_desc.freelist;
 		pdev->tx_desc.freelist = lcl_freelist;
 		pdev->tx_desc.num_free += (uint16_t) num_msdus;
-		cdf_spin_unlock(&pdev->tx_mutex);
+		qdf_spin_unlock(&pdev->tx_mutex);
 	} else {
 		ol_tx_desc_frame_list_free(pdev, &tx_descs,
 					   status != htt_tx_status_ok);
@@ -634,10 +634,10 @@ ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
 	}
 
 	if (lcl_freelist) {
-		cdf_spin_lock(&pdev->tx_mutex);
+		qdf_spin_lock(&pdev->tx_mutex);
 		tx_desc_last->next = pdev->tx_desc.freelist;
 		pdev->tx_desc.freelist = lcl_freelist;
-		cdf_spin_unlock(&pdev->tx_mutex);
+		qdf_spin_unlock(&pdev->tx_mutex);
 	} else {
 		ol_tx_desc_frame_list_free(pdev, &tx_descs,
 					   htt_tx_status_discard);
@@ -697,7 +697,7 @@ ol_tx_delay(ol_txrx_pdev_handle pdev,
 
 	cdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);
 
-	cdf_spin_lock_bh(&pdev->tx_delay.mutex);
+	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
 	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;
 
 	data = &pdev->tx_delay.cats[category].copies[index];
@@ -729,7 +729,7 @@ ol_tx_delay(ol_txrx_pdev_handle pdev,
 		*queue_delay_microsec = 0;
 	}
 
-	cdf_spin_unlock_bh(&pdev->tx_delay.mutex);
+	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
 }
 
 void
@@ -741,7 +741,7 @@ ol_tx_delay_hist(ol_txrx_pdev_handle pdev,
 
 	cdf_assert(category >= 0 && category < QCA_TX_DELAY_NUM_CATEGORIES);
 
-	cdf_spin_lock_bh(&pdev->tx_delay.mutex);
+	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
 	index = 1 - pdev->tx_delay.cats[category].in_progress_idx;
 
 	data = &pdev->tx_delay.cats[category].copies[index];
@@ -755,7 +755,7 @@ ol_tx_delay_hist(ol_txrx_pdev_handle pdev,
 	}
 	report_bin_values[i] = data->hist_bins_queue[j];        /* overflow */
 
-	cdf_spin_unlock_bh(&pdev->tx_delay.mutex);
+	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
 }
 
 #ifdef QCA_COMPUTE_TX_DELAY_PER_TID
@@ -917,7 +917,7 @@ ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
 	}
 
 	/* since we may switch the ping-pong index, provide mutex w. readers */
-	cdf_spin_lock_bh(&pdev->tx_delay.mutex);
+	qdf_spin_lock_bh(&pdev->tx_delay.mutex);
 	index = pdev->tx_delay.cats[cat].in_progress_idx;
 
 	data = &pdev->tx_delay.cats[cat].copies[index];
@@ -962,7 +962,7 @@ ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
 			     sizeof(pdev->tx_delay.cats[cat].copies[index]));
 	}
 
-	cdf_spin_unlock_bh(&pdev->tx_delay.mutex);
+	qdf_spin_unlock_bh(&pdev->tx_delay.mutex);
 }
 
 #endif /* QCA_COMPUTE_TX_DELAY */

+ 90 - 91
core/dp/txrx/ol_txrx.c

@@ -30,7 +30,7 @@
 #include <osdep.h>              /* uint32_t, etc. */
 #include <cdf_memory.h>         /* cdf_mem_malloc,free */
 #include <qdf_types.h>          /* qdf_device_t, qdf_print */
-#include <cdf_lock.h>           /* cdf_spinlock */
+#include <qdf_lock.h>           /* qdf_spinlock */
 #include <qdf_atomic.h>         /* qdf_atomic_read */
 
 /* Required for WLAN_FEATURE_FASTPATH */
@@ -199,9 +199,9 @@ ol_txrx_peer_find_by_local_id(struct ol_txrx_pdev_t *pdev,
 		return NULL;
 	}
 
-	cdf_spin_lock_bh(&pdev->local_peer_ids.lock);
+	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
 	peer = pdev->local_peer_ids.map[local_peer_id];
-	cdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
+	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
 	return peer;
 }
 
@@ -222,7 +222,7 @@ static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
 	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
 	pdev->local_peer_ids.pool[i] = i;
 
-	cdf_spinlock_init(&pdev->local_peer_ids.lock);
+	qdf_spinlock_create(&pdev->local_peer_ids.lock);
 }
 
 static void
@@ -231,7 +231,7 @@ ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
 {
 	int i;
 
-	cdf_spin_lock_bh(&pdev->local_peer_ids.lock);
+	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
 	i = pdev->local_peer_ids.freelist;
 	if (pdev->local_peer_ids.pool[i] == i) {
 		/* the list is empty, except for the list-end marker */
@@ -242,7 +242,7 @@ ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
 		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
 		pdev->local_peer_ids.map[i] = peer;
 	}
-	cdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
+	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
 }
 
 static void
@@ -255,16 +255,16 @@ ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
 		return;
 	}
 	/* put this ID on the head of the freelist */
-	cdf_spin_lock_bh(&pdev->local_peer_ids.lock);
+	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
 	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
 	pdev->local_peer_ids.freelist = i;
 	pdev->local_peer_ids.map[i] = NULL;
-	cdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
+	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
 }
 
 static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
 {
-	cdf_spinlock_destroy(&pdev->local_peer_ids.lock);
+	qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
 }
 
 #else
@@ -349,14 +349,14 @@ uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
 	uint32_t free_desc;
 
 	free_desc = pdev->tx_desc.num_free;
-	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
 	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
 					 flow_pool_list_elem) {
-		cdf_spin_lock_bh(&pool->flow_pool_lock);
+		qdf_spin_lock_bh(&pool->flow_pool_lock);
 		free_desc += pool->avail_desc;
-		cdf_spin_unlock_bh(&pool->flow_pool_lock);
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
 	}
-	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
 
 	return free_desc;
 }
@@ -802,10 +802,10 @@ ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
 	}
 
 	/* initialize mutexes for tx desc alloc and peer lookup */
-	cdf_spinlock_init(&pdev->tx_mutex);
-	cdf_spinlock_init(&pdev->peer_ref_mutex);
-	cdf_spinlock_init(&pdev->rx.mutex);
-	cdf_spinlock_init(&pdev->last_real_peer_mutex);
+	qdf_spinlock_create(&pdev->tx_mutex);
+	qdf_spinlock_create(&pdev->peer_ref_mutex);
+	qdf_spinlock_create(&pdev->rx.mutex);
+	qdf_spinlock_create(&pdev->last_real_peer_mutex);
 	OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
 
 	if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK)
@@ -871,7 +871,7 @@ ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
 
 #ifdef QCA_COMPUTE_TX_DELAY
 	cdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
-	cdf_spinlock_init(&pdev->tx_delay.mutex);
+	qdf_spinlock_create(&pdev->tx_delay.mutex);
 
 	/* initialize compute interval with 5 seconds (ESE default) */
 	pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
@@ -928,10 +928,10 @@ pn_trace_attach_fail:
 	OL_RX_REORDER_TRACE_DETACH(pdev);
 
 reorder_trace_attach_fail:
-	cdf_spinlock_destroy(&pdev->tx_mutex);
-	cdf_spinlock_destroy(&pdev->peer_ref_mutex);
-	cdf_spinlock_destroy(&pdev->rx.mutex);
-	cdf_spinlock_destroy(&pdev->last_real_peer_mutex);
+	qdf_spinlock_destroy(&pdev->tx_mutex);
+	qdf_spinlock_destroy(&pdev->peer_ref_mutex);
+	qdf_spinlock_destroy(&pdev->rx.mutex);
+	qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
 	OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
 
 control_init_fail:
@@ -1045,13 +1045,13 @@ void ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force)
 
 	ol_txrx_peer_find_detach(pdev);
 
-	cdf_spinlock_destroy(&pdev->tx_mutex);
-	cdf_spinlock_destroy(&pdev->peer_ref_mutex);
-	cdf_spinlock_destroy(&pdev->last_real_peer_mutex);
-	cdf_spinlock_destroy(&pdev->rx.mutex);
+	qdf_spinlock_destroy(&pdev->tx_mutex);
+	qdf_spinlock_destroy(&pdev->peer_ref_mutex);
+	qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
+	qdf_spinlock_destroy(&pdev->rx.mutex);
 #ifdef QCA_SUPPORT_TX_THROTTLE
 	/* Thermal Mitigation */
-	cdf_spinlock_destroy(&pdev->tx_throttle.mutex);
+	qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
 #endif
 	OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
 
@@ -1064,7 +1064,7 @@ void ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force)
 	ol_txrx_local_peer_id_cleanup(pdev);
 
 #ifdef QCA_COMPUTE_TX_DELAY
-	cdf_spinlock_destroy(&pdev->tx_delay.mutex);
+	qdf_spinlock_destroy(&pdev->tx_delay.mutex);
 #endif
 }
 
@@ -1104,7 +1104,7 @@ ol_txrx_vdev_attach(ol_txrx_pdev_handle pdev,
 	vdev->ibss_peer_heart_beat_timer = 0;
 #endif
 
-	cdf_spinlock_init(&vdev->ll_pause.mutex);
+	qdf_spinlock_create(&vdev->ll_pause.mutex);
 	vdev->ll_pause.paused_reason = 0;
 	vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
 	vdev->ll_pause.txq.depth = 0;
@@ -1117,7 +1117,7 @@ ol_txrx_vdev_attach(ol_txrx_pdev_handle pdev,
 	vdev->tx_fl_lwm = 0;
 	vdev->tx_fl_hwm = 0;
 	vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
-	cdf_spinlock_init(&vdev->flow_control_lock);
+	qdf_spinlock_create(&vdev->flow_control_lock);
 	vdev->osif_flow_control_cb = NULL;
 	vdev->osif_fc_ctx = NULL;
 
@@ -1185,7 +1185,7 @@ ol_txrx_vdev_detach(ol_txrx_vdev_handle vdev,
 	/* preconditions */
 	TXRX_ASSERT2(vdev);
 
-	cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
 	qdf_timer_stop(&vdev->ll_pause.timer);
 	qdf_timer_free(&vdev->ll_pause.timer);
 	vdev->ll_pause.is_q_timer_on = false;
@@ -1197,14 +1197,14 @@ ol_txrx_vdev_detach(ol_txrx_vdev_handle vdev,
 		cdf_nbuf_tx_free(vdev->ll_pause.txq.head, NBUF_PKT_ERROR);
 		vdev->ll_pause.txq.head = next;
 	}
-	cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
-	cdf_spinlock_destroy(&vdev->ll_pause.mutex);
+	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+	qdf_spinlock_destroy(&vdev->ll_pause.mutex);
 
-	cdf_spin_lock_bh(&vdev->flow_control_lock);
+	qdf_spin_lock_bh(&vdev->flow_control_lock);
 	vdev->osif_flow_control_cb = NULL;
 	vdev->osif_fc_ctx = NULL;
-	cdf_spin_unlock_bh(&vdev->flow_control_lock);
-	cdf_spinlock_destroy(&vdev->flow_control_lock);
+	qdf_spin_unlock_bh(&vdev->flow_control_lock);
+	qdf_spinlock_destroy(&vdev->flow_control_lock);
 
 	/* remove the vdev from its parent pdev's list */
 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
@@ -1213,7 +1213,7 @@ ol_txrx_vdev_detach(ol_txrx_vdev_handle vdev,
 	 * Use peer_ref_mutex while accessing peer_list, in case
 	 * a peer is in the process of being removed from the list.
 	 */
-	cdf_spin_lock_bh(&pdev->peer_ref_mutex);
+	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
 	/* check that the vdev has no peers allocated */
 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
 		/* debug print - will be removed later */
@@ -1228,10 +1228,10 @@ ol_txrx_vdev_detach(ol_txrx_vdev_handle vdev,
 		vdev->delete.pending = 1;
 		vdev->delete.callback = callback;
 		vdev->delete.context = context;
-		cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 		return;
 	}
-	cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 
 	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
 		   "%s: deleting vdev obj %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
@@ -1273,19 +1273,19 @@ void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
 	}
 
 	cdf_assert(cds_ctx);
-	cdf_spin_lock_bh(&peer->peer_info_lock);
+	qdf_spin_lock_bh(&peer->peer_info_lock);
 	if (peer->state >= ol_txrx_peer_state_conn)
 		data_rx = peer->osif_rx;
 	else
 		drop = true;
-	cdf_spin_unlock_bh(&peer->peer_info_lock);
+	qdf_spin_unlock_bh(&peer->peer_info_lock);
 
-	cdf_spin_lock_bh(&peer->bufq_lock);
+	qdf_spin_lock_bh(&peer->bufq_lock);
 	cache_buf = list_entry((&peer->cached_bufq)->next,
 				typeof(*cache_buf), list);
 	while (!list_empty(&peer->cached_bufq)) {
 		list_del(&cache_buf->list);
-		cdf_spin_unlock_bh(&peer->bufq_lock);
+		qdf_spin_unlock_bh(&peer->bufq_lock);
 		if (drop) {
 			cdf_nbuf_free(cache_buf->buf);
 		} else {
@@ -1295,11 +1295,11 @@ void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
 				cdf_nbuf_free(cache_buf->buf);
 		}
 		cdf_mem_free(cache_buf);
-		cdf_spin_lock_bh(&peer->bufq_lock);
+		qdf_spin_lock_bh(&peer->bufq_lock);
 		cache_buf = list_entry((&peer->cached_bufq)->next,
 				typeof(*cache_buf), list);
 	}
-	cdf_spin_unlock_bh(&peer->bufq_lock);
+	qdf_spin_unlock_bh(&peer->bufq_lock);
 	qdf_atomic_dec(&peer->flush_in_progress);
 }
 
@@ -1319,7 +1319,7 @@ ol_txrx_peer_attach(ol_txrx_pdev_handle pdev,
 	TXRX_ASSERT2(vdev);
 	TXRX_ASSERT2(peer_mac_addr);
 
-	cdf_spin_lock_bh(&pdev->peer_ref_mutex);
+	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
 	/* check for duplicate exsisting peer */
 	TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
 		if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
@@ -1335,12 +1335,12 @@ ol_txrx_peer_attach(ol_txrx_pdev_handle pdev,
 				qdf_event_create(&vdev->wait_delete_comp);
 				wait_on_deletion = true;
 			} else {
-				cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 				return NULL;
 			}
 		}
 	}
-	cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 
 	if (wait_on_deletion) {
 		/* wait for peer deletion */
@@ -1366,10 +1366,10 @@ ol_txrx_peer_attach(ol_txrx_pdev_handle pdev,
 		     OL_TXRX_MAC_ADDR_LEN);
 
 	INIT_LIST_HEAD(&peer->cached_bufq);
-	cdf_spin_lock_bh(&pdev->peer_ref_mutex);
+	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
 	/* add this peer into the vdev's list */
 	TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
-	cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 	/* check whether this is a real peer (peer mac addr != vdev mac addr) */
 	if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr))
 		vdev->last_real_peer = peer;
@@ -1384,8 +1384,8 @@ ol_txrx_peer_attach(ol_txrx_pdev_handle pdev,
 
 
 	peer->osif_rx = NULL;
-	cdf_spinlock_init(&peer->peer_info_lock);
-	cdf_spinlock_init(&peer->bufq_lock);
+	qdf_spinlock_create(&peer->peer_info_lock);
+	qdf_spinlock_create(&peer->bufq_lock);
 
 	qdf_atomic_init(&peer->delete_in_progress);
 	qdf_atomic_init(&peer->flush_in_progress);
@@ -1700,7 +1700,7 @@ void ol_txrx_peer_unref_delete(ol_txrx_peer_handle peer)
 	 * vdev's list of peers is empty, to make sure that list is not modified
 	 * concurrently with the empty check.
 	 */
-	cdf_spin_lock_bh(&pdev->peer_ref_mutex);
+	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
 		u_int16_t peer_id;
 
@@ -1749,7 +1749,7 @@ void ol_txrx_peer_unref_delete(ol_txrx_peer_handle peer)
 				 * Now that there are no references to the peer,
 				 * we can release the peer reference lock.
 				 */
-				cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 
 				TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
 					   "%s: deleting vdev object %p "
@@ -1767,10 +1767,10 @@ void ol_txrx_peer_unref_delete(ol_txrx_peer_handle peer)
 				if (vdev_delete_cb)
 					vdev_delete_cb(vdev_delete_context);
 			} else {
-				cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 			}
 		} else {
-			cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 		}
 
 		/*
@@ -1795,7 +1795,7 @@ void ol_txrx_peer_unref_delete(ol_txrx_peer_handle peer)
 
 		cdf_mem_free(peer);
 	} else {
-		cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 	}
 }
 
@@ -1824,14 +1824,14 @@ void ol_txrx_peer_detach(ol_txrx_peer_handle peer)
 	if (peer->vdev->last_real_peer == peer)
 		peer->vdev->last_real_peer = NULL;
 
-	cdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
+	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
 	if (vdev->last_real_peer == peer)
 		vdev->last_real_peer = NULL;
-	cdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
+	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
 	htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
 
-	cdf_spinlock_destroy(&peer->peer_info_lock);
-	cdf_spinlock_destroy(&peer->bufq_lock);
+	qdf_spinlock_destroy(&peer->peer_info_lock);
+	qdf_spinlock_destroy(&peer->bufq_lock);
 	/* set delete_in_progress to identify that wma
 	 * is waiting for unmap massage for this peer */
 	qdf_atomic_set(&peer->delete_in_progress, 1);
@@ -2056,7 +2056,7 @@ ol_txrx_fw_stats_get(ol_txrx_vdev_handle vdev, struct ol_txrx_stats_req *req,
 	}
 
 	if (req->wait.blocking)
-		while (cdf_semaphore_acquire(pdev->osdev, req->wait.sem_ptr))
+		while (qdf_semaphore_acquire(req->wait.sem_ptr))
 			;
 
 	if (response_expected == false)
@@ -2250,8 +2250,7 @@ ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
 
 	if (!more) {
 		if (req->base.wait.blocking)
-			cdf_semaphore_release(pdev->osdev,
-					      req->base.wait.sem_ptr);
+			qdf_semaphore_release(req->base.wait.sem_ptr);
 		cdf_mem_free(req);
 	}
 }
@@ -2539,9 +2538,9 @@ ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
 			ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
 {
 	cdf_assert(pdev && peer && stats);
-	cdf_spin_lock_bh(&pdev->peer_stat_mutex);
+	qdf_spin_lock_bh(&pdev->peer_stat_mutex);
 	cdf_mem_copy(stats, &peer->stats, sizeof(*stats));
-	cdf_spin_unlock_bh(&pdev->peer_stat_mutex);
+	qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
 	return A_OK;
 }
 #endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
@@ -2611,10 +2610,10 @@ int ol_txrx_register_tx_flow_control (uint8_t vdev_id,
 		return -EINVAL;
 	}
 
-	cdf_spin_lock_bh(&vdev->flow_control_lock);
+	qdf_spin_lock_bh(&vdev->flow_control_lock);
 	vdev->osif_flow_control_cb = flowControl;
 	vdev->osif_fc_ctx = osif_fc_ctx;
-	cdf_spin_unlock_bh(&vdev->flow_control_lock);
+	qdf_spin_unlock_bh(&vdev->flow_control_lock);
 	return 0;
 }
 
@@ -2633,10 +2632,10 @@ int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
 		return -EINVAL;
 	}
 
-	cdf_spin_lock_bh(&vdev->flow_control_lock);
+	qdf_spin_lock_bh(&vdev->flow_control_lock);
 	vdev->osif_flow_control_cb = NULL;
 	vdev->osif_fc_ctx = NULL;
-	cdf_spin_unlock_bh(&vdev->flow_control_lock);
+	qdf_spin_unlock_bh(&vdev->flow_control_lock);
 	return 0;
 }
 
@@ -2666,17 +2665,17 @@ ol_txrx_get_tx_resource(uint8_t sta_id,
 		return true;
 	}
 
-	cdf_spin_lock_bh(&vdev->pdev->tx_mutex);
+	qdf_spin_lock_bh(&vdev->pdev->tx_mutex);
 	if (vdev->pdev->tx_desc.num_free < (uint16_t) low_watermark) {
 		vdev->tx_fl_lwm = (uint16_t) low_watermark;
 		vdev->tx_fl_hwm =
 			(uint16_t) (low_watermark + high_watermark_offset);
 		/* Not enough free resource, stop TX OS Q */
 		qdf_atomic_set(&vdev->os_q_paused, 1);
-		cdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
+		qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
 		return false;
 	}
-	cdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
+	qdf_spin_unlock_bh(&vdev->pdev->tx_mutex);
 	return true;
 }
 
@@ -2697,9 +2696,9 @@ ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
 		return -EINVAL;
 	}
 
-	cdf_spin_lock_bh(&vdev->ll_pause.mutex);
+	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
 	vdev->ll_pause.max_q_depth = pause_q_depth;
-	cdf_spin_unlock_bh(&vdev->ll_pause.mutex);
+	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
 
 	return 0;
 }
@@ -2714,10 +2713,10 @@ ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
 inline void ol_txrx_flow_control_cb(ol_txrx_vdev_handle vdev,
 	bool tx_resume)
 {
-	cdf_spin_lock_bh(&vdev->flow_control_lock);
+	qdf_spin_lock_bh(&vdev->flow_control_lock);
 	if ((vdev->osif_flow_control_cb) && (vdev->osif_fc_ctx))
 		vdev->osif_flow_control_cb(vdev->osif_fc_ctx, tx_resume);
-	cdf_spin_unlock_bh(&vdev->flow_control_lock);
+	qdf_spin_unlock_bh(&vdev->flow_control_lock);
 
 	return;
 }
@@ -2996,21 +2995,21 @@ static void ol_rx_data_cb(struct ol_txrx_peer_t *peer,
 	if (cdf_unlikely(!cds_ctx))
 		goto free_buf;
 
-	cdf_spin_lock_bh(&peer->peer_info_lock);
+	qdf_spin_lock_bh(&peer->peer_info_lock);
 	if (cdf_unlikely(!(peer->state >= ol_txrx_peer_state_conn))) {
-		cdf_spin_unlock_bh(&peer->peer_info_lock);
+		qdf_spin_unlock_bh(&peer->peer_info_lock);
 		goto free_buf;
 	}
 	data_rx = peer->osif_rx;
-	cdf_spin_unlock_bh(&peer->peer_info_lock);
+	qdf_spin_unlock_bh(&peer->peer_info_lock);
 
-	cdf_spin_lock_bh(&peer->bufq_lock);
+	qdf_spin_lock_bh(&peer->bufq_lock);
 	if (!list_empty(&peer->cached_bufq)) {
-		cdf_spin_unlock_bh(&peer->bufq_lock);
+		qdf_spin_unlock_bh(&peer->bufq_lock);
 		/* Flush the cached frames to HDD before passing new rx frame */
 		ol_txrx_flush_rx_frames(peer, 0);
 	} else
-		cdf_spin_unlock_bh(&peer->bufq_lock);
+		qdf_spin_unlock_bh(&peer->bufq_lock);
 
 	buf = buf_list;
 	while (buf) {
@@ -3057,10 +3056,10 @@ void ol_rx_data_process(struct ol_txrx_peer_t *peer,
 		goto drop_rx_buf;
 	}
 
-	cdf_spin_lock_bh(&peer->peer_info_lock);
+	qdf_spin_lock_bh(&peer->peer_info_lock);
 	if (peer->state >= ol_txrx_peer_state_conn)
 		data_rx = peer->osif_rx;
-	cdf_spin_unlock_bh(&peer->peer_info_lock);
+	qdf_spin_unlock_bh(&peer->peer_info_lock);
 
 	/*
 	 * If there is a data frame from peer before the peer is
@@ -3081,10 +3080,10 @@ void ol_rx_data_process(struct ol_txrx_peer_t *peer,
 				/* Add NULL terminator */
 				cdf_nbuf_set_next(buf, NULL);
 				cache_buf->buf = buf;
-				cdf_spin_lock_bh(&peer->bufq_lock);
+				qdf_spin_lock_bh(&peer->bufq_lock);
 				list_add_tail(&cache_buf->list,
 					      &peer->cached_bufq);
-				cdf_spin_unlock_bh(&peer->bufq_lock);
+				qdf_spin_unlock_bh(&peer->bufq_lock);
 			}
 			buf = next_buf;
 		}
@@ -3164,10 +3163,10 @@ QDF_STATUS ol_txrx_register_peer(ol_rx_callback_fp rxcb,
 	if (!peer)
 		return QDF_STATUS_E_FAULT;
 
-	cdf_spin_lock_bh(&peer->peer_info_lock);
+	qdf_spin_lock_bh(&peer->peer_info_lock);
 	peer->osif_rx = rxcb;
 	peer->state = ol_txrx_peer_state_conn;
-	cdf_spin_unlock_bh(&peer->peer_info_lock);
+	qdf_spin_unlock_bh(&peer->peer_info_lock);
 
 	param.qos_capable = sta_desc->is_qos_enabled;
 	ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
@@ -3223,10 +3222,10 @@ QDF_STATUS ol_txrx_clear_peer(uint8_t sta_id)
 	/* Purge the cached rx frame queue */
 	ol_txrx_flush_rx_frames(peer, 1);
 
-	cdf_spin_lock_bh(&peer->peer_info_lock);
+	qdf_spin_lock_bh(&peer->peer_info_lock);
 	peer->osif_rx = NULL;
 	peer->state = ol_txrx_peer_state_disc;
-	cdf_spin_unlock_bh(&peer->peer_info_lock);
+	qdf_spin_unlock_bh(&peer->peer_info_lock);
 
 	return QDF_STATUS_SUCCESS;
 }

+ 46 - 46
core/dp/txrx/ol_txrx_flow_control.c

@@ -109,7 +109,7 @@ ol_tx_deregister_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
  */
 void ol_tx_register_flow_control(struct ol_txrx_pdev_t *pdev)
 {
-	cdf_spinlock_init(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spinlock_create(&pdev->tx_desc.flow_pool_list_lock);
 	TAILQ_INIT(&pdev->tx_desc.flow_pool_list);
 
 	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
@@ -127,7 +127,7 @@ void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
 	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
 		ol_tx_deregister_global_mgmt_pool(pdev);
 
-	cdf_spinlock_destroy(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spinlock_destroy(&pdev->tx_desc.flow_pool_list_lock);
 	if (!TAILQ_EMPTY(&pdev->tx_desc.flow_pool_list)) {
 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
 			"flow pool list is not empty!!!\n");
@@ -168,13 +168,13 @@ void ol_tx_dump_flow_pool_info(void)
 	 * Always take in below order.
 	 * flow_pool_list_lock -> flow_pool_lock
 	 */
-	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
 	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
 					 flow_pool_list_elem) {
-		cdf_spin_lock_bh(&pool->flow_pool_lock);
+		qdf_spin_lock_bh(&pool->flow_pool_lock);
 		cdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
-		cdf_spin_unlock_bh(&pool->flow_pool_lock);
-		cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
 			"Flow_pool_id %d :: status %d\n",
 			tmp_pool.flow_pool_id, tmp_pool.status);
@@ -188,9 +188,9 @@ void ol_tx_dump_flow_pool_info(void)
 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
 			"Member flow_id  %d :: flow_type %d\n",
 			tmp_pool.member_flow_id, tmp_pool.flow_type);
-		cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
 	}
-	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
 
 	return;
 }
@@ -229,17 +229,17 @@ static int ol_tx_move_desc_n(struct ol_tx_flow_pool_t *src_pool,
 	union ol_tx_desc_list_elem_t *temp_list = NULL;
 
 	/* Take descriptors from source pool and put it in temp_list */
-	cdf_spin_lock_bh(&src_pool->flow_pool_lock);
+	qdf_spin_lock_bh(&src_pool->flow_pool_lock);
 	for (i = 0; i < desc_move_count; i++) {
 		tx_desc = ol_tx_get_desc_flow_pool(src_pool);
 		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
 		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
 
 	}
-	cdf_spin_unlock_bh(&src_pool->flow_pool_lock);
+	qdf_spin_unlock_bh(&src_pool->flow_pool_lock);
 
 	/* Take descriptors from temp_list and put it in destination pool */
-	cdf_spin_lock_bh(&dst_pool->flow_pool_lock);
+	qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
 	for (i = 0; i < desc_move_count; i++) {
 		if (dst_pool->deficient_desc)
 			dst_pool->deficient_desc--;
@@ -250,16 +250,16 @@ static int ol_tx_move_desc_n(struct ol_tx_flow_pool_t *src_pool,
 		ol_tx_put_desc_flow_pool(dst_pool, tx_desc);
 		count++;
 	}
-	cdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
+	qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
 
 	/* If anything is there in temp_list put it back to source pool */
-	cdf_spin_lock_bh(&src_pool->flow_pool_lock);
+	qdf_spin_lock_bh(&src_pool->flow_pool_lock);
 	while (temp_list) {
 		tx_desc = &temp_list->tx_desc;
 		temp_list = temp_list->next;
 		ol_tx_put_desc_flow_pool(src_pool, tx_desc);
 	}
-	cdf_spin_unlock_bh(&src_pool->flow_pool_lock);
+	qdf_spin_unlock_bh(&src_pool->flow_pool_lock);
 
 	return count;
 }
@@ -287,25 +287,25 @@ ol_tx_distribute_descs_to_deficient_pools(struct ol_tx_flow_pool_t *src_pool)
 		   "%s: pdev is NULL\n", __func__);
 		return -EINVAL;
 	}
-	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
 	TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
 					 flow_pool_list_elem) {
-		cdf_spin_lock_bh(&dst_pool->flow_pool_lock);
+		qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
 		if (dst_pool->deficient_desc) {
 			desc_move_count =
 				(dst_pool->deficient_desc > desc_count) ?
 					desc_count : dst_pool->deficient_desc;
-			cdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
+			qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
 			desc_move_count = ol_tx_move_desc_n(src_pool,
 						dst_pool, desc_move_count);
 			desc_count -= desc_move_count;
-			cdf_spin_lock_bh(&dst_pool->flow_pool_lock);
+			qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
 		}
-		cdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
+		qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
 		if (desc_count == 0)
 			break;
 	}
-	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
 
 	return 0;
 }
@@ -349,10 +349,10 @@ struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
 	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
 	pool->start_th = (start_threshold * flow_pool_size)/100;
 	pool->stop_th = (stop_threshold * flow_pool_size)/100;
-	cdf_spinlock_init(&pool->flow_pool_lock);
+	qdf_spinlock_create(&pool->flow_pool_lock);
 
 	/* Take TX descriptor from global_pool and put it in temp_list*/
-	cdf_spin_lock_bh(&pdev->tx_mutex);
+	qdf_spin_lock_bh(&pdev->tx_mutex);
 	if (pdev->tx_desc.num_free >= pool->flow_pool_size)
 		size = pool->flow_pool_size;
 	else
@@ -365,7 +365,7 @@ struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
 		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
 
 	}
-	cdf_spin_unlock_bh(&pdev->tx_mutex);
+	qdf_spin_unlock_bh(&pdev->tx_mutex);
 
 	/* put temp_list to flow_pool */
 	pool->freelist = temp_list;
@@ -373,10 +373,10 @@ struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
 	pool->deficient_desc = pool->flow_pool_size - pool->avail_desc;
 
 	/* Add flow_pool to flow_pool_list */
-	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
 	TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
 			 flow_pool_list_elem);
-	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
 
 	return pool;
 }
@@ -403,11 +403,11 @@ int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool)
 		return -ENOMEM;
 	}
 
-	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
 	TAILQ_REMOVE(&pdev->tx_desc.flow_pool_list, pool, flow_pool_list_elem);
-	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
 
-	cdf_spin_lock_bh(&pool->flow_pool_lock);
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
 	if (pool->avail_desc == pool->flow_pool_size)
 		pool->status = FLOW_POOL_INACTIVE;
 	else
@@ -420,14 +420,14 @@ int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool)
 	pool->avail_desc = 0;
 
 	if (pool->status == FLOW_POOL_INACTIVE) {
-		cdf_spin_unlock_bh(&pool->flow_pool_lock);
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
 		/* Free flow_pool */
-		cdf_spinlock_destroy(&pool->flow_pool_lock);
+		qdf_spinlock_destroy(&pool->flow_pool_lock);
 		cdf_mem_free(pool);
 	} else { /* FLOW_POOL_INVALID case*/
 		pool->flow_pool_size -= size;
 		pool->flow_pool_id = INVALID_FLOW_ID;
-		cdf_spin_unlock_bh(&pool->flow_pool_lock);
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
 
 		pdev->tx_desc.num_invalid_bin++;
 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
@@ -436,21 +436,21 @@ int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool)
 		if (pdev->tx_desc.num_invalid_bin > MAX_INVALID_BIN)
 			ASSERT(0);
 
-		cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
 		TAILQ_INSERT_TAIL(&pdev->tx_desc.flow_pool_list, pool,
 				 flow_pool_list_elem);
-		cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
 	}
 
 	/* put free descriptors to global pool */
-	cdf_spin_lock_bh(&pdev->tx_mutex);
+	qdf_spin_lock_bh(&pdev->tx_mutex);
 	for (i = 0; i < size; i++) {
 		tx_desc = &temp_list->tx_desc;
 		temp_list = temp_list->next;
 
 		ol_tx_put_desc_global_pool(pdev, tx_desc);
 	}
-	cdf_spin_unlock_bh(&pdev->tx_mutex);
+	qdf_spin_unlock_bh(&pdev->tx_mutex);
 
 	return 0;
 }
@@ -475,9 +475,9 @@ int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool)
 	/* direclty distribute to other deficient pools */
 	ol_tx_distribute_descs_to_deficient_pools(pool);
 
-	cdf_spin_lock_bh(&pool->flow_pool_lock);
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
 	pool->flow_pool_size = pool->avail_desc;
-	cdf_spin_unlock_bh(&pool->flow_pool_lock);
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 
 	pdev->tx_desc.num_invalid_bin--;
 	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
@@ -499,18 +499,18 @@ struct ol_tx_flow_pool_t *ol_tx_get_flow_pool(uint8_t flow_pool_id)
 	struct ol_tx_flow_pool_t *pool = NULL;
 	bool is_found = false;
 
-	cdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
 	TAILQ_FOREACH(pool, &pdev->tx_desc.flow_pool_list,
 					 flow_pool_list_elem) {
-		cdf_spin_lock_bh(&pool->flow_pool_lock);
+		qdf_spin_lock_bh(&pool->flow_pool_lock);
 		if (pool->flow_pool_id == flow_pool_id) {
-			cdf_spin_unlock_bh(&pool->flow_pool_lock);
+			qdf_spin_unlock_bh(&pool->flow_pool_lock);
 			is_found = true;
 			break;
 		}
-		cdf_spin_unlock_bh(&pool->flow_pool_lock);
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
 	}
-	cdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
 
 	if (is_found == false)
 		pool = NULL;
@@ -541,9 +541,9 @@ void ol_tx_flow_pool_vdev_map(struct ol_tx_flow_pool_t *pool,
 	}
 
 	vdev->pool = pool;
-	cdf_spin_lock_bh(&pool->flow_pool_lock);
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
 	pool->member_flow_id = vdev_id;
-	cdf_spin_unlock_bh(&pool->flow_pool_lock);
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 
 	return;
 }
@@ -569,9 +569,9 @@ void ol_tx_flow_pool_vdev_unmap(struct ol_tx_flow_pool_t *pool,
 	}
 
 	vdev->pool = NULL;
-	cdf_spin_lock_bh(&pool->flow_pool_lock);
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
 	pool->member_flow_id = INVALID_FLOW_ID;
-	cdf_spin_unlock_bh(&pool->flow_pool_lock);
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 
 	return;
 }

+ 4 - 4
core/dp/txrx/ol_txrx_internal.h

@@ -611,10 +611,10 @@ NOT_IP_TCP:
 #ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
 #define OL_TXRX_PEER_STATS_UPDATE_BASE(peer, tx_or_rx, type, msdu) \
 	do { \
-		cdf_spin_lock_bh(&peer->vdev->pdev->peer_stat_mutex); \
+		qdf_spin_lock_bh(&peer->vdev->pdev->peer_stat_mutex); \
 		peer->stats.tx_or_rx.frms.type += 1; \
 		peer->stats.tx_or_rx.bytes.type += cdf_nbuf_len(msdu); \
-		cdf_spin_unlock_bh(&peer->vdev->pdev->peer_stat_mutex);	\
+		qdf_spin_unlock_bh(&peer->vdev->pdev->peer_stat_mutex);	\
 	} while (0)
 #define OL_TXRX_PEER_STATS_UPDATE(peer, tx_or_rx, msdu)	\
 	do { \
@@ -648,9 +648,9 @@ NOT_IP_TCP:
 #define OL_RX_PEER_STATS_UPDATE(peer, msdu) \
 	OL_TXRX_PEER_STATS_UPDATE(peer, rx, msdu)
 #define OL_TXRX_PEER_STATS_MUTEX_INIT(pdev) \
-	cdf_spinlock_init(&pdev->peer_stat_mutex)
+	qdf_spinlock_create(&pdev->peer_stat_mutex)
 #define OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev) \
-	cdf_spinlock_destroy(&pdev->peer_stat_mutex)
+	qdf_spinlock_destroy(&pdev->peer_stat_mutex)
 #else
 #define OL_TX_PEER_STATS_UPDATE(peer, msdu)     /* no-op */
 #define OL_RX_PEER_STATS_UPDATE(peer, msdu)     /* no-op */

+ 12 - 12
core/dp/txrx/ol_txrx_peer_find.c

@@ -157,7 +157,7 @@ ol_txrx_peer_find_hash_add(struct ol_txrx_pdev_t *pdev,
 	unsigned index;
 
 	index = ol_txrx_peer_find_hash_index(pdev, &peer->mac_addr);
-	cdf_spin_lock_bh(&pdev->peer_ref_mutex);
+	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
 	/*
 	 * It is important to add the new peer at the tail of the peer list
 	 * with the bin index.  Together with having the hash_find function
@@ -166,7 +166,7 @@ ol_txrx_peer_find_hash_add(struct ol_txrx_pdev_t *pdev,
 	 * found first.
 	 */
 	TAILQ_INSERT_TAIL(&pdev->peer_hash.bins[index], peer, hash_list_elem);
-	cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 }
 
 struct ol_txrx_peer_t *ol_txrx_peer_vdev_find_hash(struct ol_txrx_pdev_t *pdev,
@@ -187,7 +187,7 @@ struct ol_txrx_peer_t *ol_txrx_peer_vdev_find_hash(struct ol_txrx_pdev_t *pdev,
 		mac_addr = &local_mac_addr_aligned;
 	}
 	index = ol_txrx_peer_find_hash_index(pdev, mac_addr);
-	cdf_spin_lock_bh(&pdev->peer_ref_mutex);
+	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
 	TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index], hash_list_elem) {
 		if (ol_txrx_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) ==
 		    0 && (check_valid == 0 || peer->valid)
@@ -195,11 +195,11 @@ struct ol_txrx_peer_t *ol_txrx_peer_vdev_find_hash(struct ol_txrx_pdev_t *pdev,
 			/* found it - increment the ref count before releasing
 			   the lock */
 			qdf_atomic_inc(&peer->ref_cnt);
-			cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 			return peer;
 		}
 	}
-	cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 	return NULL;            /* failure */
 }
 
@@ -220,18 +220,18 @@ struct ol_txrx_peer_t *ol_txrx_peer_find_hash_find(struct ol_txrx_pdev_t *pdev,
 		mac_addr = &local_mac_addr_aligned;
 	}
 	index = ol_txrx_peer_find_hash_index(pdev, mac_addr);
-	cdf_spin_lock_bh(&pdev->peer_ref_mutex);
+	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
 	TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index], hash_list_elem) {
 		if (ol_txrx_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) ==
 		    0 && (check_valid == 0 || peer->valid)) {
 			/* found it - increment the ref count before
 			   releasing the lock */
 			qdf_atomic_inc(&peer->ref_cnt);
-			cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 			return peer;
 		}
 	}
-	cdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 	return NULL;            /* failure */
 }
 
@@ -256,9 +256,9 @@ ol_txrx_peer_find_hash_remove(struct ol_txrx_pdev_t *pdev,
 	 * peer ref count is decremented to zero, but just before the peer
 	 * object reference is removed from the hash table.
 	 */
-	/* cdf_spin_lock_bh(&pdev->peer_ref_mutex); */
+	/* qdf_spin_lock_bh(&pdev->peer_ref_mutex); */
 	TAILQ_REMOVE(&pdev->peer_hash.bins[index], peer, hash_list_elem);
-	/* cdf_spin_unlock_bh(&pdev->peer_ref_mutex); */
+	/* qdf_spin_unlock_bh(&pdev->peer_ref_mutex); */
 }
 
 void ol_txrx_peer_find_hash_erase(struct ol_txrx_pdev_t *pdev)
@@ -434,7 +434,7 @@ struct ol_txrx_peer_t *ol_txrx_assoc_peer_find(struct ol_txrx_vdev_t *vdev)
 {
 	struct ol_txrx_peer_t *peer;
 
-	cdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
+	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
 	/*
 	 * Check the TXRX Peer is itself valid And also
 	 * if HTT Peer ID has been setup for this peer
@@ -446,7 +446,7 @@ struct ol_txrx_peer_t *ol_txrx_assoc_peer_find(struct ol_txrx_vdev_t *vdev)
 	} else {
 		peer = NULL;
 	}
-	cdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
+	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
 	return peer;
 }
 

+ 14 - 14
core/dp/txrx/ol_txrx_types.h

@@ -40,7 +40,7 @@
 #include <qdf_atomic.h>         /* qdf_atomic_t */
 #include <wdi_event_api.h>      /* wdi_event_subscribe */
 #include <qdf_timer.h>		/* qdf_timer_t */
-#include <cdf_lock.h>           /* cdf_spinlock */
+#include <qdf_lock.h>           /* qdf_spinlock_t */
 #include <pktlog.h>             /* ol_pktlog_dev_handle */
 #include <ol_txrx_stats.h>
 #include <txrx.h>
@@ -76,8 +76,8 @@
 #define OL_TXRX_MGMT_TYPE_BASE htt_pkt_num_types
 #define OL_TXRX_MGMT_NUM_TYPES 8
 
-#define OL_TX_MUTEX_TYPE cdf_spinlock_t
-#define OL_RX_MUTEX_TYPE cdf_spinlock_t
+#define OL_TX_MUTEX_TYPE qdf_spinlock_t
+#define OL_RX_MUTEX_TYPE qdf_spinlock_t
 
 /* TXRX Histogram defines */
 #define TXRX_DATA_HISTROGRAM_GRANULARITY      1000
@@ -374,7 +374,7 @@ struct ol_txrx_pool_stats {
  */
 struct ol_tx_flow_pool_t {
 	TAILQ_ENTRY(ol_tx_flow_pool_t) flow_pool_list_elem;
-	cdf_spinlock_t flow_pool_lock;
+	qdf_spinlock_t flow_pool_lock;
 	uint8_t flow_pool_id;
 	uint16_t flow_pool_size;
 	uint16_t avail_desc;
@@ -530,7 +530,7 @@ struct ol_txrx_pdev_t {
 			struct ol_tx_reorder_cat_timeout_t
 				access_cats[TXRX_NUM_WMM_AC];
 		} reorder_timeout;
-		cdf_spinlock_t mutex;
+		qdf_spinlock_t mutex;
 	} rx;
 
 	/* rx proc function */
@@ -559,7 +559,7 @@ struct ol_txrx_pdev_t {
 		union ol_tx_desc_list_elem_t *freelist;
 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
 		uint8_t num_invalid_bin;
-		cdf_spinlock_t flow_pool_list_lock;
+		qdf_spinlock_t flow_pool_list_lock;
 		TAILQ_HEAD(flow_pool_list_t, ol_tx_flow_pool_t) flow_pool_list;
 #endif
 		uint32_t page_size;
@@ -686,7 +686,7 @@ struct ol_txrx_pdev_t {
 	} tx_queue;
 
 #ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
-	cdf_spinlock_t peer_stat_mutex;
+	qdf_spinlock_t peer_stat_mutex;
 #endif
 
 	int rssi_update_shift;
@@ -695,7 +695,7 @@ struct ol_txrx_pdev_t {
 	struct {
 		ol_txrx_local_peer_id_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
 		ol_txrx_local_peer_id_t freelist;
-		cdf_spinlock_t lock;
+		qdf_spinlock_t lock;
 		ol_txrx_peer_handle map[OL_TXRX_NUM_LOCAL_PEER_IDS];
 	} local_peer_ids;
 #endif
@@ -708,7 +708,7 @@ struct ol_txrx_pdev_t {
 #define QCA_TX_DELAY_NUM_CATEGORIES 1
 #endif
 	struct {
-		cdf_spinlock_t mutex;
+		qdf_spinlock_t mutex;
 		struct {
 			struct ol_tx_delay_data copies[2]; /* ping-pong */
 			int in_progress_idx;
@@ -726,7 +726,7 @@ struct ol_txrx_pdev_t {
 #endif /* QCA_COMPUTE_TX_DELAY */
 
 	struct {
-		cdf_spinlock_t mutex;
+		qdf_spinlock_t mutex;
 		/* timer used to monitor the throttle "on" phase and
 		   "off" phase */
 		qdf_timer_t phase_timer;
@@ -832,7 +832,7 @@ struct ol_txrx_vdev_t {
 			int depth;
 		} txq;
 		uint32_t paused_reason;
-		cdf_spinlock_t mutex;
+		qdf_spinlock_t mutex;
 		qdf_timer_t timer;
 		int max_q_depth;
 		bool is_q_paused;
@@ -845,7 +845,7 @@ struct ol_txrx_vdev_t {
 	qdf_atomic_t os_q_paused;
 	uint16_t tx_fl_lwm;
 	uint16_t tx_fl_hwm;
-	cdf_spinlock_t flow_control_lock;
+	qdf_spinlock_t flow_control_lock;
 	ol_txrx_tx_flow_control_fp osif_flow_control_cb;
 	void *osif_fc_ctx;
 	uint16_t wait_on_peer_id;
@@ -924,9 +924,9 @@ struct ol_txrx_peer_t {
 	 * for all systems.
 	 */
 	enum ol_txrx_peer_state state;
-	cdf_spinlock_t peer_info_lock;
+	qdf_spinlock_t peer_info_lock;
 	ol_rx_callback_fp osif_rx;
-	cdf_spinlock_t bufq_lock;
+	qdf_spinlock_t bufq_lock;
 	struct list_head cached_bufq;
 
 	ol_tx_filter_func tx_filter;

+ 3 - 3
core/hdd/inc/wlan_hdd_lro.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -72,7 +72,7 @@ struct hdd_lro_desc_entry {
 struct hdd_lro_desc_pool {
 	struct hdd_lro_desc_entry *lro_desc_array;
 	struct list_head lro_free_list_head;
-	cdf_spinlock_t lro_pool_lock;
+	qdf_spinlock_t lro_pool_lock;
 };
 
 /**
@@ -93,7 +93,7 @@ struct hdd_lro_desc_table {
  */
 struct hdd_lro_desc_info {
 	struct hdd_lro_desc_table *lro_hash_table;
-	cdf_spinlock_t lro_hash_lock;
+	qdf_spinlock_t lro_hash_lock;
 	struct hdd_lro_desc_pool lro_desc_pool;
 };
 

+ 8 - 8
core/hdd/inc/wlan_hdd_main.h

@@ -1105,7 +1105,7 @@ struct hdd_context_s {
 	struct wiphy *wiphy;
 	/* TODO Remove this from here. */
 
-	cdf_spinlock_t hdd_adapter_lock;
+	qdf_spinlock_t hdd_adapter_lock;
 	qdf_list_t hddAdapters; /* List of adapters */
 
 	/* One per STA: 1 for BCMC_STA_ID, 1 for each SAP_SELF_STA_ID, 1 for WDS_STAID */
@@ -1163,10 +1163,10 @@ struct hdd_context_s {
 	struct qdf_mac_addr p2pDeviceAddress;
 
 #ifdef WLAN_FEATURE_HOLD_RX_WAKELOCK
-	cdf_wake_lock_t rx_wake_lock;
+	qdf_wake_lock_t rx_wake_lock;
 #endif
 
-	cdf_wake_lock_t sap_wake_lock;
+	qdf_wake_lock_t sap_wake_lock;
 
 #ifdef FEATURE_WLAN_TDLS
 	eTDLSSupportMode tdls_mode;
@@ -1202,7 +1202,7 @@ struct hdd_context_s {
 	/* Use below lock to protect access to isSchedScanUpdatePending
 	 * since it will be accessed in two different contexts.
 	 */
-	cdf_spinlock_t sched_scan_lock;
+	qdf_spinlock_t sched_scan_lock;
 
 	/* Flag keeps track of wiphy suspend/resume */
 	bool isWiphySuspended;
@@ -1274,7 +1274,7 @@ struct hdd_context_s {
 	uint8_t skip_acs_scan_status;
 #endif
 
-	cdf_wake_lock_t sap_dfs_wakelock;
+	qdf_wake_lock_t sap_dfs_wakelock;
 	atomic_t sap_dfs_ref_cnt;
 
 #ifdef WLAN_FEATURE_EXTWOW_SUPPORT
@@ -1289,9 +1289,9 @@ struct hdd_context_s {
 	unsigned long g_event_flags;
 	/* RoC request queue and work */
 	struct delayed_work roc_req_work;
-	cdf_spinlock_t hdd_roc_req_q_lock;
+	qdf_spinlock_t hdd_roc_req_q_lock;
 	qdf_list_t hdd_roc_req_q;
-	cdf_spinlock_t hdd_scan_req_q_lock;
+	qdf_spinlock_t hdd_scan_req_q_lock;
 	qdf_list_t hdd_scan_req_q;
 	uint8_t miracast_value;
 #ifdef WLAN_NS_OFFLOAD
@@ -1319,7 +1319,7 @@ struct hdd_context_s {
 
 	cdf_mc_timer_t dbs_opportunistic_timer;
 	bool connection_in_progress;
-	cdf_spinlock_t connection_status_lock;
+	qdf_spinlock_t connection_status_lock;
 
 	uint16_t hdd_txrx_hist_idx;
 	struct hdd_tx_rx_histogram hdd_txrx_hist[NUM_TX_RX_HISTOGRAM];

+ 1 - 1
core/hdd/src/wlan_hdd_driver_ops.c

@@ -39,7 +39,7 @@
 #endif /* HIF_PCI */
 #include "cds_api.h"
 #include "qdf_status.h"
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 #include "cds_sched.h"
 #include "osdep.h"
 #include "hif.h"

+ 17 - 17
core/hdd/src/wlan_hdd_ftm.c

@@ -252,7 +252,7 @@ static QDF_STATUS wlan_ftm_cds_open(v_CONTEXT_t p_cds_context,
 		goto err_sched_close;
 	}
 
-	ol_ctx = cds_get_context(CDF_MODULE_ID_BMI);
+	ol_ctx = cds_get_context(QDF_MODULE_ID_BMI);
 	if (bmi_download_firmware(ol_ctx)) {
 		CDF_TRACE(QDF_MODULE_ID_QDF, CDF_TRACE_LEVEL_FATAL,
 			  "%s: BMI failed to download target", __func__);
@@ -496,10 +496,10 @@ static QDF_STATUS cds_ftm_pre_start(v_CONTEXT_t cds_context)
 		cds_get_global_context();
 #endif
 
-	CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_INFO, "cds prestart");
+	CDF_TRACE(QDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_INFO, "cds prestart");
 	if (NULL == p_cds_context->pWMAContext) {
 		CDF_ASSERT(0);
-		CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_ERROR,
+		CDF_TRACE(QDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_ERROR,
 			  "%s: WMA NULL context", __func__);
 		return QDF_STATUS_E_FAILURE;
 	}
@@ -510,7 +510,7 @@ static QDF_STATUS cds_ftm_pre_start(v_CONTEXT_t cds_context)
 	/*call WMA pre start */
 	vStatus = wma_pre_start(p_cds_context);
 	if (!QDF_IS_STATUS_SUCCESS(vStatus)) {
-		CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_ERROR,
+		CDF_TRACE(QDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_ERROR,
 			  "Failed to WMA prestart ");
 		CDF_ASSERT(0);
 		return QDF_STATUS_E_FAILURE;
@@ -535,7 +535,7 @@ static QDF_STATUS cds_ftm_pre_start(v_CONTEXT_t cds_context)
 #if  defined(QCA_WIFI_FTM)
 	vStatus = htc_start(gp_cds_context->htc_ctx);
 	if (!QDF_IS_STATUS_SUCCESS(vStatus)) {
-		CDF_TRACE(CDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_FATAL,
+		CDF_TRACE(QDF_MODULE_ID_SYS, CDF_TRACE_LEVEL_FATAL,
 			  "Failed to Start HTC");
 		CDF_ASSERT(0);
 		return QDF_STATUS_E_FAILURE;
@@ -560,13 +560,13 @@ int wlan_hdd_ftm_open(hdd_context_t *hdd_ctx)
 	QDF_STATUS vStatus = QDF_STATUS_SUCCESS;
 	p_cds_contextType p_cds_context = NULL;
 
-	CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_INFO_HIGH,
+	CDF_TRACE(QDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_INFO_HIGH,
 		  "%s: Opening CDS", __func__);
 
 	p_cds_context = cds_get_global_context();
 
 	if (NULL == p_cds_context) {
-		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+		CDF_TRACE(QDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
 			  "%s: Trying to open CDS without a PreOpen", __func__);
 		CDF_ASSERT(0);
 		goto err_cdf_status_failure;
@@ -585,7 +585,7 @@ int wlan_hdd_ftm_open(hdd_context_t *hdd_ctx)
 
 	/* Save the hal context in Adapter */
 	hdd_ctx->hHal =
-		(tHalHandle) cds_get_context(CDF_MODULE_ID_SME);
+		(tHalHandle) cds_get_context(QDF_MODULE_ID_SME);
 
 	if (NULL == hdd_ctx->hHal) {
 		hddLog(CDF_TRACE_LEVEL_ERROR, "%s: HAL context is null",
@@ -661,13 +661,13 @@ int wlan_hdd_ftm_close(hdd_context_t *hdd_ctx)
 	hdd_adapter_t *adapter = hdd_get_adapter(hdd_ctx, WLAN_HDD_FTM);
 	ENTER();
 	if (adapter == NULL) {
-		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_FATAL,
+		CDF_TRACE(QDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_FATAL,
 			  "%s:adapter is NULL", __func__);
 		return -ENXIO;
 	}
 
 	if (WLAN_FTM_STARTED == hdd_ctx->ftm.ftm_state) {
-		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_FATAL,
+		CDF_TRACE(QDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_FATAL,
 			  "%s: Ftm has been started. stopping ftm", __func__);
 		wlan_ftm_stop(hdd_ctx);
 	}
@@ -753,18 +753,18 @@ static int wlan_hdd_ftm_start(hdd_context_t *hdd_ctx)
 		return 0;
 	}
 
-	CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_INFO,
+	CDF_TRACE(QDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_INFO,
 		  "%s: Starting CLD SW", __func__);
 
 	/* We support only one instance for now ... */
 	if (p_cds_context == NULL) {
-		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+		CDF_TRACE(QDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
 			  "%s: mismatch in context", __func__);
 		goto err_status_failure;
 	}
 
 	if (p_cds_context->pMACContext == NULL) {
-		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+		CDF_TRACE(QDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
 			  "%s: MAC NULL context", __func__);
 		goto err_status_failure;
 	}
@@ -785,11 +785,11 @@ static int wlan_hdd_ftm_start(hdd_context_t *hdd_ctx)
 		goto err_status_failure;
 	}
 
-	CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_INFO,
+	CDF_TRACE(QDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_INFO,
 		  "%s: MAC correctly started", __func__);
 
 	if (hdd_ftm_service_registration(hdd_ctx)) {
-		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+		CDF_TRACE(QDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
 			  "%s: failed", __func__);
 		goto err_ftm_service_reg;
 	}
@@ -1022,7 +1022,7 @@ QDF_STATUS wlan_hdd_ftm_testmode_cmd(void *data, int len)
 		   cdf_mem_malloc(sizeof(*cmd_data));
 
 	if (!cmd_data) {
-		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+		CDF_TRACE(QDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
 			  ("Failed to allocate FTM command data"));
 		return QDF_STATUS_E_NOMEM;
 	}
@@ -1030,7 +1030,7 @@ QDF_STATUS wlan_hdd_ftm_testmode_cmd(void *data, int len)
 	cmd_data->data = cdf_mem_malloc(len);
 
 	if (!cmd_data->data) {
-		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
+		CDF_TRACE(QDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
 			  ("Failed to allocate FTM command data buffer"));
 		cdf_mem_free(cmd_data);
 		return QDF_STATUS_E_NOMEM;

+ 6 - 6
core/hdd/src/wlan_hdd_hostapd.c

@@ -108,7 +108,7 @@
 void hdd_hostapd_channel_wakelock_init(hdd_context_t *pHddCtx)
 {
 	/* Initialize the wakelock */
-	cdf_wake_lock_init(&pHddCtx->sap_dfs_wakelock, "sap_dfs_wakelock");
+	qdf_wake_lock_create(&pHddCtx->sap_dfs_wakelock, "sap_dfs_wakelock");
 	atomic_set(&pHddCtx->sap_dfs_ref_cnt, 0);
 }
 
@@ -142,7 +142,7 @@ void hdd_hostapd_channel_allow_suspend(hdd_adapter_t *pAdapter,
 		if (atomic_dec_and_test(&pHddCtx->sap_dfs_ref_cnt)) {
 			hddLog(LOGE, FL("DFS: allowing suspend (chan %d)"),
 			       channel);
-			cdf_wake_lock_release(&pHddCtx->sap_dfs_wakelock,
+			qdf_wake_lock_release(&pHddCtx->sap_dfs_wakelock,
 					      WIFI_POWER_EVENT_WAKELOCK_DFS);
 		}
 	}
@@ -178,7 +178,7 @@ void hdd_hostapd_channel_prevent_suspend(hdd_adapter_t *pAdapter,
 		if (atomic_inc_return(&pHddCtx->sap_dfs_ref_cnt) == 1) {
 			hddLog(LOGE, FL("DFS: preventing suspend (chan %d)"),
 			       channel);
-			cdf_wake_lock_acquire(&pHddCtx->sap_dfs_wakelock,
+			qdf_wake_lock_acquire(&pHddCtx->sap_dfs_wakelock,
 					      WIFI_POWER_EVENT_WAKELOCK_DFS);
 		}
 	}
@@ -195,7 +195,7 @@ void hdd_hostapd_channel_wakelock_deinit(hdd_context_t *pHddCtx)
 {
 	if (atomic_read(&pHddCtx->sap_dfs_ref_cnt)) {
 		/* Release wakelock */
-		cdf_wake_lock_release(&pHddCtx->sap_dfs_wakelock,
+		qdf_wake_lock_release(&pHddCtx->sap_dfs_wakelock,
 				      WIFI_POWER_EVENT_WAKELOCK_DRIVER_EXIT);
 		/* Reset the reference count */
 		atomic_set(&pHddCtx->sap_dfs_ref_cnt, 0);
@@ -203,7 +203,7 @@ void hdd_hostapd_channel_wakelock_deinit(hdd_context_t *pHddCtx)
 	}
 
 	/* Destroy lock */
-	cdf_wake_lock_destroy(&pHddCtx->sap_dfs_wakelock);
+	qdf_wake_lock_destroy(&pHddCtx->sap_dfs_wakelock);
 }
 
 /**
@@ -1386,7 +1386,7 @@ QDF_STATUS hdd_hostapd_sap_event_cb(tpSap_Event pSapEvent,
 #ifdef FEATURE_WLAN_AUTO_SHUTDOWN
 		wlan_hdd_auto_shutdown_enable(pHddCtx, false);
 #endif
-		cdf_wake_lock_timeout_acquire(&pHddCtx->sap_wake_lock,
+		qdf_wake_lock_timeout_acquire(&pHddCtx->sap_wake_lock,
 					      HDD_SAP_WAKE_LOCK_DURATION,
 					      WIFI_POWER_EVENT_WAKELOCK_SAP);
 		{

+ 107 - 107
core/hdd/src/wlan_hdd_ipa.c

@@ -241,7 +241,7 @@ struct hdd_ipa_iface_context {
 
 	uint8_t iface_id;       /* This iface ID */
 	uint8_t sta_id;         /* This iface station ID */
-	cdf_spinlock_t interface_lock;
+	qdf_spinlock_t interface_lock;
 	uint32_t ifa_address;
 	struct hdd_ipa_iface_stats stats;
 };
@@ -380,10 +380,10 @@ struct hdd_ipa_priv {
 	 * APIs as it is taken care gracefully. Without this, kernel would throw
 	 * an warning if spin_lock_bh is used while IRQ is disabled
 	 */
-	cdf_spinlock_t rm_lock;
+	qdf_spinlock_t rm_lock;
 	struct uc_rm_work_struct uc_rm_work;
 	struct uc_op_work_struct uc_op_work[HDD_IPA_UC_OPCODE_MAX];
-	cdf_wake_lock_t wake_lock;
+	qdf_wake_lock_t wake_lock;
 	struct delayed_work wake_lock_work;
 	bool wake_lock_released;
 
@@ -392,7 +392,7 @@ struct hdd_ipa_priv {
 	atomic_t tx_ref_cnt;
 	cdf_nbuf_queue_t pm_queue_head;
 	struct work_struct pm_work;
-	cdf_spinlock_t pm_lock;
+	qdf_spinlock_t pm_lock;
 	bool suspended;
 
 	uint32_t pending_hw_desc_cnt;
@@ -423,7 +423,7 @@ struct hdd_ipa_priv {
 	bool pending_cons_req;
 	struct ipa_uc_stas_map assoc_stas_map[WLAN_MAX_STA_COUNT];
 	qdf_list_t pending_event;
-	cdf_mutex_t event_lock;
+	qdf_mutex_t event_lock;
 	bool ipa_pipes_down;
 	uint32_t ipa_tx_packets_diff;
 	uint32_t ipa_rx_packets_diff;
@@ -439,8 +439,8 @@ struct hdd_ipa_priv {
 	struct uc_rt_debug_info rt_bug_buffer[HDD_IPA_UC_RT_DEBUG_BUF_COUNT];
 	unsigned int rt_buf_fill_index;
 	cdf_mc_timer_t rt_debug_fill_timer;
-	cdf_mutex_t rt_debug_lock;
-	cdf_mutex_t ipa_lock;
+	qdf_mutex_t rt_debug_lock;
+	qdf_mutex_t ipa_lock;
 
 	/* CE resources */
 	qdf_dma_addr_t ce_sr_base_paddr;
@@ -724,7 +724,7 @@ static void hdd_ipa_uc_rt_debug_host_fill(void *ctext)
 
 	hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
 
-	cdf_mutex_acquire(&hdd_ipa->rt_debug_lock);
+	qdf_mutex_acquire(&hdd_ipa->rt_debug_lock);
 	dump_info = &hdd_ipa->rt_bug_buffer[
 		hdd_ipa->rt_buf_fill_index % HDD_IPA_UC_RT_DEBUG_BUF_COUNT];
 
@@ -737,7 +737,7 @@ static void hdd_ipa_uc_rt_debug_host_fill(void *ctext)
 	dump_info->tx_fwd_count = hdd_ipa->ipa_tx_forward;
 	dump_info->rx_destructor_call = hdd_ipa->ipa_rx_destructor_count;
 	hdd_ipa->rt_buf_fill_index++;
-	cdf_mutex_release(&hdd_ipa->rt_debug_lock);
+	qdf_mutex_release(&hdd_ipa->rt_debug_lock);
 
 	cdf_mc_timer_start(&hdd_ipa->rt_debug_fill_timer,
 		HDD_IPA_UC_RT_DEBUG_FILL_INTERVAL);
@@ -773,7 +773,7 @@ void hdd_ipa_uc_rt_debug_host_dump(hdd_context_t *hdd_ctx)
 	HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
 		"     TM     :   EXEP   :   DROP   :   NETS   :   MCBC   :   TXFD   :   DSTR   :   DSCD\n");
 
-	cdf_mutex_acquire(&hdd_ipa->rt_debug_lock);
+	qdf_mutex_acquire(&hdd_ipa->rt_debug_lock);
 	for (dump_count = 0;
 		dump_count < HDD_IPA_UC_RT_DEBUG_BUF_COUNT;
 		dump_count++) {
@@ -788,7 +788,7 @@ void hdd_ipa_uc_rt_debug_host_dump(hdd_context_t *hdd_ctx)
 			dump_info->rx_destructor_call,
 			dump_info->rx_discard_count);
 	}
-	cdf_mutex_release(&hdd_ipa->rt_debug_lock);
+	qdf_mutex_release(&hdd_ipa->rt_debug_lock);
 	HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
 		"======= WLAN-IPA DEBUG BUF DUMP END ========\n");
 }
@@ -874,7 +874,7 @@ static void hdd_ipa_uc_rt_debug_deinit(hdd_context_t *hdd_ctx)
 		cdf_mc_timer_stop(&hdd_ipa->rt_debug_fill_timer);
 	}
 	cdf_mc_timer_destroy(&hdd_ipa->rt_debug_fill_timer);
-	cdf_mutex_destroy(&hdd_ipa->rt_debug_lock);
+	qdf_mutex_destroy(&hdd_ipa->rt_debug_lock);
 
 	if (!hdd_ipa_is_rt_debugging_enabled(hdd_ctx)) {
 		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
@@ -901,7 +901,7 @@ static void hdd_ipa_uc_rt_debug_init(hdd_context_t *hdd_ctx)
 {
 	struct hdd_ipa_priv *hdd_ipa = (struct hdd_ipa_priv *)hdd_ctx->hdd_ipa;
 
-	cdf_mutex_init(&hdd_ipa->rt_debug_lock);
+	qdf_mutex_create(&hdd_ipa->rt_debug_lock);
 	cdf_mc_timer_init(&hdd_ipa->rt_debug_fill_timer, QDF_TIMER_TYPE_SW,
 		hdd_ipa_uc_rt_debug_host_fill, (void *)hdd_ctx);
 	hdd_ipa->rt_buf_fill_index = 0;
@@ -954,7 +954,7 @@ void hdd_ipa_uc_stat_query(hdd_context_t *pHddCtx,
 		return;
 	}
 
-	cdf_mutex_acquire(&hdd_ipa->ipa_lock);
+	qdf_mutex_acquire(&hdd_ipa->ipa_lock);
 	if ((HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
 		(false == hdd_ipa->resource_loading)) {
 		*ipa_tx_diff = hdd_ipa->ipa_tx_packets_diff;
@@ -962,7 +962,7 @@ void hdd_ipa_uc_stat_query(hdd_context_t *pHddCtx,
 		HDD_IPA_LOG(LOG1, "STAT Query TX DIFF %d, RX DIFF %d",
 			    *ipa_tx_diff, *ipa_rx_diff);
 	}
-	cdf_mutex_release(&hdd_ipa->ipa_lock);
+	qdf_mutex_release(&hdd_ipa->ipa_lock);
 	return;
 }
 
@@ -990,7 +990,7 @@ void hdd_ipa_uc_stat_request(hdd_adapter_t *adapter, uint8_t reason)
 	}
 
 	HDD_IPA_LOG(LOG1, "STAT REQ Reason %d", reason);
-	cdf_mutex_acquire(&hdd_ipa->ipa_lock);
+	qdf_mutex_acquire(&hdd_ipa->ipa_lock);
 	if ((HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
 		(false == hdd_ipa->resource_loading)) {
 		hdd_ipa->stat_req_reason = reason;
@@ -999,7 +999,7 @@ void hdd_ipa_uc_stat_request(hdd_adapter_t *adapter, uint8_t reason)
 			(int)WMA_VDEV_TXRX_GET_IPA_UC_FW_STATS_CMDID,
 			0, VDEV_CMD);
 	}
-	cdf_mutex_release(&hdd_ipa->ipa_lock);
+	qdf_mutex_release(&hdd_ipa->ipa_lock);
 }
 
 /**
@@ -1255,12 +1255,12 @@ hdd_ipa_uc_rm_notify_handler(void *context, enum ipa_rm_event event)
 	case IPA_RM_RESOURCE_GRANTED:
 		/* Differed RM Granted */
 		hdd_ipa_uc_enable_pipes(hdd_ipa);
-		cdf_mutex_acquire(&hdd_ipa->ipa_lock);
+		qdf_mutex_acquire(&hdd_ipa->ipa_lock);
 		if ((false == hdd_ipa->resource_unloading) &&
 			(!hdd_ipa->activated_fw_pipe)) {
 			hdd_ipa_uc_enable_pipes(hdd_ipa);
 		}
-		cdf_mutex_release(&hdd_ipa->ipa_lock);
+		qdf_mutex_release(&hdd_ipa->ipa_lock);
 		break;
 
 	case IPA_RM_RESOURCE_RELEASED:
@@ -1386,7 +1386,7 @@ static void hdd_ipa_uc_op_cb(struct op_msg_type *op_msg, void *usr_ctxt)
 
 	if ((HDD_IPA_UC_OPCODE_TX_RESUME == msg->op_code) ||
 	    (HDD_IPA_UC_OPCODE_RX_RESUME == msg->op_code)) {
-		cdf_mutex_acquire(&hdd_ipa->ipa_lock);
+		qdf_mutex_acquire(&hdd_ipa->ipa_lock);
 		hdd_ipa->activated_fw_pipe++;
 		if (HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) {
 			hdd_ipa->resource_loading = false;
@@ -1397,12 +1397,12 @@ static void hdd_ipa_uc_op_cb(struct op_msg_type *op_msg, void *usr_ctxt)
 						IPA_RM_RESOURCE_WLAN_CONS);
 			hdd_ipa->pending_cons_req = false;
 		}
-		cdf_mutex_release(&hdd_ipa->ipa_lock);
+		qdf_mutex_release(&hdd_ipa->ipa_lock);
 	}
 
 	if ((HDD_IPA_UC_OPCODE_TX_SUSPEND == msg->op_code) ||
 	    (HDD_IPA_UC_OPCODE_RX_SUSPEND == msg->op_code)) {
-		cdf_mutex_acquire(&hdd_ipa->ipa_lock);
+		qdf_mutex_acquire(&hdd_ipa->ipa_lock);
 		hdd_ipa->activated_fw_pipe--;
 		if (!hdd_ipa->activated_fw_pipe) {
 			hdd_ipa_uc_disable_pipes(hdd_ipa);
@@ -1415,7 +1415,7 @@ static void hdd_ipa_uc_op_cb(struct op_msg_type *op_msg, void *usr_ctxt)
 			hdd_ipa_uc_proc_pending_event(hdd_ipa);
 			hdd_ipa->pending_cons_req = false;
 		}
-		cdf_mutex_release(&hdd_ipa->ipa_lock);
+		qdf_mutex_release(&hdd_ipa->ipa_lock);
 	}
 
 	if ((HDD_IPA_UC_OPCODE_STATS == msg->op_code) &&
@@ -1599,7 +1599,7 @@ static void hdd_ipa_uc_op_cb(struct op_msg_type *op_msg, void *usr_ctxt)
 		/* STATs from FW */
 		uc_fw_stat = (struct ipa_uc_fw_stats *)
 			((uint8_t *)op_msg + sizeof(struct op_msg_type));
-		cdf_mutex_acquire(&hdd_ipa->ipa_lock);
+		qdf_mutex_acquire(&hdd_ipa->ipa_lock);
 		hdd_ipa->ipa_tx_packets_diff = HDD_BW_GET_DIFF(
 			uc_fw_stat->tx_pkts_completed,
 			hdd_ipa->ipa_p_tx_packets);
@@ -1614,7 +1614,7 @@ static void hdd_ipa_uc_op_cb(struct op_msg_type *op_msg, void *usr_ctxt)
 			(uc_fw_stat->rx_num_ind_drop_no_space +
 			uc_fw_stat->rx_num_ind_drop_no_buf +
 			uc_fw_stat->rx_num_pkts_indicated);
-		cdf_mutex_release(&hdd_ipa->ipa_lock);
+		qdf_mutex_release(&hdd_ipa->ipa_lock);
 	} else {
 		HDD_IPA_LOG(LOGE, "INVALID REASON %d",
 			    hdd_ipa->stat_req_reason);
@@ -1789,8 +1789,8 @@ static QDF_STATUS hdd_ipa_uc_ol_init(hdd_context_t *hdd_ctx)
 	cdf_mem_zero(&pipe_out, sizeof(struct ipa_wdi_out_params));
 
 	qdf_list_create(&ipa_ctxt->pending_event, 1000);
-	cdf_mutex_init(&ipa_ctxt->event_lock);
-	cdf_mutex_init(&ipa_ctxt->ipa_lock);
+	qdf_mutex_create(&ipa_ctxt->event_lock);
+	qdf_mutex_create(&ipa_ctxt->ipa_lock);
 
 	/* TX PIPE */
 	pipe_in.sys.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
@@ -1945,12 +1945,12 @@ int hdd_ipa_uc_ssr_deinit(void)
 	if (!hdd_ipa->ipa_pipes_down)
 		hdd_ipa_uc_disable_pipes(hdd_ipa);
 
-	cdf_mutex_acquire(&hdd_ipa->ipa_lock);
+	qdf_mutex_acquire(&hdd_ipa->ipa_lock);
 	for (idx = 0; idx < WLAN_MAX_STA_COUNT; idx++) {
 		hdd_ipa->assoc_stas_map[idx].is_reserved = false;
 		hdd_ipa->assoc_stas_map[idx].sta_id = 0xFF;
 	}
-	cdf_mutex_release(&hdd_ipa->ipa_lock);
+	qdf_mutex_release(&hdd_ipa->ipa_lock);
 
 	/* Full IPA driver cleanup not required since wlan driver is now
 	 * unloaded and reloaded after SSR.
@@ -2042,17 +2042,17 @@ static void hdd_ipa_wake_lock_timer_func(struct work_struct *work)
 						    struct hdd_ipa_priv,
 						    wake_lock_work);
 
-	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
+	qdf_spin_lock_bh(&hdd_ipa->rm_lock);
 
 	if (hdd_ipa->rm_state != HDD_IPA_RM_RELEASED)
 		goto end;
 
 	hdd_ipa->wake_lock_released = true;
-	cdf_wake_lock_release(&hdd_ipa->wake_lock,
+	qdf_wake_lock_release(&hdd_ipa->wake_lock,
 			      WIFI_POWER_EVENT_WAKELOCK_IPA);
 
 end:
-	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
+	qdf_spin_unlock_bh(&hdd_ipa->rm_lock);
 }
 
 /**
@@ -2068,26 +2068,26 @@ static int hdd_ipa_rm_request(struct hdd_ipa_priv *hdd_ipa)
 	if (!hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
 		return 0;
 
-	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
+	qdf_spin_lock_bh(&hdd_ipa->rm_lock);
 
 	switch (hdd_ipa->rm_state) {
 	case HDD_IPA_RM_GRANTED:
-		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
+		qdf_spin_unlock_bh(&hdd_ipa->rm_lock);
 		return 0;
 	case HDD_IPA_RM_GRANT_PENDING:
-		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
+		qdf_spin_unlock_bh(&hdd_ipa->rm_lock);
 		return -EINPROGRESS;
 	case HDD_IPA_RM_RELEASED:
 		hdd_ipa->rm_state = HDD_IPA_RM_GRANT_PENDING;
 		break;
 	}
 
-	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
+	qdf_spin_unlock_bh(&hdd_ipa->rm_lock);
 
 	ret = ipa_rm_inactivity_timer_request_resource(
 			IPA_RM_RESOURCE_WLAN_PROD);
 
-	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
+	qdf_spin_lock_bh(&hdd_ipa->rm_lock);
 	if (ret == 0) {
 		hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
 		hdd_ipa->stats.num_rm_grant_imm++;
@@ -2095,11 +2095,11 @@ static int hdd_ipa_rm_request(struct hdd_ipa_priv *hdd_ipa)
 
 	cancel_delayed_work(&hdd_ipa->wake_lock_work);
 	if (hdd_ipa->wake_lock_released) {
-		cdf_wake_lock_acquire(&hdd_ipa->wake_lock,
+		qdf_wake_lock_acquire(&hdd_ipa->wake_lock,
 				      WIFI_POWER_EVENT_WAKELOCK_IPA);
 		hdd_ipa->wake_lock_released = false;
 	}
-	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
+	qdf_spin_unlock_bh(&hdd_ipa->rm_lock);
 
 	return ret;
 }
@@ -2128,23 +2128,23 @@ static int hdd_ipa_rm_try_release(struct hdd_ipa_priv *hdd_ipa)
 	}
 	spin_unlock_bh(&hdd_ipa->q_lock);
 
-	cdf_spin_lock_bh(&hdd_ipa->pm_lock);
+	qdf_spin_lock_bh(&hdd_ipa->pm_lock);
 
 	if (!cdf_nbuf_is_queue_empty(&hdd_ipa->pm_queue_head)) {
-		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
+		qdf_spin_unlock_bh(&hdd_ipa->pm_lock);
 		return -EAGAIN;
 	}
-	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
+	qdf_spin_unlock_bh(&hdd_ipa->pm_lock);
 
-	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
+	qdf_spin_lock_bh(&hdd_ipa->rm_lock);
 	switch (hdd_ipa->rm_state) {
 	case HDD_IPA_RM_GRANTED:
 		break;
 	case HDD_IPA_RM_GRANT_PENDING:
-		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
+		qdf_spin_unlock_bh(&hdd_ipa->rm_lock);
 		return -EINPROGRESS;
 	case HDD_IPA_RM_RELEASED:
-		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
+		qdf_spin_unlock_bh(&hdd_ipa->rm_lock);
 		return 0;
 	}
 
@@ -2153,12 +2153,12 @@ static int hdd_ipa_rm_try_release(struct hdd_ipa_priv *hdd_ipa)
 	 */
 	hdd_ipa->rm_state = HDD_IPA_RM_RELEASED;
 	hdd_ipa->stats.num_rm_release++;
-	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
+	qdf_spin_unlock_bh(&hdd_ipa->rm_lock);
 
 	ret =
 		ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_WLAN_PROD);
 
-	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
+	qdf_spin_lock_bh(&hdd_ipa->rm_lock);
 	if (unlikely(ret != 0)) {
 		hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
 		WARN_ON(1);
@@ -2174,7 +2174,7 @@ static int hdd_ipa_rm_try_release(struct hdd_ipa_priv *hdd_ipa)
 			      msecs_to_jiffies
 				      (HDD_IPA_RX_INACTIVITY_MSEC_DELAY));
 
-	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
+	qdf_spin_unlock_bh(&hdd_ipa->rm_lock);
 
 	return ret;
 }
@@ -2211,9 +2211,9 @@ static void hdd_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
 			schedule_work(&hdd_ipa->uc_rm_work.work);
 			break;
 		}
-		cdf_spin_lock_bh(&hdd_ipa->rm_lock);
+		qdf_spin_lock_bh(&hdd_ipa->rm_lock);
 		hdd_ipa->rm_state = HDD_IPA_RM_GRANTED;
-		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
+		qdf_spin_unlock_bh(&hdd_ipa->rm_lock);
 		hdd_ipa->stats.num_rm_grant++;
 		break;
 
@@ -2434,7 +2434,7 @@ static int hdd_ipa_setup_rm(struct hdd_ipa_priv *hdd_ipa)
 		goto set_perf_failed;
 	}
 
-	cdf_wake_lock_init(&hdd_ipa->wake_lock, "wlan_ipa");
+	qdf_wake_lock_create(&hdd_ipa->wake_lock, "wlan_ipa");
 #ifdef CONFIG_CNSS
 	cnss_init_delayed_work(&hdd_ipa->wake_lock_work,
 			       hdd_ipa_wake_lock_timer_func);
@@ -2442,7 +2442,7 @@ static int hdd_ipa_setup_rm(struct hdd_ipa_priv *hdd_ipa)
 	INIT_DELAYED_WORK(&hdd_ipa->wake_lock_work,
 			  hdd_ipa_wake_lock_timer_func);
 #endif
-	cdf_spinlock_init(&hdd_ipa->rm_lock);
+	qdf_spinlock_create(&hdd_ipa->rm_lock);
 	hdd_ipa->rm_state = HDD_IPA_RM_RELEASED;
 	hdd_ipa->wake_lock_released = true;
 	atomic_set(&hdd_ipa->tx_ref_cnt, 0);
@@ -2478,12 +2478,12 @@ static void hdd_ipa_destroy_rm_resource(struct hdd_ipa_priv *hdd_ipa)
 		return;
 
 	cancel_delayed_work_sync(&hdd_ipa->wake_lock_work);
-	cdf_wake_lock_destroy(&hdd_ipa->wake_lock);
+	qdf_wake_lock_destroy(&hdd_ipa->wake_lock);
 
 #ifdef WLAN_OPEN_SOURCE
 	cancel_work_sync(&hdd_ipa->uc_rm_work.work);
 #endif
-	cdf_spinlock_destroy(&hdd_ipa->rm_lock);
+	qdf_spinlock_destroy(&hdd_ipa->rm_lock);
 
 	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WLAN_PROD);
 
@@ -2705,13 +2705,13 @@ static void hdd_ipa_send_pkt_to_tl(
 	hdd_adapter_t *adapter = NULL;
 	cdf_nbuf_t skb;
 
-	cdf_spin_lock_bh(&iface_context->interface_lock);
+	qdf_spin_lock_bh(&iface_context->interface_lock);
 	adapter = iface_context->adapter;
 	if (!adapter) {
 		HDD_IPA_LOG(CDF_TRACE_LEVEL_WARN, "Interface Down");
 		ipa_free_skb(ipa_tx_desc);
 		iface_context->stats.num_tx_drop++;
-		cdf_spin_unlock_bh(&iface_context->interface_lock);
+		qdf_spin_unlock_bh(&iface_context->interface_lock);
 		hdd_ipa_rm_try_release(hdd_ipa);
 		return;
 	}
@@ -2722,7 +2722,7 @@ static void hdd_ipa_send_pkt_to_tl(
 	 */
 	if (WLAN_HDD_GET_AP_CTX_PTR(adapter)->dfs_cac_block_tx) {
 		ipa_free_skb(ipa_tx_desc);
-		cdf_spin_unlock_bh(&iface_context->interface_lock);
+		qdf_spin_unlock_bh(&iface_context->interface_lock);
 		iface_context->stats.num_tx_cac_drop++;
 		hdd_ipa_rm_try_release(hdd_ipa);
 		return;
@@ -2731,7 +2731,7 @@ static void hdd_ipa_send_pkt_to_tl(
 	interface_id = adapter->sessionId;
 	++adapter->stats.tx_packets;
 
-	cdf_spin_unlock_bh(&iface_context->interface_lock);
+	qdf_spin_unlock_bh(&iface_context->interface_lock);
 
 	skb = ipa_tx_desc->skb;
 
@@ -2788,10 +2788,10 @@ static void hdd_ipa_pm_send_pkt_to_tl(struct work_struct *work)
 	cdf_nbuf_t skb;
 	uint32_t dequeued = 0;
 
-	cdf_spin_lock_bh(&hdd_ipa->pm_lock);
+	qdf_spin_lock_bh(&hdd_ipa->pm_lock);
 
 	while (((skb = cdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head)) != NULL)) {
-		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
+		qdf_spin_unlock_bh(&hdd_ipa->pm_lock);
 
 		pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
 
@@ -2800,10 +2800,10 @@ static void hdd_ipa_pm_send_pkt_to_tl(struct work_struct *work)
 		hdd_ipa_send_pkt_to_tl(pm_tx_cb->iface_context,
 				       pm_tx_cb->ipa_tx_desc);
 
-		cdf_spin_lock_bh(&hdd_ipa->pm_lock);
+		qdf_spin_lock_bh(&hdd_ipa->pm_lock);
 	}
 
-	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
+	qdf_spin_unlock_bh(&hdd_ipa->pm_lock);
 
 	hdd_ipa->stats.num_tx_dequeued += dequeued;
 	if (dequeued > hdd_ipa->stats.num_max_pm_queue)
@@ -2867,7 +2867,7 @@ static void hdd_ipa_i2w_cb(void *priv, enum ipa_dp_evt_type evt,
 	 */
 	hdd_ipa_rm_request(hdd_ipa);
 
-	cdf_spin_lock_bh(&hdd_ipa->pm_lock);
+	qdf_spin_lock_bh(&hdd_ipa->pm_lock);
 	/*
 	 * If host is still suspended then queue the packets and these will be
 	 * drained later when resume completes. When packet is arrived here and
@@ -2882,11 +2882,11 @@ static void hdd_ipa_i2w_cb(void *priv, enum ipa_dp_evt_type evt,
 		cdf_nbuf_queue_add(&hdd_ipa->pm_queue_head, skb);
 		hdd_ipa->stats.num_tx_queued++;
 
-		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
+		qdf_spin_unlock_bh(&hdd_ipa->pm_lock);
 		return;
 	}
 
-	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
+	qdf_spin_unlock_bh(&hdd_ipa->pm_lock);
 
 	/*
 	 * If we are here means, host is not suspended, wait for the work queue
@@ -2921,17 +2921,17 @@ int hdd_ipa_suspend(hdd_context_t *hdd_ctx)
 	if (atomic_read(&hdd_ipa->tx_ref_cnt))
 		return -EAGAIN;
 
-	cdf_spin_lock_bh(&hdd_ipa->rm_lock);
+	qdf_spin_lock_bh(&hdd_ipa->rm_lock);
 
 	if (hdd_ipa->rm_state != HDD_IPA_RM_RELEASED) {
-		cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
+		qdf_spin_unlock_bh(&hdd_ipa->rm_lock);
 		return -EAGAIN;
 	}
-	cdf_spin_unlock_bh(&hdd_ipa->rm_lock);
+	qdf_spin_unlock_bh(&hdd_ipa->rm_lock);
 
-	cdf_spin_lock_bh(&hdd_ipa->pm_lock);
+	qdf_spin_lock_bh(&hdd_ipa->pm_lock);
 	hdd_ipa->suspended = true;
-	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
+	qdf_spin_unlock_bh(&hdd_ipa->pm_lock);
 
 	return 0;
 }
@@ -2951,9 +2951,9 @@ int hdd_ipa_resume(hdd_context_t *hdd_ctx)
 
 	schedule_work(&hdd_ipa->pm_work);
 
-	cdf_spin_lock_bh(&hdd_ipa->pm_lock);
+	qdf_spin_lock_bh(&hdd_ipa->pm_lock);
 	hdd_ipa->suspended = false;
-	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
+	qdf_spin_unlock_bh(&hdd_ipa->pm_lock);
 
 	return 0;
 }
@@ -3399,11 +3399,11 @@ static void hdd_ipa_cleanup_iface(struct hdd_ipa_iface_context *iface_context)
 
 	hdd_ipa_clean_hdr(iface_context->adapter);
 
-	cdf_spin_lock_bh(&iface_context->interface_lock);
+	qdf_spin_lock_bh(&iface_context->interface_lock);
 	iface_context->adapter->ipa_context = NULL;
 	iface_context->adapter = NULL;
 	iface_context->tl_context = NULL;
-	cdf_spin_unlock_bh(&iface_context->interface_lock);
+	qdf_spin_unlock_bh(&iface_context->interface_lock);
 	iface_context->ifa_address = 0;
 	if (!iface_context->hdd_ipa->num_iface) {
 		HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
@@ -3651,7 +3651,7 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 		hdd_err("IPA resource %s inprogress",
 			hdd_ipa->resource_loading ? "load":"unload");
 
-		cdf_mutex_acquire(&hdd_ipa->event_lock);
+		qdf_mutex_acquire(&hdd_ipa->event_lock);
 
 		pending_event_count = qdf_list_size(&hdd_ipa->pending_event);
 		if (pending_event_count >= HDD_IPA_MAX_PENDING_EVENT_COUNT) {
@@ -3666,7 +3666,7 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 
 		if (!pending_event) {
 			hdd_err("Pending event memory alloc fail");
-			cdf_mutex_release(&hdd_ipa->event_lock);
+			qdf_mutex_release(&hdd_ipa->event_lock);
 			return -ENOMEM;
 		}
 
@@ -3679,7 +3679,7 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 		qdf_list_insert_back(&hdd_ipa->pending_event,
 				&pending_event->node);
 
-		cdf_mutex_release(&hdd_ipa->event_lock);
+		qdf_mutex_release(&hdd_ipa->event_lock);
 		return 0;
 	}
 
@@ -3699,7 +3699,7 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 			hdd_ipa_uc_offload_enable_disable(adapter,
 				SIR_STA_RX_DATA_OFFLOAD, 1);
 
-		cdf_mutex_acquire(&hdd_ipa->event_lock);
+		qdf_mutex_acquire(&hdd_ipa->event_lock);
 
 		if (!hdd_ipa_uc_is_enabled(hdd_ipa->hdd_ctx)) {
 			HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
@@ -3710,7 +3710,7 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 			/* Enable IPA UC TX PIPE when STA connected */
 			ret = hdd_ipa_uc_handle_first_con(hdd_ipa);
 			if (ret) {
-				cdf_mutex_release(&hdd_ipa->event_lock);
+				qdf_mutex_release(&hdd_ipa->event_lock);
 				HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
 					"handle 1st con ret %d", ret);
 				hdd_ipa_uc_offload_enable_disable(adapter,
@@ -3720,7 +3720,7 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 		}
 		ret = hdd_ipa_setup_iface(hdd_ipa, adapter, sta_id);
 		if (ret) {
-			cdf_mutex_release(&hdd_ipa->event_lock);
+			qdf_mutex_release(&hdd_ipa->event_lock);
 			hdd_ipa_uc_offload_enable_disable(adapter,
 				SIR_STA_RX_DATA_OFFLOAD, 0);
 			goto end;
@@ -3732,7 +3732,7 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 #endif /* IPA_UC_OFFLOAD */
 		}
 
-		cdf_mutex_release(&hdd_ipa->event_lock);
+		qdf_mutex_release(&hdd_ipa->event_lock);
 
 		hdd_ipa->sta_connected = 1;
 		break;
@@ -3751,13 +3751,13 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 			hdd_ipa_uc_offload_enable_disable(adapter,
 				SIR_AP_RX_DATA_OFFLOAD, 1);
 		}
-		cdf_mutex_acquire(&hdd_ipa->event_lock);
+		qdf_mutex_acquire(&hdd_ipa->event_lock);
 		ret = hdd_ipa_setup_iface(hdd_ipa, adapter, sta_id);
 		if (ret) {
 			HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
 				"%s: Evt: %d, Interface setup failed",
 				msg_ex->name, meta.msg_type);
-			cdf_mutex_release(&hdd_ipa->event_lock);
+			qdf_mutex_release(&hdd_ipa->event_lock);
 			goto end;
 
 #ifdef IPA_UC_OFFLOAD
@@ -3766,18 +3766,18 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 				(adapter->ipa_context))->iface_id;
 #endif /* IPA_UC_OFFLOAD */
 		}
-		cdf_mutex_release(&hdd_ipa->event_lock);
+		qdf_mutex_release(&hdd_ipa->event_lock);
 		break;
 
 	case WLAN_STA_DISCONNECT:
-		cdf_mutex_acquire(&hdd_ipa->event_lock);
+		qdf_mutex_acquire(&hdd_ipa->event_lock);
 		hdd_ipa_cleanup_iface(adapter->ipa_context);
 
 		if (!hdd_ipa->sta_connected) {
 			HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
 				"%s: Evt: %d, STA already disconnected",
 				msg_ex->name, meta.msg_type);
-			cdf_mutex_release(&hdd_ipa->event_lock);
+			qdf_mutex_release(&hdd_ipa->event_lock);
 			return -EINVAL;
 		}
 		hdd_ipa->sta_connected = 0;
@@ -3801,7 +3801,7 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 			vdev_to_iface[adapter->sessionId] = HDD_IPA_MAX_IFACE;
 		}
 
-		cdf_mutex_release(&hdd_ipa->event_lock);
+		qdf_mutex_release(&hdd_ipa->event_lock);
 		break;
 
 	case WLAN_AP_DISCONNECT:
@@ -3812,7 +3812,7 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 			return -EINVAL;
 		}
 
-		cdf_mutex_acquire(&hdd_ipa->event_lock);
+		qdf_mutex_acquire(&hdd_ipa->event_lock);
 		hdd_ipa_cleanup_iface(adapter->ipa_context);
 		if ((!hdd_ipa->num_iface) &&
 			(HDD_IPA_UC_NUM_WDI_PIPE ==
@@ -3837,7 +3837,7 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 				SIR_AP_RX_DATA_OFFLOAD, 0);
 			vdev_to_iface[adapter->sessionId] = HDD_IPA_MAX_IFACE;
 		}
-		cdf_mutex_release(&hdd_ipa->event_lock);
+		qdf_mutex_release(&hdd_ipa->event_lock);
 		break;
 
 	case WLAN_CLIENT_CONNECT_EX:
@@ -3851,13 +3851,13 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 			return 0;
 		}
 
-		cdf_mutex_acquire(&hdd_ipa->event_lock);
+		qdf_mutex_acquire(&hdd_ipa->event_lock);
 		if (hdd_ipa_uc_find_add_assoc_sta(hdd_ipa,
 				true, sta_id)) {
 			HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
 				"%s: STA ID %d found, not valid",
 				adapter->dev->name, sta_id);
-			cdf_mutex_release(&hdd_ipa->event_lock);
+			qdf_mutex_release(&hdd_ipa->event_lock);
 			return 0;
 		}
 
@@ -3867,7 +3867,7 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 		   !hdd_ipa->sta_connected)) {
 			ret = hdd_ipa_uc_handle_first_con(hdd_ipa);
 			if (ret) {
-				cdf_mutex_release(&hdd_ipa->event_lock);
+				qdf_mutex_release(&hdd_ipa->event_lock);
 				HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
 					    "%s: handle 1st con ret %d",
 					    adapter->dev->name, ret);
@@ -3877,7 +3877,7 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 
 		hdd_ipa->sap_num_connected_sta++;
 
-		cdf_mutex_release(&hdd_ipa->event_lock);
+		qdf_mutex_release(&hdd_ipa->event_lock);
 
 		meta.msg_type = type;
 		meta.msg_len = (sizeof(struct ipa_wlan_msg_ex) +
@@ -3922,12 +3922,12 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 			return 0;
 		}
 
-		cdf_mutex_acquire(&hdd_ipa->event_lock);
+		qdf_mutex_acquire(&hdd_ipa->event_lock);
 		if (!hdd_ipa_uc_find_add_assoc_sta(hdd_ipa, false, sta_id)) {
 			HDD_IPA_LOG(CDF_TRACE_LEVEL_ERROR,
 				    "%s: STA ID %d NOT found, not valid",
 				    msg_ex->name, sta_id);
-			cdf_mutex_release(&hdd_ipa->event_lock);
+			qdf_mutex_release(&hdd_ipa->event_lock);
 			return 0;
 		}
 		hdd_ipa->sap_num_connected_sta--;
@@ -3939,7 +3939,7 @@ int hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
 			&& (HDD_IPA_UC_NUM_WDI_PIPE ==
 				hdd_ipa->activated_fw_pipe))
 			hdd_ipa_uc_handle_last_discon(hdd_ipa);
-		cdf_mutex_release(&hdd_ipa->event_lock);
+		qdf_mutex_release(&hdd_ipa->event_lock);
 		break;
 
 	default:
@@ -4057,7 +4057,7 @@ QDF_STATUS hdd_ipa_init(hdd_context_t *hdd_ctx)
 			hdd_ipa_adapter_2_client[i].prod_client;
 		iface_context->iface_id = i;
 		iface_context->adapter = NULL;
-		cdf_spinlock_init(&iface_context->interface_lock);
+		qdf_spinlock_create(&iface_context->interface_lock);
 	}
 
 #ifdef CONFIG_CNSS
@@ -4065,7 +4065,7 @@ QDF_STATUS hdd_ipa_init(hdd_context_t *hdd_ctx)
 #else
 	INIT_WORK(&hdd_ipa->pm_work, hdd_ipa_pm_send_pkt_to_tl);
 #endif
-	cdf_spinlock_init(&hdd_ipa->pm_lock);
+	qdf_spinlock_create(&hdd_ipa->pm_lock);
 	cdf_nbuf_queue_init(&hdd_ipa->pm_queue_head);
 
 	ret = hdd_ipa_setup_rm(hdd_ipa);
@@ -4102,7 +4102,7 @@ QDF_STATUS hdd_ipa_init(hdd_context_t *hdd_ctx)
 fail_create_sys_pipe:
 	hdd_ipa_destroy_rm_resource(hdd_ipa);
 fail_setup_rm:
-	cdf_spinlock_destroy(&hdd_ipa->pm_lock);
+	qdf_spinlock_destroy(&hdd_ipa->pm_lock);
 fail_get_resource:
 	cdf_mem_free(hdd_ipa);
 	hdd_ctx->hdd_ipa = NULL;
@@ -4161,24 +4161,24 @@ QDF_STATUS hdd_ipa_cleanup(hdd_context_t *hdd_ctx)
 	cancel_work_sync(&hdd_ipa->pm_work);
 #endif
 
-	cdf_spin_lock_bh(&hdd_ipa->pm_lock);
+	qdf_spin_lock_bh(&hdd_ipa->pm_lock);
 
 	while (((skb = cdf_nbuf_queue_remove(&hdd_ipa->pm_queue_head)) != NULL)) {
-		cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
+		qdf_spin_unlock_bh(&hdd_ipa->pm_lock);
 
 		pm_tx_cb = (struct hdd_ipa_pm_tx_cb *)skb->cb;
 		ipa_free_skb(pm_tx_cb->ipa_tx_desc);
 
-		cdf_spin_lock_bh(&hdd_ipa->pm_lock);
+		qdf_spin_lock_bh(&hdd_ipa->pm_lock);
 	}
-	cdf_spin_unlock_bh(&hdd_ipa->pm_lock);
+	qdf_spin_unlock_bh(&hdd_ipa->pm_lock);
 
-	cdf_spinlock_destroy(&hdd_ipa->pm_lock);
+	qdf_spinlock_destroy(&hdd_ipa->pm_lock);
 
 	/* destory the interface lock */
 	for (i = 0; i < HDD_IPA_MAX_IFACE; i++) {
 		iface_context = &hdd_ipa->iface_context[i];
-		cdf_spinlock_destroy(&iface_context->interface_lock);
+		qdf_spinlock_destroy(&iface_context->interface_lock);
 	}
 
 	/* This should never hit but still make sure that there are no pending
@@ -4207,8 +4207,8 @@ QDF_STATUS hdd_ipa_cleanup(hdd_context_t *hdd_ctx)
 		HDD_IPA_LOG(CDF_TRACE_LEVEL_INFO,
 			    "%s: Disconnect RX PIPE", __func__);
 		ipa_disconnect_wdi_pipe(hdd_ipa->rx_pipe_handle);
-		cdf_mutex_destroy(&hdd_ipa->event_lock);
-		cdf_mutex_destroy(&hdd_ipa->ipa_lock);
+		qdf_mutex_destroy(&hdd_ipa->event_lock);
+		qdf_mutex_destroy(&hdd_ipa->ipa_lock);
 		hdd_ipa_cleanup_pending_event(hdd_ipa);
 
 #ifdef WLAN_OPEN_SOURCE

+ 16 - 16
core/hdd/src/wlan_hdd_lro.c

@@ -101,7 +101,7 @@ static void hdd_lro_desc_pool_init(struct hdd_lro_desc_pool *lro_desc_pool,
 		list_add_tail(&lro_desc_pool->lro_desc_array[i].lro_node,
 			 &lro_desc_pool->lro_free_list_head);
 	}
-	cdf_spinlock_init(&lro_desc_pool->lro_pool_lock);
+	qdf_spinlock_create(&lro_desc_pool->lro_pool_lock);
 }
 
 /**
@@ -128,7 +128,7 @@ static void hdd_lro_desc_info_init(struct hdd_lro_s *hdd_info)
 			 lro_hash_table[i].lro_desc_list);
 	}
 
-	cdf_spinlock_init(&hdd_info->lro_desc_info.lro_hash_lock);
+	qdf_spinlock_create(&hdd_info->lro_desc_info.lro_hash_lock);
 }
 
 /**
@@ -142,7 +142,7 @@ static void hdd_lro_desc_info_init(struct hdd_lro_s *hdd_info)
 static void hdd_lro_desc_pool_deinit(struct hdd_lro_desc_pool *lro_desc_pool)
 {
 	INIT_LIST_HEAD(&lro_desc_pool->lro_free_list_head);
-	cdf_spinlock_destroy(&lro_desc_pool->lro_pool_lock);
+	qdf_spinlock_destroy(&lro_desc_pool->lro_pool_lock);
 }
 
 /**
@@ -160,7 +160,7 @@ static void hdd_lro_desc_info_deinit(struct hdd_lro_s *hdd_info)
 	struct hdd_lro_desc_info *desc_info = &hdd_info->lro_desc_info;
 
 	hdd_lro_desc_pool_deinit(&desc_info->lro_desc_pool);
-	cdf_spinlock_destroy(&desc_info->lro_hash_lock);
+	qdf_spinlock_destroy(&desc_info->lro_hash_lock);
 }
 
 /**
@@ -225,7 +225,7 @@ static int hdd_lro_desc_find(hdd_adapter_t *adapter,
 		return -EINVAL;
 	}
 
-	cdf_spin_lock_bh(&desc_info->lro_hash_lock);
+	qdf_spin_lock_bh(&desc_info->lro_hash_lock);
 	/* Check if this flow exists in the descriptor list */
 	list_for_each(ptr, &lro_hash_table->lro_desc_list) {
 		struct net_lro_desc *tmp_lro_desc = NULL;
@@ -234,27 +234,27 @@ static int hdd_lro_desc_find(hdd_adapter_t *adapter,
 		if (tmp_lro_desc->active) {
 			if (hdd_lro_tcp_flow_match(tmp_lro_desc, iph, tcph)) {
 				*lro_desc = entry->lro_desc;
-				cdf_spin_unlock_bh(&desc_info->lro_hash_lock);
+				qdf_spin_unlock_bh(&desc_info->lro_hash_lock);
 				return 0;
 			}
 		}
 	}
-	cdf_spin_unlock_bh(&desc_info->lro_hash_lock);
+	qdf_spin_unlock_bh(&desc_info->lro_hash_lock);
 
 	/* no existing flow found, a new LRO desc needs to be allocated */
 	free_pool = adapter->lro_info.lro_desc_info.lro_desc_pool;
-	cdf_spin_lock_bh(&free_pool.lro_pool_lock);
+	qdf_spin_lock_bh(&free_pool.lro_pool_lock);
 	entry = list_first_entry_or_null(
 		 &free_pool.lro_free_list_head,
 		 struct hdd_lro_desc_entry, lro_node);
 	if (NULL == entry) {
 		hdd_err("Could not allocate LRO desc!");
-		cdf_spin_unlock_bh(&free_pool.lro_pool_lock);
+		qdf_spin_unlock_bh(&free_pool.lro_pool_lock);
 		return -ENOMEM;
 	}
 
 	list_del_init(&entry->lro_node);
-	cdf_spin_unlock_bh(&free_pool.lro_pool_lock);
+	qdf_spin_unlock_bh(&free_pool.lro_pool_lock);
 
 	if (NULL == entry->lro_desc) {
 		hdd_err("entry->lro_desc is NULL!\n");
@@ -267,10 +267,10 @@ static int hdd_lro_desc_find(hdd_adapter_t *adapter,
 	 * lro_desc->active should be 0 and lro_desc->tcp_rcv_tsval
 	 * should be 0 for newly allocated lro descriptors
 	 */
-	cdf_spin_lock_bh(&desc_info->lro_hash_lock);
+	qdf_spin_lock_bh(&desc_info->lro_hash_lock);
 	list_add_tail(&entry->lro_node,
 		 &lro_hash_table->lro_desc_list);
-	cdf_spin_unlock_bh(&desc_info->lro_hash_lock);
+	qdf_spin_unlock_bh(&desc_info->lro_hash_lock);
 	*lro_desc = entry->lro_desc;
 
 	return 0;
@@ -386,14 +386,14 @@ static void hdd_lro_desc_free(struct net_lro_desc *desc,
 
 	entry = &desc_info->lro_desc_pool.lro_desc_array[i];
 
-	cdf_spin_lock_bh(&desc_info->lro_hash_lock);
+	qdf_spin_lock_bh(&desc_info->lro_hash_lock);
 	list_del_init(&entry->lro_node);
-	cdf_spin_unlock_bh(&desc_info->lro_hash_lock);
+	qdf_spin_unlock_bh(&desc_info->lro_hash_lock);
 
-	cdf_spin_lock_bh(&desc_info->lro_desc_pool.lro_pool_lock);
+	qdf_spin_lock_bh(&desc_info->lro_desc_pool.lro_pool_lock);
 	list_add_tail(&entry->lro_node, &desc_info->
 		 lro_desc_pool.lro_free_list_head);
-	cdf_spin_unlock_bh(&desc_info->lro_desc_pool.lro_pool_lock);
+	qdf_spin_unlock_bh(&desc_info->lro_desc_pool.lro_pool_lock);
 }
 
 /**

+ 28 - 28
core/hdd/src/wlan_hdd_main.c

@@ -147,7 +147,7 @@ static int wlan_hdd_inited;
  */
 DEFINE_SPINLOCK(hdd_context_lock);
 
-static cdf_wake_lock_t wlan_wake_lock;
+static qdf_wake_lock_t wlan_wake_lock;
 
 #define WOW_MAX_FILTER_LISTS 1
 #define WOW_MAX_FILTERS_PER_LIST 4
@@ -3149,10 +3149,10 @@ QDF_STATUS hdd_get_front_adapter(hdd_context_t *hdd_ctx,
 				 hdd_adapter_list_node_t **padapterNode)
 {
 	QDF_STATUS status;
-	cdf_spin_lock(&hdd_ctx->hdd_adapter_lock);
+	qdf_spin_lock(&hdd_ctx->hdd_adapter_lock);
 	status = qdf_list_peek_front(&hdd_ctx->hddAdapters,
 				     (qdf_list_node_t **) padapterNode);
-	cdf_spin_unlock(&hdd_ctx->hdd_adapter_lock);
+	qdf_spin_unlock(&hdd_ctx->hdd_adapter_lock);
 	return status;
 }
 
@@ -3161,12 +3161,12 @@ QDF_STATUS hdd_get_next_adapter(hdd_context_t *hdd_ctx,
 				hdd_adapter_list_node_t **pNextAdapterNode)
 {
 	QDF_STATUS status;
-	cdf_spin_lock(&hdd_ctx->hdd_adapter_lock);
+	qdf_spin_lock(&hdd_ctx->hdd_adapter_lock);
 	status = qdf_list_peek_next(&hdd_ctx->hddAdapters,
 				    (qdf_list_node_t *) adapterNode,
 				    (qdf_list_node_t **) pNextAdapterNode);
 
-	cdf_spin_unlock(&hdd_ctx->hdd_adapter_lock);
+	qdf_spin_unlock(&hdd_ctx->hdd_adapter_lock);
 	return status;
 }
 
@@ -3174,10 +3174,10 @@ QDF_STATUS hdd_remove_adapter(hdd_context_t *hdd_ctx,
 			      hdd_adapter_list_node_t *adapterNode)
 {
 	QDF_STATUS status;
-	cdf_spin_lock(&hdd_ctx->hdd_adapter_lock);
+	qdf_spin_lock(&hdd_ctx->hdd_adapter_lock);
 	status = qdf_list_remove_node(&hdd_ctx->hddAdapters,
 				      &adapterNode->node);
-	cdf_spin_unlock(&hdd_ctx->hdd_adapter_lock);
+	qdf_spin_unlock(&hdd_ctx->hdd_adapter_lock);
 	return status;
 }
 
@@ -3185,10 +3185,10 @@ QDF_STATUS hdd_remove_front_adapter(hdd_context_t *hdd_ctx,
 				    hdd_adapter_list_node_t **padapterNode)
 {
 	QDF_STATUS status;
-	cdf_spin_lock(&hdd_ctx->hdd_adapter_lock);
+	qdf_spin_lock(&hdd_ctx->hdd_adapter_lock);
 	status = qdf_list_remove_front(&hdd_ctx->hddAdapters,
 				       (qdf_list_node_t **) padapterNode);
-	cdf_spin_unlock(&hdd_ctx->hdd_adapter_lock);
+	qdf_spin_unlock(&hdd_ctx->hdd_adapter_lock);
 	return status;
 }
 
@@ -3196,10 +3196,10 @@ QDF_STATUS hdd_add_adapter_back(hdd_context_t *hdd_ctx,
 				hdd_adapter_list_node_t *adapterNode)
 {
 	QDF_STATUS status;
-	cdf_spin_lock(&hdd_ctx->hdd_adapter_lock);
+	qdf_spin_lock(&hdd_ctx->hdd_adapter_lock);
 	status = qdf_list_insert_back(&hdd_ctx->hddAdapters,
 				      (qdf_list_node_t *) adapterNode);
-	cdf_spin_unlock(&hdd_ctx->hdd_adapter_lock);
+	qdf_spin_unlock(&hdd_ctx->hdd_adapter_lock);
 	return status;
 }
 
@@ -3207,10 +3207,10 @@ QDF_STATUS hdd_add_adapter_front(hdd_context_t *hdd_ctx,
 				 hdd_adapter_list_node_t *adapterNode)
 {
 	QDF_STATUS status;
-	cdf_spin_lock(&hdd_ctx->hdd_adapter_lock);
+	qdf_spin_lock(&hdd_ctx->hdd_adapter_lock);
 	status = qdf_list_insert_front(&hdd_ctx->hddAdapters,
 				       (qdf_list_node_t *) adapterNode);
-	cdf_spin_unlock(&hdd_ctx->hdd_adapter_lock);
+	qdf_spin_unlock(&hdd_ctx->hdd_adapter_lock);
 	return status;
 }
 
@@ -3774,10 +3774,10 @@ void hdd_wlan_exit(hdd_context_t *hdd_ctx)
 	}
 #ifdef WLAN_FEATURE_HOLD_RX_WAKELOCK
 	/* Destroy the wake lock */
-	cdf_wake_lock_destroy(&hdd_ctx->rx_wake_lock);
+	qdf_wake_lock_destroy(&hdd_ctx->rx_wake_lock);
 #endif
 	/* Destroy the wake lock */
-	cdf_wake_lock_destroy(&hdd_ctx->sap_wake_lock);
+	qdf_wake_lock_destroy(&hdd_ctx->sap_wake_lock);
 
 	hdd_hostapd_channel_wakelock_deinit(hdd_ctx);
 
@@ -3965,17 +3965,17 @@ QDF_STATUS hdd_post_cds_enable_config(hdd_context_t *hdd_ctx)
 /* wake lock APIs for HDD */
 void hdd_prevent_suspend(uint32_t reason)
 {
-	cdf_wake_lock_acquire(&wlan_wake_lock, reason);
+	qdf_wake_lock_acquire(&wlan_wake_lock, reason);
 }
 
 void hdd_allow_suspend(uint32_t reason)
 {
-	cdf_wake_lock_release(&wlan_wake_lock, reason);
+	qdf_wake_lock_release(&wlan_wake_lock, reason);
 }
 
 void hdd_prevent_suspend_timeout(uint32_t timeout, uint32_t reason)
 {
-	cdf_wake_lock_timeout_acquire(&wlan_wake_lock, timeout, reason);
+	qdf_wake_lock_timeout_acquire(&wlan_wake_lock, timeout, reason);
 }
 
 /**
@@ -5119,10 +5119,10 @@ hdd_context_t *hdd_init_context(struct device *dev, void *hif_sc)
 	init_completion(&hdd_ctx->mc_sus_event_var);
 	init_completion(&hdd_ctx->ready_to_suspend);
 
-	cdf_spinlock_init(&hdd_ctx->connection_status_lock);
-	cdf_spinlock_init(&hdd_ctx->sched_scan_lock);
+	qdf_spinlock_create(&hdd_ctx->connection_status_lock);
+	qdf_spinlock_create(&hdd_ctx->sched_scan_lock);
 
-	cdf_spinlock_init(&hdd_ctx->hdd_adapter_lock);
+	qdf_spinlock_create(&hdd_ctx->hdd_adapter_lock);
 	qdf_list_create(&hdd_ctx->hddAdapters, MAX_NUMBER_OF_ADAPTERS);
 
 	wlan_hdd_cfg80211_extscan_init(hdd_ctx);
@@ -5670,10 +5670,10 @@ int hdd_wlan_startup(struct device *dev, void *hif_sc)
 
 #ifdef WLAN_FEATURE_HOLD_RX_WAKELOCK
 	/* Initialize the wake lcok */
-	cdf_wake_lock_init(&hdd_ctx->rx_wake_lock, "qcom_rx_wakelock");
+	qdf_wake_lock_create(&hdd_ctx->rx_wake_lock, "qcom_rx_wakelock");
 #endif
 	/* Initialize the wake lcok */
-	cdf_wake_lock_init(&hdd_ctx->sap_wake_lock, "qcom_sap_wakelock");
+	qdf_wake_lock_create(&hdd_ctx->sap_wake_lock, "qcom_sap_wakelock");
 
 	hdd_hostapd_channel_wakelock_init(hdd_ctx);
 
@@ -5746,9 +5746,9 @@ int hdd_wlan_startup(struct device *dev, void *hif_sc)
 				  hdd_ctx->target_hw_version,
 				  hdd_ctx->target_hw_name);
 
-	cdf_spinlock_init(&hdd_ctx->hdd_roc_req_q_lock);
+	qdf_spinlock_create(&hdd_ctx->hdd_roc_req_q_lock);
 	qdf_list_create((&hdd_ctx->hdd_roc_req_q), MAX_ROC_REQ_QUEUE_ENTRY);
-	cdf_spinlock_init(&hdd_ctx->hdd_scan_req_q_lock);
+	qdf_spinlock_create(&hdd_ctx->hdd_scan_req_q_lock);
 	qdf_list_create((&hdd_ctx->hdd_scan_req_q), CFG_MAX_SCAN_COUNT_MAX);
 #ifdef CONFIG_CNSS
 	cnss_init_delayed_work(&hdd_ctx->roc_req_work,
@@ -6809,7 +6809,7 @@ static int __hdd_module_init(void)
 	pr_info("%s: Loading driver v%s\n", WLAN_MODULE_NAME,
 		QWLAN_VERSIONSTR TIMER_MANAGER_STR MEMORY_DEBUG_STR);
 
-	cdf_wake_lock_init(&wlan_wake_lock, "wlan");
+	qdf_wake_lock_create(&wlan_wake_lock, "wlan");
 
 	hdd_set_conparam((uint32_t) con_mode);
 
@@ -6823,7 +6823,7 @@ static int __hdd_module_init(void)
 
 	return 0;
 out:
-	cdf_wake_lock_destroy(&wlan_wake_lock);
+	qdf_wake_lock_destroy(&wlan_wake_lock);
 	return ret;
 }
 
@@ -6839,7 +6839,7 @@ static void __hdd_module_exit(void)
 
 	wlan_hdd_unregister_driver();
 
-	cdf_wake_lock_destroy(&wlan_wake_lock);
+	qdf_wake_lock_destroy(&wlan_wake_lock);
 
 	return;
 }

+ 10 - 10
core/hdd/src/wlan_hdd_p2p.c

@@ -720,10 +720,10 @@ static int wlan_hdd_roc_request_enqueue(hdd_adapter_t *adapter,
 	hdd_roc_req->pRemainChanCtx = remain_chan_ctx;
 
 	/* Enqueue this RoC request */
-	cdf_spin_lock(&hdd_ctx->hdd_roc_req_q_lock);
+	qdf_spin_lock(&hdd_ctx->hdd_roc_req_q_lock);
 	status = qdf_list_insert_back(&hdd_ctx->hdd_roc_req_q,
 					&hdd_roc_req->node);
-	cdf_spin_unlock(&hdd_ctx->hdd_roc_req_q_lock);
+	qdf_spin_unlock(&hdd_ctx->hdd_roc_req_q_lock);
 
 	if (QDF_STATUS_SUCCESS != status) {
 		hddLog(LOGP, FL("Not able to enqueue RoC Req context"));
@@ -790,15 +790,15 @@ void wlan_hdd_roc_request_dequeue(struct work_struct *work)
 	 * that any pending roc in the queue will be scheduled
 	 * on the current roc completion by scheduling the work queue.
 	 */
-	cdf_spin_lock(&hdd_ctx->hdd_roc_req_q_lock);
+	qdf_spin_lock(&hdd_ctx->hdd_roc_req_q_lock);
 	if (list_empty(&hdd_ctx->hdd_roc_req_q.anchor)) {
-		cdf_spin_unlock(&hdd_ctx->hdd_roc_req_q_lock);
+		qdf_spin_unlock(&hdd_ctx->hdd_roc_req_q_lock);
 		hdd_debug("list is empty");
 		return;
 	}
 	status = qdf_list_remove_front(&hdd_ctx->hdd_roc_req_q,
 			(qdf_list_node_t **) &hdd_roc_req);
-	cdf_spin_unlock(&hdd_ctx->hdd_roc_req_q_lock);
+	qdf_spin_unlock(&hdd_ctx->hdd_roc_req_q_lock);
 	if (QDF_STATUS_SUCCESS != status) {
 		hdd_debug("unable to remove roc element from list");
 		return;
@@ -894,9 +894,9 @@ static int wlan_hdd_request_remain_on_channel(struct wiphy *wiphy,
 		}
 	}
 
-	cdf_spin_lock(&pHddCtx->hdd_roc_req_q_lock);
+	qdf_spin_lock(&pHddCtx->hdd_roc_req_q_lock);
 	size = qdf_list_size(&(pHddCtx->hdd_roc_req_q));
-	cdf_spin_unlock(&pHddCtx->hdd_roc_req_q_lock);
+	qdf_spin_unlock(&pHddCtx->hdd_roc_req_q_lock);
 	if ((isBusy == false) && (!size)) {
 		status = wlan_hdd_execute_remain_on_channel(pAdapter,
 							    pRemainChanCtx);
@@ -1103,14 +1103,14 @@ int __wlan_hdd_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
 
 	if (0 != status)
 		return status;
-	cdf_spin_lock(&pHddCtx->hdd_roc_req_q_lock);
+	qdf_spin_lock(&pHddCtx->hdd_roc_req_q_lock);
 	list_for_each_safe(tmp, q, &pHddCtx->hdd_roc_req_q.anchor) {
 		curr_roc_req = list_entry(tmp, hdd_roc_req_t, node);
 		if ((uintptr_t) curr_roc_req->pRemainChanCtx == cookie) {
 			qdf_status = qdf_list_remove_node(&pHddCtx->hdd_roc_req_q,
 						      (qdf_list_node_t *)
 						      curr_roc_req);
-			cdf_spin_unlock(&pHddCtx->hdd_roc_req_q_lock);
+			qdf_spin_unlock(&pHddCtx->hdd_roc_req_q_lock);
 			if (qdf_status == QDF_STATUS_SUCCESS) {
 				cdf_mem_free(curr_roc_req->pRemainChanCtx);
 				cdf_mem_free(curr_roc_req);
@@ -1118,7 +1118,7 @@ int __wlan_hdd_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
 			return 0;
 		}
 	}
-	cdf_spin_unlock(&pHddCtx->hdd_roc_req_q_lock);
+	qdf_spin_unlock(&pHddCtx->hdd_roc_req_q_lock);
 	/* FIXME cancel currently running remain on chan.
 	 * Need to check cookie and cancel accordingly
 	 */

+ 3 - 3
core/hdd/src/wlan_hdd_power.c

@@ -1755,16 +1755,16 @@ static int __wlan_hdd_cfg80211_resume_wlan(struct wiphy *wiphy)
 	MTRACE(cdf_trace(QDF_MODULE_ID_HDD,
 			 TRACE_CODE_HDD_CFG80211_RESUME_WLAN,
 			 NO_SESSION, pHddCtx->isWiphySuspended));
-	cdf_spin_lock(&pHddCtx->sched_scan_lock);
+	qdf_spin_lock(&pHddCtx->sched_scan_lock);
 	pHddCtx->isWiphySuspended = false;
 	if (true != pHddCtx->isSchedScanUpdatePending) {
-		cdf_spin_unlock(&pHddCtx->sched_scan_lock);
+		qdf_spin_unlock(&pHddCtx->sched_scan_lock);
 		hddLog(LOG1, FL("Return resume is not due to PNO indication"));
 		return 0;
 	}
 	/* Reset flag to avoid updatating cfg80211 data old results again */
 	pHddCtx->isSchedScanUpdatePending = false;
-	cdf_spin_unlock(&pHddCtx->sched_scan_lock);
+	qdf_spin_unlock(&pHddCtx->sched_scan_lock);
 
 	status = hdd_get_front_adapter(pHddCtx, &pAdapterNode);
 

+ 15 - 15
core/hdd/src/wlan_hdd_scan.c

@@ -538,10 +538,10 @@ static int wlan_hdd_scan_request_enqueue(hdd_adapter_t *adapter,
 	hdd_scan_req->scan_id = scan_id;
 	hdd_scan_req->timestamp = timestamp;
 
-	cdf_spin_lock(&hdd_ctx->hdd_scan_req_q_lock);
+	qdf_spin_lock(&hdd_ctx->hdd_scan_req_q_lock);
 	status = qdf_list_insert_back(&hdd_ctx->hdd_scan_req_q,
 					&hdd_scan_req->node);
-	cdf_spin_unlock(&hdd_ctx->hdd_scan_req_q_lock);
+	qdf_spin_unlock(&hdd_ctx->hdd_scan_req_q_lock);
 
 	if (QDF_STATUS_SUCCESS != status) {
 		hdd_err("Failed to enqueue Scan Req");
@@ -575,16 +575,16 @@ QDF_STATUS wlan_hdd_scan_request_dequeue(hdd_context_t *hdd_ctx,
 	if ((source == NULL) && (timestamp == NULL) && (req == NULL))
 		return QDF_STATUS_E_NULL_VALUE;
 
-	cdf_spin_lock(&hdd_ctx->hdd_scan_req_q_lock);
+	qdf_spin_lock(&hdd_ctx->hdd_scan_req_q_lock);
 
 	if (list_empty(&hdd_ctx->hdd_scan_req_q.anchor)) {
-		cdf_spin_unlock(&hdd_ctx->hdd_scan_req_q_lock);
+		qdf_spin_unlock(&hdd_ctx->hdd_scan_req_q_lock);
 		return QDF_STATUS_E_FAILURE;
 	}
 
 	if (QDF_STATUS_SUCCESS !=
 		qdf_list_peek_front(&hdd_ctx->hdd_scan_req_q, &ppNode)) {
-		cdf_spin_unlock(&hdd_ctx->hdd_scan_req_q_lock);
+		qdf_spin_unlock(&hdd_ctx->hdd_scan_req_q_lock);
 		hdd_err("Failed to remove Scan Req from queue");
 		return QDF_STATUS_E_FAILURE;
 	}
@@ -600,12 +600,12 @@ QDF_STATUS wlan_hdd_scan_request_dequeue(hdd_context_t *hdd_ctx,
 				*source = hdd_scan_req->source;
 				*timestamp = hdd_scan_req->timestamp;
 				cdf_mem_free(hdd_scan_req);
-				cdf_spin_unlock(&hdd_ctx->hdd_scan_req_q_lock);
+				qdf_spin_unlock(&hdd_ctx->hdd_scan_req_q_lock);
 				hdd_info("removed Scan id: %d, req = %p",
 					scan_id, req);
 				return QDF_STATUS_SUCCESS;
 			} else {
-				cdf_spin_unlock(&hdd_ctx->hdd_scan_req_q_lock);
+				qdf_spin_unlock(&hdd_ctx->hdd_scan_req_q_lock);
 				hdd_err("Failed to remove node scan id %d",
 					scan_id);
 				return status;
@@ -614,7 +614,7 @@ QDF_STATUS wlan_hdd_scan_request_dequeue(hdd_context_t *hdd_ctx,
 	} while (QDF_STATUS_SUCCESS ==
 		qdf_list_peek_next(&hdd_ctx->hdd_scan_req_q, pNode, &ppNode));
 
-	cdf_spin_unlock(&hdd_ctx->hdd_scan_req_q_lock);
+	qdf_spin_unlock(&hdd_ctx->hdd_scan_req_q_lock);
 	hdd_err("Failed to find scan id %d", scan_id);
 	return status;
 }
@@ -673,12 +673,12 @@ hdd_scan_request_callback(tHalHandle halHandle, void *pContext,
 		hdd_err("Got unexpected request struct for Scan id %d",
 			scanId);
 
-	cdf_spin_lock(&hddctx->hdd_scan_req_q_lock);
+	qdf_spin_lock(&hddctx->hdd_scan_req_q_lock);
 	size = qdf_list_size(&(hddctx->hdd_scan_req_q));
 	if (!size)
 		/* Scan is no longer pending */
 		pAdapter->scan_info.mScanPending = false;
-	cdf_spin_unlock(&hddctx->hdd_scan_req_q_lock);
+	qdf_spin_unlock(&hddctx->hdd_scan_req_q_lock);
 
 	/* notify any applications that may be interested */
 	memset(&wrqu, '\0', sizeof(wrqu));
@@ -1141,14 +1141,14 @@ static QDF_STATUS hdd_cfg80211_scan_done_callback(tHalHandle halHandle,
 		aborted = true;
 	}
 
-	cdf_spin_lock(&hddctx->hdd_scan_req_q_lock);
+	qdf_spin_lock(&hddctx->hdd_scan_req_q_lock);
 	size = qdf_list_size(&(hddctx->hdd_scan_req_q));
 	if (!size) {
 		/* Scan is no longer pending */
 		pScanInfo->mScanPending = false;
 		complete(&pScanInfo->abortscan_event_var);
 	}
-	cdf_spin_unlock(&hddctx->hdd_scan_req_q_lock);
+	qdf_spin_unlock(&hddctx->hdd_scan_req_q_lock);
 	/*
 	 * Scan can be triggred from NL or vendor scan
 	 * - If scan is triggered from NL then cfg80211 scan done should be
@@ -1955,15 +1955,15 @@ hdd_sched_scan_callback(void *callbackContext,
 		return;
 	}
 
-	cdf_spin_lock(&pHddCtx->sched_scan_lock);
+	qdf_spin_lock(&pHddCtx->sched_scan_lock);
 	if (true == pHddCtx->isWiphySuspended) {
 		pHddCtx->isSchedScanUpdatePending = true;
-		cdf_spin_unlock(&pHddCtx->sched_scan_lock);
+		qdf_spin_unlock(&pHddCtx->sched_scan_lock);
 		hddLog(LOG1,
 		       FL("Update cfg80211 scan database after it resume"));
 		return;
 	}
-	cdf_spin_unlock(&pHddCtx->sched_scan_lock);
+	qdf_spin_unlock(&pHddCtx->sched_scan_lock);
 
 	ret = wlan_hdd_cfg80211_update_bss(pHddCtx->wiphy, pAdapter, 0);
 

+ 1 - 1
core/hdd/src/wlan_hdd_softap_tx_rx.c

@@ -570,7 +570,7 @@ QDF_STATUS hdd_softap_rx_packet_cbk(void *cds_context,
 
 	skb->protocol = eth_type_trans(skb, skb->dev);
 #ifdef WLAN_FEATURE_HOLD_RX_WAKELOCK
-	cdf_wake_lock_timeout_acquire(&pHddCtx->rx_wake_lock,
+	qdf_wake_lock_timeout_acquire(&pHddCtx->rx_wake_lock,
 				      HDD_WAKE_LOCK_DURATION,
 				      WIFI_POWER_EVENT_WAKELOCK_HOLD_RX);
 #endif

+ 1 - 1
core/hdd/src/wlan_hdd_tx_rx.c

@@ -732,7 +732,7 @@ QDF_STATUS hdd_rx_packet_cbk(void *cds_context, cdf_nbuf_t rxBuf, uint8_t staId)
 	++pAdapter->stats.rx_packets;
 	pAdapter->stats.rx_bytes += skb->len;
 #ifdef WLAN_FEATURE_HOLD_RX_WAKELOCK
-	cdf_wake_lock_timeout_acquire(&pHddCtx->rx_wake_lock,
+	qdf_wake_lock_timeout_acquire(&pHddCtx->rx_wake_lock,
 				      HDD_WAKE_LOCK_DURATION,
 				      WIFI_POWER_EVENT_WAKELOCK_HOLD_RX);
 #endif

+ 2 - 2
core/mac/inc/ani_global.h

@@ -637,7 +637,7 @@ typedef struct sAniSirLim {
 
 	/* admission control policy information */
 	tLimAdmitPolicyInfo admitPolicyInfo;
-	cdf_mutex_t lkPeGlobalLock;
+	qdf_mutex_t lkPeGlobalLock;
 	uint8_t disableLDPCWithTxbfAP;
 #ifdef FEATURE_WLAN_TDLS
 	uint8_t gLimTDLSBufStaEnabled;
@@ -825,7 +825,7 @@ typedef struct sAniSirLim {
 #endif
 
 	tSirRemainOnChnReq *gpLimRemainOnChanReq;       /* hold remain on chan request in this buf */
-	cdf_mutex_t lim_frame_register_lock;
+	qdf_mutex_t lim_frame_register_lock;
 	qdf_list_t gLimMgmtFrameRegistratinQueue;
 	uint32_t mgmtFrameSessionId;
 

+ 7 - 7
core/mac/src/pe/lim/lim_api.c

@@ -606,7 +606,7 @@ tSirRetStatus lim_initialize(tpAniSirGlobal pMac)
 	rrm_initialize(pMac);
 #endif
 
-	cdf_mutex_init(&pMac->lim.lim_frame_register_lock);
+	qdf_mutex_create(&pMac->lim.lim_frame_register_lock);
 	qdf_list_create(&pMac->lim.gLimMgmtFrameRegistratinQueue, 0);
 
 	/* Initialize the configurations needed by PE */
@@ -656,7 +656,7 @@ void lim_cleanup(tpAniSirGlobal pMac)
 	struct mgmt_frm_reg_info *pLimMgmtRegistration = NULL;
 
 	if (QDF_GLOBAL_FTM_MODE != cds_get_conparam()) {
-		cdf_mutex_acquire(&pMac->lim.lim_frame_register_lock);
+		qdf_mutex_acquire(&pMac->lim.lim_frame_register_lock);
 		while (qdf_list_remove_front(
 			&pMac->lim.gLimMgmtFrameRegistratinQueue,
 			(qdf_list_node_t **) &pLimMgmtRegistration) ==
@@ -665,7 +665,7 @@ void lim_cleanup(tpAniSirGlobal pMac)
 			FL("Fixing leak! Deallocating pLimMgmtRegistration node"));
 			cdf_mem_free(pLimMgmtRegistration);
 		}
-		cdf_mutex_release(&pMac->lim.lim_frame_register_lock);
+		qdf_mutex_release(&pMac->lim.lim_frame_register_lock);
 		qdf_list_destroy(&pMac->lim.gLimMgmtFrameRegistratinQueue);
 	}
 
@@ -766,7 +766,7 @@ tSirRetStatus pe_open(tpAniSirGlobal pMac, tMacOpenParameters *pMacOpenParam)
 	pMac->lim.mgmtFrameSessionId = 0xff;
 	pMac->lim.deferredMsgCnt = 0;
 
-	if (!QDF_IS_STATUS_SUCCESS(cdf_mutex_init(&pMac->lim.lkPeGlobalLock))) {
+	if (!QDF_IS_STATUS_SUCCESS(qdf_mutex_create(&pMac->lim.lkPeGlobalLock))) {
 		PELOGE(lim_log(pMac, LOGE, FL("pe lock init failed!"));)
 		status = eSIR_FAILURE;
 		goto pe_open_lock_fail;
@@ -830,7 +830,7 @@ tSirRetStatus pe_close(tpAniSirGlobal pMac)
 	cdf_mem_free(pMac->lim.gpSession);
 	pMac->lim.gpSession = NULL;
 	if (!QDF_IS_STATUS_SUCCESS
-		    (cdf_mutex_destroy(&pMac->lim.lkPeGlobalLock))) {
+		    (qdf_mutex_destroy(&pMac->lim.lkPeGlobalLock))) {
 		return eSIR_FAILURE;
 	}
 	return eSIR_SUCCESS;
@@ -2265,7 +2265,7 @@ QDF_STATUS pe_acquire_global_lock(tAniSirLim *psPe)
 
 	if (psPe) {
 		if (QDF_IS_STATUS_SUCCESS
-			    (cdf_mutex_acquire(&psPe->lkPeGlobalLock))) {
+			    (qdf_mutex_acquire(&psPe->lkPeGlobalLock))) {
 			status = QDF_STATUS_SUCCESS;
 		}
 	}
@@ -2277,7 +2277,7 @@ QDF_STATUS pe_release_global_lock(tAniSirLim *psPe)
 	QDF_STATUS status = QDF_STATUS_E_INVAL;
 	if (psPe) {
 		if (QDF_IS_STATUS_SUCCESS
-			    (cdf_mutex_release(&psPe->lkPeGlobalLock))) {
+			    (qdf_mutex_release(&psPe->lkPeGlobalLock))) {
 			status = QDF_STATUS_SUCCESS;
 		}
 	}

+ 4 - 4
core/mac/src/pe/lim/lim_process_message_queue.c

@@ -652,10 +652,10 @@ lim_check_mgmt_registered_frames(tpAniSirGlobal mac_ctx, uint8_t *buff_desc,
 	body = WMA_GET_RX_MPDU_DATA(buff_desc);
 	frm_len = WMA_GET_RX_PAYLOAD_LEN(buff_desc);
 
-	cdf_mutex_acquire(&mac_ctx->lim.lim_frame_register_lock);
+	qdf_mutex_acquire(&mac_ctx->lim.lim_frame_register_lock);
 	qdf_list_peek_front(&mac_ctx->lim.gLimMgmtFrameRegistratinQueue,
 			    (qdf_list_node_t **) &mgmt_frame);
-	cdf_mutex_release(&mac_ctx->lim.lim_frame_register_lock);
+	qdf_mutex_release(&mac_ctx->lim.lim_frame_register_lock);
 
 	while (mgmt_frame != NULL) {
 		type = (mgmt_frame->frameType >> 2) & 0x03;
@@ -683,13 +683,13 @@ lim_check_mgmt_registered_frames(tpAniSirGlobal mac_ctx, uint8_t *buff_desc,
 			}
 		}
 
-		cdf_mutex_acquire(&mac_ctx->lim.lim_frame_register_lock);
+		qdf_mutex_acquire(&mac_ctx->lim.lim_frame_register_lock);
 		qdf_status =
 			qdf_list_peek_next(
 			&mac_ctx->lim.gLimMgmtFrameRegistratinQueue,
 			(qdf_list_node_t *) mgmt_frame,
 			(qdf_list_node_t **) &next_frm);
-		cdf_mutex_release(&mac_ctx->lim.lim_frame_register_lock);
+		qdf_mutex_release(&mac_ctx->lim.lim_frame_register_lock);
 		mgmt_frame = next_frm;
 		next_frm = NULL;
 	}

+ 8 - 8
core/mac/src/pe/lim/lim_process_sme_req_messages.c

@@ -4502,10 +4502,10 @@ static void __lim_process_sme_register_mgmt_frame_req(tpAniSirGlobal mac_ctx,
 				sme_req->registerFrame, sme_req->frameType,
 				sme_req->matchLen);
 	/* First check whether entry exists already */
-	cdf_mutex_acquire(&mac_ctx->lim.lim_frame_register_lock);
+	qdf_mutex_acquire(&mac_ctx->lim.lim_frame_register_lock);
 	qdf_list_peek_front(&mac_ctx->lim.gLimMgmtFrameRegistratinQueue,
 			    (qdf_list_node_t **) &lim_mgmt_regn);
-	cdf_mutex_release(&mac_ctx->lim.lim_frame_register_lock);
+	qdf_mutex_release(&mac_ctx->lim.lim_frame_register_lock);
 
 	while (lim_mgmt_regn != NULL) {
 		if (lim_mgmt_regn->frameType != sme_req->frameType)
@@ -4525,21 +4525,21 @@ static void __lim_process_sme_register_mgmt_frame_req(tpAniSirGlobal mac_ctx,
 			break;
 		}
 skip_match:
-		cdf_mutex_acquire(&mac_ctx->lim.lim_frame_register_lock);
+		qdf_mutex_acquire(&mac_ctx->lim.lim_frame_register_lock);
 		qdf_status = qdf_list_peek_next(
 				&mac_ctx->lim.gLimMgmtFrameRegistratinQueue,
 				(qdf_list_node_t *)lim_mgmt_regn,
 				(qdf_list_node_t **)&next);
-		cdf_mutex_release(&mac_ctx->lim.lim_frame_register_lock);
+		qdf_mutex_release(&mac_ctx->lim.lim_frame_register_lock);
 		lim_mgmt_regn = next;
 		next = NULL;
 	}
 	if (match) {
-		cdf_mutex_acquire(&mac_ctx->lim.lim_frame_register_lock);
+		qdf_mutex_acquire(&mac_ctx->lim.lim_frame_register_lock);
 		qdf_list_remove_node(
 				&mac_ctx->lim.gLimMgmtFrameRegistratinQueue,
 				(qdf_list_node_t *)lim_mgmt_regn);
-		cdf_mutex_release(&mac_ctx->lim.lim_frame_register_lock);
+		qdf_mutex_release(&mac_ctx->lim.lim_frame_register_lock);
 		cdf_mem_free(lim_mgmt_regn);
 	}
 
@@ -4559,12 +4559,12 @@ skip_match:
 					     sme_req->matchData,
 					     sme_req->matchLen);
 			}
-			cdf_mutex_acquire(
+			qdf_mutex_acquire(
 					&mac_ctx->lim.lim_frame_register_lock);
 			qdf_list_insert_front(&mac_ctx->lim.
 					      gLimMgmtFrameRegistratinQueue,
 					      &lim_mgmt_regn->node);
-			cdf_mutex_release(
+			qdf_mutex_release(
 					&mac_ctx->lim.lim_frame_register_lock);
 		}
 	}

+ 11 - 11
core/sap/dfs/inc/dfs.h

@@ -51,7 +51,7 @@
 #include <cdf_net_types.h>      /* CDF_NBUF_EXEMPT_NO_EXEMPTION, etc. */
 #include <cdf_nbuf.h>           /* cdf_nbuf_t, etc. */
 #include <cdf_util.h>           /* cdf_assert */
-#include <cdf_lock.h>           /* cdf_spinlock */
+#include <qdf_lock.h>           /* cdf_spinlock */
 #include <cds_queue.h>          /* TAILQ */
 #include <qdf_time.h>
 #include <qdf_timer.h>
@@ -131,18 +131,18 @@
 #define DFS_EXT_CHAN_LOADING_THRESH     30
 #define DFS_DEFAULT_PRI_MARGIN          6
 #define DFS_DEFAULT_FIXEDPATTERN_PRI_MARGIN       4
-#define ATH_DFSQ_LOCK(_dfs)        cdf_spin_lock_bh((&(_dfs)->dfs_radarqlock))
-#define ATH_DFSQ_UNLOCK(_dfs)      cdf_spin_unlock_bh((&(_dfs)->dfs_radarqlock))
-#define ATH_DFSQ_LOCK_INIT(_dfs)   cdf_spinlock_init(&(_dfs)->dfs_radarqlock)
+#define ATH_DFSQ_LOCK(_dfs)        qdf_spin_lock_bh((&(_dfs)->dfs_radarqlock))
+#define ATH_DFSQ_UNLOCK(_dfs)      qdf_spin_unlock_bh((&(_dfs)->dfs_radarqlock))
+#define ATH_DFSQ_LOCK_INIT(_dfs)   qdf_spinlock_create(&(_dfs)->dfs_radarqlock)
 
-#define ATH_ARQ_LOCK(_dfs)         cdf_spin_lock_bh((&(_dfs)->dfs_arqlock))
-#define ATH_ARQ_UNLOCK(_dfs)       cdf_spin_unlock_bh((&(_dfs)->dfs_arqlock))
-#define ATH_ARQ_LOCK_INIT(_dfs)    cdf_spinlock_init(&(_dfs)->dfs_arqlock)
+#define ATH_ARQ_LOCK(_dfs)         qdf_spin_lock_bh((&(_dfs)->dfs_arqlock))
+#define ATH_ARQ_UNLOCK(_dfs)       qdf_spin_unlock_bh((&(_dfs)->dfs_arqlock))
+#define ATH_ARQ_LOCK_INIT(_dfs)    qdf_spinlock_create(&(_dfs)->dfs_arqlock)
 
-#define ATH_DFSEVENTQ_LOCK(_dfs)   cdf_spin_lock_bh((&(_dfs)->dfs_eventqlock))
-#define ATH_DFSEVENTQ_UNLOCK(_dfs) cdf_spin_unlock_bh((&(_dfs)->dfs_eventqlock))
+#define ATH_DFSEVENTQ_LOCK(_dfs)   qdf_spin_lock_bh((&(_dfs)->dfs_eventqlock))
+#define ATH_DFSEVENTQ_UNLOCK(_dfs) qdf_spin_unlock_bh((&(_dfs)->dfs_eventqlock))
 #define ATH_DFSEVENTQ_LOCK_INIT(_dfs) \
-				   cdf_spinlock_init((&(_dfs)->dfs_eventqlock))
+				   qdf_spinlock_create((&(_dfs)->dfs_eventqlock))
 /* Mask for time stamp from descriptor */
 #define DFS_TSMASK              0xFFFFFFFF
 /* Shift for time stamp from descriptor */
@@ -222,7 +222,7 @@
 #define DFS_ETSI_TYPE3_WAR_PRI_UPPER_LIMIT 435
 #define DFS_ETSI_WAR_VALID_PULSE_DURATION 15
 
-typedef cdf_spinlock_t dfsq_lock_t;
+typedef qdf_spinlock_t dfsq_lock_t;
 
 #ifdef WIN32
 #pragma pack(push, dfs_pulseparams, 1)

+ 2 - 2
core/sap/dfs/src/dfs.c

@@ -169,9 +169,9 @@ static os_timer_func(dfs_task)
 			 */
 			OS_CANCEL_TIMER(&dfs->ath_dfstesttimer);
 			dfs->ath_dfstest = 1;
-			cdf_spin_lock_bh(&ic->chan_lock);
+			qdf_spin_lock_bh(&ic->chan_lock);
 			dfs->ath_dfstest_ieeechan = ic->ic_curchan->ic_ieee;
-			cdf_spin_unlock_bh(&ic->chan_lock);
+			qdf_spin_unlock_bh(&ic->chan_lock);
 			dfs->ath_dfstesttime = 1;       /* 1ms */
 			OS_SET_TIMER(&dfs->ath_dfstesttimer,
 				     dfs->ath_dfstesttime);

+ 12 - 12
core/sap/dfs/src/dfs_fcc_bin5.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002-2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -27,7 +27,7 @@
 
 /*===========================================================================
 
-                              dfs_fcc_bin5.c
+				dfs_fcc_bin5.c
 
    OVERVIEW:
 
@@ -41,7 +41,7 @@
 
 /*===========================================================================
 
-                      EDIT HISTORY FOR FILE
+			EDIT HISTORY FOR FILE
 
    This section contains comments describing changes made to the module.
    Notice that changes are listed in reverse chronological order.
@@ -95,15 +95,15 @@ dfs_bin5_check_pulse(struct ath_dfs *dfs, struct dfs_event *re,
 			    !!(re->re_flags & DFS_EVENT_CHECKCHIRP),
 			    !!(re->re_flags & DFS_EVENT_HW_CHIRP),
 			    !!(re->re_flags & DFS_EVENT_SW_CHIRP));
-		return (0);
+		return 0;
 	}
 
 	/* Adjust the filter threshold for rssi in non TURBO mode */
-	cdf_spin_lock_bh(&dfs->ic->chan_lock);
+	qdf_spin_lock_bh(&dfs->ic->chan_lock);
 	if (!(dfs->ic->ic_curchan->ic_flags & CHANNEL_TURBO))
 		b5_rssithresh += br->br_pulse.b5_rssimargin;
 
-	cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+	qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 
 	/*
 	 * Check if the pulse is within duration and rssi
@@ -115,7 +115,7 @@ dfs_bin5_check_pulse(struct ath_dfs *dfs, struct dfs_event *re,
 		DFS_DPRINTK(dfs, ATH_DEBUG_DFS_BIN5,
 			    "%s: dur=%d, rssi=%d - adding!\n",
 			    __func__, (int)re->re_dur, (int)re->re_rssi);
-		return (1);
+		return 1;
 	}
 
 	DFS_DPRINTK(dfs, ATH_DEBUG_DFS_BIN5,
@@ -124,7 +124,7 @@ dfs_bin5_check_pulse(struct ath_dfs *dfs, struct dfs_event *re,
 		    (unsigned long long)re->re_full_ts,
 		    (int)re->re_dur, (int)re->re_rssi);
 
-	return (0);
+	return 0;
 }
 
 int dfs_bin5_addpulse(struct ath_dfs *dfs, struct dfs_bin5radars *br,
@@ -567,7 +567,7 @@ dfs_check_chirping_merlin(struct ath_dfs *dfs, void *buf, uint16_t datalen,
 	int same_sign;
 	int temp;
 
-	cdf_spin_lock_bh(&dfs->ic->chan_lock);
+	qdf_spin_lock_bh(&dfs->ic->chan_lock);
 	if (IS_CHAN_HT40(dfs->ic->ic_curchan)) {
 		num_fft_bytes = NUM_FFT_BYTES_HT40;
 		num_bin_bytes = NUM_BIN_BYTES_HT40;
@@ -598,7 +598,7 @@ dfs_check_chirping_merlin(struct ath_dfs *dfs, void *buf, uint16_t datalen,
 		upper_mag_byte = UPPER_MAG_BYTE_HT20;
 	}
 
-	cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+	qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 	ptr = (uint8_t *) buf;
 	/*
 	 * sanity check for FFT buffer
@@ -634,7 +634,7 @@ dfs_check_chirping_merlin(struct ath_dfs *dfs, void *buf, uint16_t datalen,
 		max_index_upper[i] =
 			(ptr[fft_start + upper_index_byte] >> 2) + num_subchan_bins;
 
-		cdf_spin_lock_bh(&dfs->ic->chan_lock);
+		qdf_spin_lock_bh(&dfs->ic->chan_lock);
 		if (!IS_CHAN_HT40(dfs->ic->ic_curchan)) {
 			/*
 			 * for HT20 mode indices are 6 bit signed number
@@ -643,7 +643,7 @@ dfs_check_chirping_merlin(struct ath_dfs *dfs, void *buf, uint16_t datalen,
 			max_index_upper[i] = 0;
 		}
 
-		cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+		qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 		/*
 		 * Reconstruct the maximum magnitude for each sub-channel. Also select
 		 * and flag the max overall magnitude between the two sub-channels.

+ 12 - 12
core/sap/dfs/src/dfs_misc.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2002-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -27,7 +27,7 @@
 
 /*===========================================================================
 
-                     dfs_misc.c
+			dfs_misc.c
 
    OVERVIEW:
 
@@ -41,7 +41,7 @@
 
 /*===========================================================================
 
-                      EDIT HISTORY FOR FILE
+			EDIT HISTORY FOR FILE
 
    This section contains comments describing changes made to the module.
    Notice that changes are listed in reverse chronological order.
@@ -107,9 +107,9 @@ dfs_get_pri_margin(struct ath_dfs *dfs, int is_extchan_detect,
 	else
 		pri_margin = DFS_DEFAULT_PRI_MARGIN;
 
-	cdf_spin_lock_bh(&dfs->ic->chan_lock);
+	qdf_spin_lock_bh(&dfs->ic->chan_lock);
 	if (IS_CHAN_HT40(dfs->ic->ic_curchan)) {
-		cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+		qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 		ext_chan_busy = dfs->ic->ic_get_ext_busy(dfs->ic);
 		if (ext_chan_busy >= 0) {
 			dfs->dfs_rinfo.ext_chan_busy_ts =
@@ -134,7 +134,7 @@ dfs_get_pri_margin(struct ath_dfs *dfs, int is_extchan_detect,
 
 		pri_margin -= adjust_pri;
 	} else {
-		cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+		qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 	}
 	return pri_margin;
 }
@@ -150,9 +150,9 @@ int dfs_get_filter_threshold(struct ath_dfs *dfs, struct dfs_filter *rf,
 
 	thresh = rf->rf_threshold;
 
-	cdf_spin_lock_bh(&dfs->ic->chan_lock);
+	qdf_spin_lock_bh(&dfs->ic->chan_lock);
 	if (IS_CHAN_HT40(dfs->ic->ic_curchan)) {
-		cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+		qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 		ext_chan_busy = dfs->ic->ic_get_ext_busy(dfs->ic);
 		if (ext_chan_busy >= 0) {
 			dfs->dfs_rinfo.ext_chan_busy_ts =
@@ -186,7 +186,7 @@ int dfs_get_filter_threshold(struct ath_dfs *dfs, struct dfs_filter *rf,
 
 		thresh += adjust_thresh;
 	} else {
-		cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+		qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 	}
 	return thresh;
 }
@@ -202,7 +202,7 @@ uint32_t dfs_round(int32_t val)
 	if (rem < 50)
 		return ival;
 	else
-		return (ival + 1);
+		return ival + 1;
 }
 
 struct dfs_ieee80211_channel *ieee80211_get_extchan(struct ieee80211com *ic)
@@ -273,12 +273,12 @@ struct dfs_state *dfs_getchanstate(struct ath_dfs *dfs, uint8_t *index,
 
 			if (index != NULL)
 				*index = (uint8_t) i;
-			return (rs);
+			return rs;
 		}
 	}
 	DFS_DPRINTK(dfs, ATH_DEBUG_DFS2, "%s: No more radar states left.\n",
 		    __func__);
-	return (NULL);
+	return NULL;
 }
 
 #endif /* ATH_SUPPORT_DFS */

+ 4 - 4
core/sap/dfs/src/dfs_phyerr_tlv.c

@@ -388,7 +388,7 @@ radar_summary_parse(struct ath_dfs *dfs, const char *buf, size_t len,
 	 *   Set pulse duration to 20 us
 	 */
 
-	cdf_spin_lock_bh(&dfs->ic->chan_lock);
+	qdf_spin_lock_bh(&dfs->ic->chan_lock);
 	freq = ieee80211_chan2freq(dfs->ic, dfs->ic->ic_curchan);
 	freq_centre = dfs->ic->ic_curchan->ic_vhtop_ch_freq_seg1;
 
@@ -399,7 +399,7 @@ radar_summary_parse(struct ath_dfs *dfs, const char *buf, size_t len,
 		rsu->pulse_duration = 20;
 	}
 
-	cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+	qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 }
 
 static void
@@ -599,7 +599,7 @@ static int tlv_calc_freq_info(struct ath_dfs *dfs, struct rx_radar_status *rs)
 		return 0;
 	}
 
-	cdf_spin_lock_bh(&dfs->ic->chan_lock);
+	qdf_spin_lock_bh(&dfs->ic->chan_lock);
 	/*
 	 * calculate the channel center frequency for
 	 * 160MHz and 80p80 MHz including the legacy
@@ -644,7 +644,7 @@ static int tlv_calc_freq_info(struct ath_dfs *dfs, struct rx_radar_status *rs)
 		chan_centre += (chan_offset / 2);
 	}
 
-	cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+	qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 	/*
 	 * XXX half/quarter rate support!
 	 */

+ 17 - 17
core/sap/dfs/src/dfs_process_phyerr.c

@@ -88,7 +88,7 @@ dfs_get_event_freqcentre(struct ath_dfs *dfs, int is_pri, int is_ext, int is_dc)
 	 * based on whether it's an upper or lower channel.
 	 */
 	chan_width = dfs_get_event_freqwidth(dfs);
-	cdf_spin_lock_bh(&ic->chan_lock);
+	qdf_spin_lock_bh(&ic->chan_lock);
 	if (IEEE80211_IS_CHAN_11N_HT40PLUS(ic->ic_curchan))
 		chan_offset = chan_width;
 	else if (IEEE80211_IS_CHAN_11N_HT40MINUS(ic->ic_curchan))
@@ -96,7 +96,7 @@ dfs_get_event_freqcentre(struct ath_dfs *dfs, int is_pri, int is_ext, int is_dc)
 	else
 		chan_offset = 0;
 
-	cdf_spin_unlock_bh(&ic->chan_lock);
+	qdf_spin_unlock_bh(&ic->chan_lock);
 
 	/*
 	 * Check for DC events first - the sowl code may just set all
@@ -106,10 +106,10 @@ dfs_get_event_freqcentre(struct ath_dfs *dfs, int is_pri, int is_ext, int is_dc)
 		/*
 		 * XXX TODO: Should DC events be considered 40MHz wide here?
 		 */
-		cdf_spin_lock_bh(&ic->chan_lock);
+		qdf_spin_lock_bh(&ic->chan_lock);
 		freq = ieee80211_chan2freq(ic, ic->ic_curchan) +
 			(chan_offset / 2);
-		cdf_spin_unlock_bh(&ic->chan_lock);
+		qdf_spin_unlock_bh(&ic->chan_lock);
 		return freq;
 	}
 
@@ -118,23 +118,23 @@ dfs_get_event_freqcentre(struct ath_dfs *dfs, int is_pri, int is_ext, int is_dc)
 	 * The centre frequency for pri events is still ic_freq.
 	 */
 	if (is_pri) {
-		cdf_spin_lock_bh(&ic->chan_lock);
+		qdf_spin_lock_bh(&ic->chan_lock);
 		freq = ieee80211_chan2freq(ic, ic->ic_curchan);
-		cdf_spin_unlock_bh(&ic->chan_lock);
+		qdf_spin_unlock_bh(&ic->chan_lock);
 		return freq;
 	}
 
 	if (is_ext) {
-		cdf_spin_lock_bh(&ic->chan_lock);
+		qdf_spin_lock_bh(&ic->chan_lock);
 		freq = ieee80211_chan2freq(ic, ic->ic_curchan) + chan_width;
-		cdf_spin_unlock_bh(&ic->chan_lock);
+		qdf_spin_unlock_bh(&ic->chan_lock);
 		return freq;
 	}
 
 	/* XXX shouldn't get here */
-	cdf_spin_lock_bh(&ic->chan_lock);
+	qdf_spin_lock_bh(&ic->chan_lock);
 	freq = ieee80211_chan2freq(ic, ic->ic_curchan);
-	cdf_spin_unlock_bh(&ic->chan_lock);
+	qdf_spin_unlock_bh(&ic->chan_lock);
 	return freq;
 }
 
@@ -514,16 +514,16 @@ dfs_process_phyerr(struct ieee80211com *ic, void *buf, uint16_t datalen,
 		return;
 	}
 
-	cdf_spin_lock_bh(&ic->chan_lock);
+	qdf_spin_lock_bh(&ic->chan_lock);
 	if (IEEE80211_IS_CHAN_RADAR(chan)) {
-		cdf_spin_unlock_bh(&ic->chan_lock);
+		qdf_spin_unlock_bh(&ic->chan_lock);
 		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
 			    "%s: Radar already found in the channel, "
 			    " do not queue radar data\n", __func__);
 		return;
 	}
 
-	cdf_spin_unlock_bh(&ic->chan_lock);
+	qdf_spin_unlock_bh(&ic->chan_lock);
 	dfs->ath_dfs_stats.total_phy_errors++;
 	DFS_DPRINTK(dfs, ATH_DEBUG_DFS2,
 		    "%s[%d] phyerr %d len %d\n",
@@ -711,9 +711,9 @@ dfs_process_phyerr(struct ieee80211com *ic, void *buf, uint16_t datalen,
 	 * for the adaptive radio (AR) pattern matching rather than
 	 * radar detection.
 	 */
-	cdf_spin_lock_bh(&ic->chan_lock);
+	qdf_spin_lock_bh(&ic->chan_lock);
 	if ((chan->ic_flags & CHANNEL_108G) == CHANNEL_108G) {
-		cdf_spin_unlock_bh(&ic->chan_lock);
+		qdf_spin_unlock_bh(&ic->chan_lock);
 		if (!(dfs->dfs_proc_phyerr & DFS_AR_EN)) {
 			DFS_DPRINTK(dfs, ATH_DEBUG_DFS2,
 				    "%s: DFS_AR_EN not enabled\n", __func__);
@@ -763,7 +763,7 @@ dfs_process_phyerr(struct ieee80211com *ic, void *buf, uint16_t datalen,
 		ATH_ARQ_UNLOCK(dfs);
 	} else {
 		if (IEEE80211_IS_CHAN_DFS(chan)) {
-			cdf_spin_unlock_bh(&ic->chan_lock);
+			qdf_spin_unlock_bh(&ic->chan_lock);
 			if (!(dfs->dfs_proc_phyerr & DFS_RADAR_EN)) {
 				DFS_DPRINTK(dfs, ATH_DEBUG_DFS3,
 					    "%s: DFS_RADAR_EN not enabled\n",
@@ -857,7 +857,7 @@ dfs_process_phyerr(struct ieee80211com *ic, void *buf, uint16_t datalen,
 			STAILQ_INSERT_TAIL(&(dfs->dfs_radarq), event, re_list);
 			ATH_DFSQ_UNLOCK(dfs);
 		} else {
-			cdf_spin_unlock_bh(&ic->chan_lock);
+			qdf_spin_unlock_bh(&ic->chan_lock);
 		}
 	}
 

+ 9 - 9
core/sap/dfs/src/dfs_process_radarevent.c

@@ -141,9 +141,9 @@ int dfs_process_radarevent(struct ath_dfs *dfs,
 			  "%s[%d]: dfs is NULL", __func__, __LINE__);
 		return 0;
 	}
-	cdf_spin_lock_bh(&dfs->ic->chan_lock);
+	qdf_spin_lock_bh(&dfs->ic->chan_lock);
 	if (!(IEEE80211_IS_CHAN_DFS(dfs->ic->ic_curchan))) {
-		cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+		qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 		DFS_DPRINTK(dfs, ATH_DEBUG_DFS2,
 			    "%s: radar event on non-DFS chan", __func__);
 		dfs_reset_radarq(dfs);
@@ -155,7 +155,7 @@ int dfs_process_radarevent(struct ath_dfs *dfs,
 		return 0;
 	}
 
-	cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+	qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 #ifndef ATH_DFS_RADAR_DETECTION_ONLY
 	/* TEST : Simulate radar bang, make sure we add the channel to NOL (bug 29968) */
 	if (dfs->dfs_bangradar) {
@@ -235,16 +235,16 @@ int dfs_process_radarevent(struct ath_dfs *dfs,
 		ATH_DFSEVENTQ_UNLOCK(dfs);
 
 		found = 0;
-		cdf_spin_lock_bh(&dfs->ic->chan_lock);
+		qdf_spin_lock_bh(&dfs->ic->chan_lock);
 		if (dfs->ic->disable_phy_err_processing) {
 			ATH_DFSQ_LOCK(dfs);
 			empty = STAILQ_EMPTY(&(dfs->dfs_radarq));
 			ATH_DFSQ_UNLOCK(dfs);
-			cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+			qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 			continue;
 		}
 
-		cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+		qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 		if (re.re_chanindex < DFS_NUM_RADAR_STATES)
 			rs = &dfs->dfs_radar[re.re_chanindex];
 		else {
@@ -848,15 +848,15 @@ dfsfound:
 		DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
 			    "Primary channel freq = %u flags=0x%x",
 			    chan->ic_freq, chan->ic_flagext);
-		cdf_spin_lock_bh(&dfs->ic->chan_lock);
+		qdf_spin_lock_bh(&dfs->ic->chan_lock);
 		if ((dfs->ic->ic_curchan->ic_freq != thischan->ic_freq)) {
-			cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+			qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 			DFS_DPRINTK(dfs, ATH_DEBUG_DFS1,
 				    "Ext channel freq = %u flags=0x%x",
 				    thischan->ic_freq, thischan->ic_flagext);
 		}
 
-		cdf_spin_unlock_bh(&dfs->ic->chan_lock);
+		qdf_spin_unlock_bh(&dfs->ic->chan_lock);
 		dfs->dfs_phyerr_freq_min = 0x7fffffff;
 		dfs->dfs_phyerr_freq_max = 0;
 		dfs->dfs_phyerr_w53_counter = 0;

+ 1 - 1
core/sap/src/sap_internal.h

@@ -136,7 +136,7 @@ struct sap_avoid_channels_info {
 
 typedef struct sSapContext {
 
-	cdf_mutex_t SapGlobalLock;
+	qdf_mutex_t SapGlobalLock;
 
 	/* Include the current channel of AP */
 	uint32_t channel;

+ 4 - 4
core/sap/src/sap_module.c

@@ -185,7 +185,7 @@ QDF_STATUS wlansap_start(void *pCtx)
 	/* Now configure the auth type in the roaming profile. To open. */
 	pSapCtx->csr_roamProfile.negotiatedAuthType = eCSR_AUTH_TYPE_OPEN_SYSTEM;        /* open is the default */
 
-	if (!QDF_IS_STATUS_SUCCESS(cdf_mutex_init(&pSapCtx->SapGlobalLock))) {
+	if (!QDF_IS_STATUS_SUCCESS(qdf_mutex_create(&pSapCtx->SapGlobalLock))) {
 		CDF_TRACE(QDF_MODULE_ID_SAP, CDF_TRACE_LEVEL_ERROR,
 			  "wlansap_start failed init lock");
 		return QDF_STATUS_E_FAULT;
@@ -226,7 +226,7 @@ QDF_STATUS wlansap_stop(void *pCtx)
 
 	sap_free_roam_profile(&pSapCtx->csr_roamProfile);
 
-	if (!QDF_IS_STATUS_SUCCESS(cdf_mutex_destroy(&pSapCtx->SapGlobalLock))) {
+	if (!QDF_IS_STATUS_SUCCESS(qdf_mutex_destroy(&pSapCtx->SapGlobalLock))) {
 		CDF_TRACE(QDF_MODULE_ID_SAP, CDF_TRACE_LEVEL_ERROR,
 			  "wlansap_stop failed destroy lock");
 		return QDF_STATUS_E_FAULT;
@@ -1891,7 +1891,7 @@ QDF_STATUS sap_acquire_global_lock(ptSapContext pSapCtx)
 {
 	QDF_STATUS qdf_status = QDF_STATUS_E_FAULT;
 
-	if (QDF_IS_STATUS_SUCCESS(cdf_mutex_acquire(&pSapCtx->SapGlobalLock))) {
+	if (QDF_IS_STATUS_SUCCESS(qdf_mutex_acquire(&pSapCtx->SapGlobalLock))) {
 		qdf_status = QDF_STATUS_SUCCESS;
 	}
 
@@ -1902,7 +1902,7 @@ QDF_STATUS sap_release_global_lock(ptSapContext pSapCtx)
 {
 	QDF_STATUS qdf_status = QDF_STATUS_E_FAULT;
 
-	if (QDF_IS_STATUS_SUCCESS(cdf_mutex_release(&pSapCtx->SapGlobalLock))) {
+	if (QDF_IS_STATUS_SUCCESS(qdf_mutex_release(&pSapCtx->SapGlobalLock))) {
 		qdf_status = QDF_STATUS_SUCCESS;
 	}
 

+ 1 - 1
core/sme/inc/csr_internal.h

@@ -34,7 +34,7 @@
 #define CSRINTERNAL_H__
 
 #include "qdf_status.h"
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 
 #include "cdf_mc_timer.h"
 #include "csr_support.h"

+ 2 - 2
core/sme/inc/csr_link_list.h

@@ -34,7 +34,7 @@
 #ifndef CSR_LINK_LIST_H__
 #define CSR_LINK_LIST_H__
 
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 #include "cdf_mc_timer.h"
 #include "cds_api.h"
 #include "sir_types.h"
@@ -55,7 +55,7 @@ typedef enum {
 /* This is a circular double link list */
 typedef struct tagDblLinkList {
 	tListElem ListHead;
-	cdf_mutex_t Lock;
+	qdf_mutex_t Lock;
 	uint32_t Count;
 	tHddHandle hHdd;
 	tListFlag Flag;

+ 1 - 1
core/sme/inc/p2p_api.h

@@ -39,7 +39,7 @@
 
 #include "qdf_types.h"
 #include "cdf_mc_timer.h"
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 
 typedef struct sP2pPsConfig {
 	uint8_t opp_ps;

+ 1 - 1
core/sme/inc/sme_api.h

@@ -39,7 +39,7 @@
   ------------------------------------------------------------------------*/
 #include "csr_api.h"
 #include "cds_mq.h"
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 #include "qdf_types.h"
 #include "sir_api.h"
 #include "cds_reg_service.h"

+ 1 - 1
core/sme/inc/sme_inside.h

@@ -38,7 +38,7 @@
   Include Files
   ------------------------------------------------------------------------*/
 #include "qdf_status.h"
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 #include "cdf_trace.h"
 #include "cdf_memory.h"
 #include "qdf_types.h"

+ 2 - 2
core/sme/inc/sme_internal.h

@@ -38,7 +38,7 @@
   Include Files
   ------------------------------------------------------------------------*/
 #include "qdf_status.h"
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 #include "cdf_trace.h"
 #include "cdf_memory.h"
 #include "qdf_types.h"
@@ -141,7 +141,7 @@ typedef void (*sme_set_thermal_level_callback)(void *context, u_int8_t level);
 
 typedef struct tagSmeStruct {
 	eSmeState state;
-	cdf_mutex_t lkSmeGlobalLock;
+	qdf_mutex_t lkSmeGlobalLock;
 	uint32_t totalSmeCmd;
 	/* following pointer contains array of pointers for tSmeCmd* */
 	void **pSmeCmdBufAddr;

+ 1 - 1
core/sme/inc/sme_power_save.h

@@ -27,7 +27,7 @@
 
 #if !defined(__SME_POWER_SAVE_H)
 #define __SME_POWER_SAVE_H
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 #include "cdf_trace.h"
 #include "cdf_memory.h"
 #include "qdf_types.h"

+ 1 - 1
core/sme/inc/sme_qos_api.h

@@ -37,7 +37,7 @@
 /*--------------------------------------------------------------------------
   Include Files
   ------------------------------------------------------------------------*/
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 #include "cdf_trace.h"
 #include "cdf_memory.h"
 #include "qdf_types.h"

+ 1 - 1
core/sme/inc/sme_qos_internal.h

@@ -37,7 +37,7 @@
 /*--------------------------------------------------------------------------
   Include Files
   ------------------------------------------------------------------------*/
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 #include "cdf_trace.h"
 #include "cdf_memory.h"
 #include "qdf_types.h"

+ 1 - 1
core/sme/inc/sme_rrm_api.h

@@ -37,7 +37,7 @@
 /*--------------------------------------------------------------------------
   Include Files
   ------------------------------------------------------------------------*/
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 #include "cdf_trace.h"
 #include "cdf_memory.h"
 #include "qdf_types.h"

+ 1 - 1
core/sme/inc/sme_rrm_internal.h

@@ -37,7 +37,7 @@
 /*--------------------------------------------------------------------------
   Include Files
   ------------------------------------------------------------------------*/
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 #include "cdf_trace.h"
 #include "cdf_memory.h"
 #include "qdf_types.h"

+ 6 - 6
core/sme/src/common/sme_api.c

@@ -116,7 +116,7 @@ QDF_STATUS sme_acquire_global_lock(tSmeStruct *psSme)
 
 	if (psSme) {
 		if (QDF_IS_STATUS_SUCCESS
-			    (cdf_mutex_acquire(&psSme->lkSmeGlobalLock))) {
+			    (qdf_mutex_acquire(&psSme->lkSmeGlobalLock))) {
 			status = QDF_STATUS_SUCCESS;
 		}
 	}
@@ -130,7 +130,7 @@ QDF_STATUS sme_release_global_lock(tSmeStruct *psSme)
 
 	if (psSme) {
 		if (QDF_IS_STATUS_SUCCESS
-			    (cdf_mutex_release(&psSme->lkSmeGlobalLock))) {
+			    (qdf_mutex_release(&psSme->lkSmeGlobalLock))) {
 			status = QDF_STATUS_SUCCESS;
 		}
 	}
@@ -477,7 +477,7 @@ static QDF_STATUS free_sme_cmd_list(tpAniSirGlobal pMac)
 	cdf_mem_free(pMac->sme.smeCmdActiveList.cmdTimeoutTimer);
 	pMac->sme.smeCmdActiveList.cmdTimeoutTimer = NULL;
 
-	status = cdf_mutex_acquire(&pMac->sme.lkSmeGlobalLock);
+	status = qdf_mutex_acquire(&pMac->sme.lkSmeGlobalLock);
 	if (status != QDF_STATUS_SUCCESS) {
 		sms_log(pMac, LOGE,
 			FL("Failed to acquire the lock status = %d"), status);
@@ -486,7 +486,7 @@ static QDF_STATUS free_sme_cmd_list(tpAniSirGlobal pMac)
 
 	free_sme_cmds(pMac);
 
-	status = cdf_mutex_release(&pMac->sme.lkSmeGlobalLock);
+	status = qdf_mutex_release(&pMac->sme.lkSmeGlobalLock);
 	if (status != QDF_STATUS_SUCCESS) {
 		sms_log(pMac, LOGE,
 			FL("Failed to release the lock status = %d"), status);
@@ -1048,7 +1048,7 @@ QDF_STATUS sme_open(tHalHandle hHal)
 
 	pMac->sme.state = SME_STATE_STOP;
 	pMac->sme.currDeviceMode = QDF_STA_MODE;
-	if (!QDF_IS_STATUS_SUCCESS(cdf_mutex_init(
+	if (!QDF_IS_STATUS_SUCCESS(qdf_mutex_create(
 					&pMac->sme.lkSmeGlobalLock))) {
 		sms_log(pMac, LOGE, FL("sme_open failed init lock"));
 		return  QDF_STATUS_E_FAILURE;
@@ -2899,7 +2899,7 @@ QDF_STATUS sme_close(tHalHandle hHal)
 	free_sme_cmd_list(pMac);
 
 	if (!QDF_IS_STATUS_SUCCESS
-		    (cdf_mutex_destroy(&pMac->sme.lkSmeGlobalLock))) {
+		    (qdf_mutex_destroy(&pMac->sme.lkSmeGlobalLock))) {
 		fail_status = QDF_STATUS_E_FAILURE;
 	}
 

+ 5 - 5
core/sme/src/csr/csr_link_list.c

@@ -33,7 +33,7 @@
    ========================================================================== */
 
 #include "csr_link_list.h"
-#include "cdf_lock.h"
+#include "qdf_lock.h"
 #include "cdf_memory.h"
 #include "cdf_trace.h"
 #include "cdf_mc_timer.h"
@@ -146,7 +146,7 @@ void csr_ll_lock(tDblLinkList *pList)
 	}
 
 	if (LIST_FLAG_OPEN == pList->Flag) {
-		cdf_mutex_acquire(&pList->Lock);
+		qdf_mutex_acquire(&pList->Lock);
 	}
 }
 
@@ -160,7 +160,7 @@ void csr_ll_unlock(tDblLinkList *pList)
 	}
 
 	if (LIST_FLAG_OPEN == pList->Flag) {
-		cdf_mutex_release(&pList->Lock);
+		qdf_mutex_release(&pList->Lock);
 	}
 }
 
@@ -231,7 +231,7 @@ QDF_STATUS csr_ll_open(tHddHandle hHdd, tDblLinkList *pList)
 	if (LIST_FLAG_OPEN != pList->Flag) {
 		pList->Count = 0;
 		pList->cmdTimeoutTimer = NULL;
-		qdf_status = cdf_mutex_init(&pList->Lock);
+		qdf_status = qdf_mutex_create(&pList->Lock);
 
 		if (QDF_IS_STATUS_SUCCESS(qdf_status)) {
 			csr_list_init(&pList->ListHead);
@@ -255,7 +255,7 @@ void csr_ll_close(tDblLinkList *pList)
 	if (LIST_FLAG_OPEN == pList->Flag) {
 		/* Make sure the list is empty... */
 		csr_ll_purge(pList, LL_ACCESS_LOCK);
-		cdf_mutex_destroy(&pList->Lock);
+		qdf_mutex_destroy(&pList->Lock);
 		pList->Flag = LIST_FLAG_CLOSE;
 	}
 }

+ 2 - 2
core/utils/epping/inc/epping_internal.h

@@ -125,7 +125,7 @@ typedef struct epping_context {
 	struct epping_cookie *cookie_list;
 	int cookie_count;
 	struct epping_cookie *s_cookie_mem[MAX_COOKIE_SLOTS_NUM];
-	cdf_spinlock_t cookie_lock;
+	qdf_spinlock_t cookie_lock;
 } epping_context_t;
 
 typedef enum {
@@ -141,7 +141,7 @@ typedef struct epping_adapter_s {
 	struct qdf_mac_addr macAddressCurrent;
 	uint8_t sessionId;
 	/* for mboxping */
-	cdf_spinlock_t data_lock;
+	qdf_spinlock_t data_lock;
 	cdf_nbuf_queue_t nodrop_queue;
 	qdf_timer_t epping_timer;
 	epping_tx_timer_state_t epping_timer_state;

+ 2 - 2
core/utils/epping/inc/epping_main.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -38,7 +38,7 @@
 /*---------------------------------------------------------------------------
    Include files
    -------------------------------------------------------------------------*/
-#include <cdf_lock.h>
+#include <qdf_lock.h>
 
 #define WLAN_EPPING_ENABLE_BIT          (1 << 8)
 #define WLAN_EPPING_IRQ_BIT             (1 << 9)

+ 7 - 7
core/utils/epping/src/epping_helper.c

@@ -72,7 +72,7 @@ int epping_cookie_init(epping_context_t *pEpping_ctx)
 			     sizeof(struct epping_cookie) *
 			     MAX_COOKIE_SLOT_SIZE);
 	}
-	cdf_spinlock_init(&pEpping_ctx->cookie_lock);
+	qdf_spinlock_create(&pEpping_ctx->cookie_lock);
 
 	for (i = 0; i < MAX_COOKIE_SLOTS_NUM; i++) {
 		struct epping_cookie *cookie_mem = pEpping_ctx->s_cookie_mem[i];
@@ -95,10 +95,10 @@ error:
 void epping_cookie_cleanup(epping_context_t *pEpping_ctx)
 {
 	int i;
-	cdf_spin_lock_bh(&pEpping_ctx->cookie_lock);
+	qdf_spin_lock_bh(&pEpping_ctx->cookie_lock);
 	pEpping_ctx->cookie_list = NULL;
 	pEpping_ctx->cookie_count = 0;
-	cdf_spin_unlock_bh(&pEpping_ctx->cookie_lock);
+	qdf_spin_unlock_bh(&pEpping_ctx->cookie_lock);
 	for (i = 0; i < MAX_COOKIE_SLOTS_NUM; i++) {
 		if (pEpping_ctx->s_cookie_mem[i]) {
 			cdf_mem_free(pEpping_ctx->s_cookie_mem[i]);
@@ -110,24 +110,24 @@ void epping_cookie_cleanup(epping_context_t *pEpping_ctx)
 void epping_free_cookie(epping_context_t *pEpping_ctx,
 			struct epping_cookie *cookie)
 {
-	cdf_spin_lock_bh(&pEpping_ctx->cookie_lock);
+	qdf_spin_lock_bh(&pEpping_ctx->cookie_lock);
 	cookie->next = pEpping_ctx->cookie_list;
 	pEpping_ctx->cookie_list = cookie;
 	pEpping_ctx->cookie_count++;
-	cdf_spin_unlock_bh(&pEpping_ctx->cookie_lock);
+	qdf_spin_unlock_bh(&pEpping_ctx->cookie_lock);
 }
 
 struct epping_cookie *epping_alloc_cookie(epping_context_t *pEpping_ctx)
 {
 	struct epping_cookie *cookie;
 
-	cdf_spin_lock_bh(&pEpping_ctx->cookie_lock);
+	qdf_spin_lock_bh(&pEpping_ctx->cookie_lock);
 	cookie = pEpping_ctx->cookie_list;
 	if (cookie != NULL) {
 		pEpping_ctx->cookie_list = cookie->next;
 		pEpping_ctx->cookie_count--;
 	}
-	cdf_spin_unlock_bh(&pEpping_ctx->cookie_lock);
+	qdf_spin_unlock_bh(&pEpping_ctx->cookie_lock);
 	return cookie;
 }
 

+ 6 - 6
core/utils/epping/src/epping_tx.c

@@ -200,14 +200,14 @@ void epping_tx_timer_expire(epping_adapter_t *pAdapter)
 
 	/* if nodrop queue is not empty, continue to arm timer */
 	if (nodrop_skb) {
-		cdf_spin_lock_bh(&pAdapter->data_lock);
+		qdf_spin_lock_bh(&pAdapter->data_lock);
 		/* if nodrop queue is not empty, continue to arm timer */
 		if (pAdapter->epping_timer_state != EPPING_TX_TIMER_RUNNING) {
 			pAdapter->epping_timer_state = EPPING_TX_TIMER_RUNNING;
 			qdf_timer_mod(&pAdapter->epping_timer,
 					      TX_RETRY_TIMEOUT_IN_MS);
 		}
-		cdf_spin_unlock_bh(&pAdapter->data_lock);
+		qdf_spin_unlock_bh(&pAdapter->data_lock);
 	} else {
 		pAdapter->epping_timer_state = EPPING_TX_TIMER_STOPPED;
 	}
@@ -288,13 +288,13 @@ tx_fail:
 		EPPING_LOG(CDF_TRACE_LEVEL_FATAL,
 			   "%s: nodrop: %p queued\n", __func__, skb);
 		cdf_nbuf_queue_add(&pAdapter->nodrop_queue, skb);
-		cdf_spin_lock_bh(&pAdapter->data_lock);
+		qdf_spin_lock_bh(&pAdapter->data_lock);
 		if (pAdapter->epping_timer_state != EPPING_TX_TIMER_RUNNING) {
 			pAdapter->epping_timer_state = EPPING_TX_TIMER_RUNNING;
 			qdf_timer_mod(&pAdapter->epping_timer,
 					      TX_RETRY_TIMEOUT_IN_MS);
 		}
-		cdf_spin_unlock_bh(&pAdapter->data_lock);
+		qdf_spin_unlock_bh(&pAdapter->data_lock);
 	}
 
 	return 0;
@@ -325,7 +325,7 @@ void epping_tx_complete_multiple(void *ctx, HTC_PACKET_QUEUE *pPacketQueue)
 
 	cdf_nbuf_queue_init(&skb_queue);
 
-	cdf_spin_lock_bh(&pAdapter->data_lock);
+	qdf_spin_lock_bh(&pAdapter->data_lock);
 
 	while (!HTC_QUEUE_EMPTY(pPacketQueue)) {
 		htc_pkt = htc_packet_dequeue(pPacketQueue);
@@ -367,7 +367,7 @@ void epping_tx_complete_multiple(void *ctx, HTC_PACKET_QUEUE *pPacketQueue)
 		epping_free_cookie(pAdapter->pEpping_ctx, cookie);
 	}
 
-	cdf_spin_unlock_bh(&pAdapter->data_lock);
+	qdf_spin_unlock_bh(&pAdapter->data_lock);
 
 	/* free all skbs in our local list */
 	while (cdf_nbuf_queue_len(&skb_queue)) {

+ 2 - 2
core/utils/epping/src/epping_txrx.c

@@ -308,7 +308,7 @@ void epping_destroy_adapter(epping_adapter_t *pAdapter)
 	pEpping_ctx = pAdapter->pEpping_ctx;
 	epping_unregister_adapter(pAdapter);
 
-	cdf_spinlock_destroy(&pAdapter->data_lock);
+	qdf_spinlock_destroy(&pAdapter->data_lock);
 	qdf_timer_free(&pAdapter->epping_timer);
 	pAdapter->epping_timer_state = EPPING_TX_TIMER_STOPPED;
 
@@ -367,7 +367,7 @@ epping_adapter_t *epping_add_adapter(epping_context_t *pEpping_ctx,
 	cdf_mem_copy(dev->dev_addr, (void *)macAddr, sizeof(tSirMacAddr));
 	cdf_mem_copy(pAdapter->macAddressCurrent.bytes,
 		     macAddr, sizeof(tSirMacAddr));
-	cdf_spinlock_init(&pAdapter->data_lock);
+	qdf_spinlock_create(&pAdapter->data_lock);
 	cdf_nbuf_queue_init(&pAdapter->nodrop_queue);
 	pAdapter->epping_timer_state = EPPING_TX_TIMER_STOPPED;
 	qdf_timer_init(epping_get_cdf_ctx(), &pAdapter->epping_timer,

+ 9 - 9
core/wma/inc/wma.h

@@ -519,7 +519,7 @@ struct beacon_info {
 	uint16_t noa_sub_ie_len;
 	uint8_t *noa_ie;
 	uint16_t p2p_ie_offset;
-	cdf_spinlock_t lock;
+	qdf_spinlock_t lock;
 };
 
 /**
@@ -1227,9 +1227,9 @@ typedef struct {
 	struct wma_txrx_node *interfaces;
 	pdev_cli_config_t pdevconfig;
 	qdf_list_t vdev_resp_queue;
-	cdf_spinlock_t vdev_respq_lock;
+	qdf_spinlock_t vdev_respq_lock;
 	qdf_list_t wma_hold_req_queue;
-	cdf_spinlock_t wma_hold_req_q_lock;
+	qdf_spinlock_t wma_hold_req_q_lock;
 	uint32_t ht_cap_info;
 #ifdef WLAN_FEATURE_11AC
 	uint32_t vht_cap_info;
@@ -1266,12 +1266,12 @@ typedef struct {
 	struct ieee80211com *dfs_ic;
 
 #ifdef FEATURE_WLAN_SCAN_PNO
-	cdf_wake_lock_t pno_wake_lock;
+	qdf_wake_lock_t pno_wake_lock;
 #endif
 #ifdef FEATURE_WLAN_EXTSCAN
-	cdf_wake_lock_t extscan_wake_lock;
+	qdf_wake_lock_t extscan_wake_lock;
 #endif
-	cdf_wake_lock_t wow_wake_lock;
+	qdf_wake_lock_t wow_wake_lock;
 	int wow_nack;
 	qdf_atomic_t is_wow_bus_suspended;
 	cdf_mc_timer_t wma_scan_comp_timer;
@@ -1327,7 +1327,7 @@ typedef struct {
 		uint16_t num_free;
 		union wmi_desc_elem_t *array;
 		union wmi_desc_elem_t *freelist;
-		cdf_spinlock_t wmi_desc_pool_lock;
+		qdf_spinlock_t wmi_desc_pool_lock;
 	} wmi_desc_pool;
 	uint8_t max_scan;
 	struct wmi_init_cmd saved_wmi_init_cmd;
@@ -1349,8 +1349,8 @@ typedef struct {
 	QDF_STATUS (*pe_roam_synch_cb)(tpAniSirGlobal mac,
 		roam_offload_synch_ind *roam_synch_data,
 		tpSirBssDescription  bss_desc_ptr);
-	cdf_wake_lock_t wmi_cmd_rsp_wake_lock;
-	cdf_runtime_lock_t wmi_cmd_rsp_runtime_lock;
+	qdf_wake_lock_t wmi_cmd_rsp_wake_lock;
+	qdf_runtime_lock_t wmi_cmd_rsp_runtime_lock;
 	uint32_t fine_time_measurement_cap;
 	struct wma_ini_config ini_config;
 } t_wma_handle, *tp_wma_handle;

+ 4 - 4
core/wma/inc/wma_dfs_interface.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -26,7 +26,7 @@
  */
 
 #include "ath_dfs_structs.h"
-#include <cdf_lock.h>
+#include <qdf_lock.h>
 #include "cds_ieee80211_common.h"
 
 #define IEEE80211_CHAN_MAX      255
@@ -177,7 +177,7 @@ struct ieee80211_dfs_state {
 	const struct dfs_ieee80211_channel *lastchan;
 	struct dfs_ieee80211_channel *newchan;
 	int cac_timeout_override;
-	uint8_t enable : 1, cac_timer_running : 1, ignore_dfs : 1, ignore_cac : 1;
+	uint8_t enable:1, cac_timer_running:1, ignore_dfs:1, ignore_cac:1;
 };
 
 /**
@@ -262,7 +262,7 @@ typedef struct ieee80211com {
 	uint8_t vdev_id;
 	uint8_t last_radar_found_chan;
 	int32_t dfs_pri_multiplier;
-	cdf_spinlock_t chan_lock;
+	qdf_spinlock_t chan_lock;
 	bool disable_phy_err_processing;
 	DFS_HWBD_ID dfs_hw_bd_id;
 } IEEE80211COM, *PIEEE80211COM;

+ 8 - 8
core/wma/src/wma_data.c

@@ -2275,7 +2275,7 @@ int wmi_desc_pool_init(tp_wma_handle wma_handle, uint32_t pool_size)
 	wma_handle->wmi_desc_pool.array[i].next = NULL;
 	wma_handle->wmi_desc_pool.array[i].wmi_desc.desc_id = i;
 
-	cdf_spinlock_init(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
+	qdf_spinlock_create(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
 	return 0;
 }
 
@@ -2287,7 +2287,7 @@ int wmi_desc_pool_init(tp_wma_handle wma_handle, uint32_t pool_size)
  */
 void wmi_desc_pool_deinit(tp_wma_handle wma_handle)
 {
-	cdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
+	qdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
 	if (wma_handle->wmi_desc_pool.array) {
 		cdf_mem_free(wma_handle->wmi_desc_pool.array);
 		wma_handle->wmi_desc_pool.array = NULL;
@@ -2298,8 +2298,8 @@ void wmi_desc_pool_deinit(tp_wma_handle wma_handle)
 	wma_handle->wmi_desc_pool.freelist = NULL;
 	wma_handle->wmi_desc_pool.pool_size = 0;
 	wma_handle->wmi_desc_pool.num_free = 0;
-	cdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
-	cdf_spinlock_destroy(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
+	qdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
+	qdf_spinlock_destroy(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
 }
 
 /**
@@ -2312,14 +2312,14 @@ struct wmi_desc_t *wmi_desc_get(tp_wma_handle wma_handle)
 {
 	struct wmi_desc_t *wmi_desc = NULL;
 
-	cdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
+	qdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
 	if (wma_handle->wmi_desc_pool.freelist) {
 		wma_handle->wmi_desc_pool.num_free--;
 		wmi_desc = &wma_handle->wmi_desc_pool.freelist->wmi_desc;
 		wma_handle->wmi_desc_pool.freelist =
 			wma_handle->wmi_desc_pool.freelist->next;
 	}
-	cdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
+	qdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
 
 	return wmi_desc;
 }
@@ -2333,12 +2333,12 @@ struct wmi_desc_t *wmi_desc_get(tp_wma_handle wma_handle)
  */
 void wmi_desc_put(tp_wma_handle wma_handle, struct wmi_desc_t *wmi_desc)
 {
-	cdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
+	qdf_spin_lock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
 	((union wmi_desc_elem_t *)wmi_desc)->next =
 		wma_handle->wmi_desc_pool.freelist;
 	wma_handle->wmi_desc_pool.freelist = (union wmi_desc_elem_t *)wmi_desc;
 	wma_handle->wmi_desc_pool.num_free++;
-	cdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
+	qdf_spin_unlock_bh(&wma_handle->wmi_desc_pool.wmi_desc_pool_lock);
 }
 
 #define mgmt_tx_dl_frm_len 64

+ 41 - 41
core/wma/src/wma_dev_if.c

@@ -310,10 +310,10 @@ static struct wma_target_req *wma_find_req(tp_wma_handle wma,
 	qdf_list_node_t *node1 = NULL, *node2 = NULL;
 	QDF_STATUS status;
 
-	cdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
+	qdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
 	if (QDF_STATUS_SUCCESS != qdf_list_peek_front(&wma->wma_hold_req_queue,
 						      &node2)) {
-		cdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+		qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
 		WMA_LOGE(FL("unable to get msg node from request queue"));
 		return NULL;
 	}
@@ -329,7 +329,7 @@ static struct wma_target_req *wma_find_req(tp_wma_handle wma,
 		found = true;
 		status = qdf_list_remove_node(&wma->wma_hold_req_queue, node1);
 		if (QDF_STATUS_SUCCESS != status) {
-			cdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+			qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
 			WMA_LOGD(FL("Failed to remove request for vdev_id %d type %d"),
 				 vdev_id, type);
 			return NULL;
@@ -339,7 +339,7 @@ static struct wma_target_req *wma_find_req(tp_wma_handle wma,
 			qdf_list_peek_next(&wma->wma_hold_req_queue, node1,
 					   &node2));
 
-	cdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+	qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
 	if (!found) {
 		WMA_LOGE(FL("target request not found for vdev_id %d type %d"),
 			 vdev_id, type);
@@ -371,10 +371,10 @@ static struct wma_target_req *wma_find_remove_req_msgtype(tp_wma_handle wma,
 	qdf_list_node_t *node1 = NULL, *node2 = NULL;
 	QDF_STATUS status;
 
-	cdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
+	qdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
 	if (QDF_STATUS_SUCCESS != qdf_list_peek_front(&wma->wma_hold_req_queue,
 						      &node2)) {
-		cdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+		qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
 		WMA_LOGE(FL("unable to get msg node from request queue"));
 		return NULL;
 	}
@@ -390,7 +390,7 @@ static struct wma_target_req *wma_find_remove_req_msgtype(tp_wma_handle wma,
 		found = true;
 		status = qdf_list_remove_node(&wma->wma_hold_req_queue, node1);
 		if (QDF_STATUS_SUCCESS != status) {
-			cdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+			qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
 			WMA_LOGD(FL("Failed to remove request. vdev_id %d type %d"),
 				 vdev_id, msg_type);
 			return NULL;
@@ -400,7 +400,7 @@ static struct wma_target_req *wma_find_remove_req_msgtype(tp_wma_handle wma,
 			qdf_list_peek_next(&wma->wma_hold_req_queue, node1,
 					   &node2));
 
-	cdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+	qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
 	if (!found) {
 		WMA_LOGE(FL("target request not found for vdev_id %d type %d"),
 			 vdev_id, msg_type);
@@ -430,10 +430,10 @@ static struct wma_target_req *wma_find_vdev_req(tp_wma_handle wma,
 	qdf_list_node_t *node1 = NULL, *node2 = NULL;
 	QDF_STATUS status;
 
-	cdf_spin_lock_bh(&wma->vdev_respq_lock);
+	qdf_spin_lock_bh(&wma->vdev_respq_lock);
 	if (QDF_STATUS_SUCCESS != qdf_list_peek_front(&wma->vdev_resp_queue,
 						      &node2)) {
-		cdf_spin_unlock_bh(&wma->vdev_respq_lock);
+		qdf_spin_unlock_bh(&wma->vdev_respq_lock);
 		WMA_LOGE(FL("unable to get target req from vdev resp queue"));
 		return NULL;
 	}
@@ -449,7 +449,7 @@ static struct wma_target_req *wma_find_vdev_req(tp_wma_handle wma,
 		found = true;
 		status = qdf_list_remove_node(&wma->vdev_resp_queue, node1);
 		if (QDF_STATUS_SUCCESS != status) {
-			cdf_spin_unlock_bh(&wma->vdev_respq_lock);
+			qdf_spin_unlock_bh(&wma->vdev_respq_lock);
 			WMA_LOGD(FL("Failed to target req for vdev_id %d type %d"),
 				 vdev_id, type);
 			return NULL;
@@ -459,7 +459,7 @@ static struct wma_target_req *wma_find_vdev_req(tp_wma_handle wma,
 			qdf_list_peek_next(&wma->vdev_resp_queue,
 					   node1, &node2));
 
-	cdf_spin_unlock_bh(&wma->vdev_respq_lock);
+	qdf_spin_unlock_bh(&wma->vdev_respq_lock);
 	if (!found) {
 		WMA_LOGP(FL("target request not found for vdev_id %d type %d"),
 			 vdev_id, type);
@@ -632,11 +632,11 @@ static QDF_STATUS wma_handle_vdev_detach(tp_wma_handle wma_handle,
 	/* Acquire wake lock only when you expect a response from firmware */
 	if (WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap,
 				    WMI_SERVICE_SYNC_DELETE_CMDS)) {
-		cdf_wake_lock_timeout_acquire(
+		qdf_wake_lock_timeout_acquire(
 					 &wma_handle->wmi_cmd_rsp_wake_lock,
 					 WMA_FW_RSP_EVENT_WAKE_LOCK_DURATION,
 					 WIFI_POWER_EVENT_WAKELOCK_WMI_CMD_RSP);
-		cdf_runtime_pm_prevent_suspend(
+		qdf_runtime_pm_prevent_suspend(
 					wma_handle->wmi_cmd_rsp_runtime_lock);
 	}
 	WMA_LOGD("Call txrx detach with callback for vdev %d", vdev_id);
@@ -811,7 +811,7 @@ static void wma_vdev_start_rsp(tp_wma_handle wma,
 			goto send_fail_resp;
 		}
 		bcn->seq_no = MIN_SW_SEQ;
-		cdf_spinlock_init(&bcn->lock);
+		qdf_spinlock_create(&bcn->lock);
 		qdf_atomic_set(&wma->interfaces[resp_event->vdev_id].bss_status,
 			       WMA_BSS_STATUS_STARTED);
 		WMA_LOGD("%s: AP mode (type %d subtype %d) BSS is started",
@@ -927,9 +927,9 @@ int wma_vdev_start_resp_handler(void *handle, uint8_t *cmd_param_info,
 	}
 
 	if (wma_is_vdev_in_ap_mode(wma, resp_event->vdev_id)) {
-		cdf_spin_lock_bh(&wma->dfs_ic->chan_lock);
+		qdf_spin_lock_bh(&wma->dfs_ic->chan_lock);
 		wma->dfs_ic->disable_phy_err_processing = false;
-		cdf_spin_unlock_bh(&wma->dfs_ic->chan_lock);
+		qdf_spin_unlock_bh(&wma->dfs_ic->chan_lock);
 	}
 
 	if (resp_event->status == QDF_STATUS_SUCCESS) {
@@ -1403,17 +1403,17 @@ static void wma_delete_all_ibss_peers(tp_wma_handle wma, A_UINT32 vdev_id)
 		return;
 
 	/* remove all remote peers of IBSS */
-	cdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
+	qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
 
 	temp = NULL;
 	TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t, peer_list_elem) {
 		if (temp) {
-			cdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
+			qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
 			if (qdf_atomic_read(&temp->delete_in_progress) == 0) {
 				wma_remove_peer(wma, temp->mac_addr.raw,
 					vdev_id, temp, false);
 			}
-			cdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
+			qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
 		}
 		/* self peer is deleted last */
 		if (peer == TAILQ_FIRST(&vdev->peer_list)) {
@@ -1422,7 +1422,7 @@ static void wma_delete_all_ibss_peers(tp_wma_handle wma, A_UINT32 vdev_id)
 		} else
 			temp = peer;
 	}
-	cdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
+	qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
 
 	/* remove IBSS bss peer last */
 	peer = TAILQ_FIRST(&vdev->peer_list);
@@ -1473,18 +1473,18 @@ static void wma_delete_all_ap_remote_peers(tp_wma_handle wma, A_UINT32 vdev_id)
 
 	WMA_LOGE("%s: vdev_id - %d", __func__, vdev_id);
 	/* remove all remote peers of SAP */
-	cdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
+	qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
 
 	temp = NULL;
 	TAILQ_FOREACH_REVERSE(peer, &vdev->peer_list, peer_list_t,
 			      peer_list_elem) {
 		if (temp) {
-			cdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
+			qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
 			if (qdf_atomic_read(&temp->delete_in_progress) == 0) {
 				wma_remove_peer(wma, temp->mac_addr.raw,
 						vdev_id, temp, false);
 			}
-			cdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
+			qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
 		}
 		/* self peer is deleted by caller */
 		if (peer == TAILQ_FIRST(&vdev->peer_list)) {
@@ -1494,7 +1494,7 @@ static void wma_delete_all_ap_remote_peers(tp_wma_handle wma, A_UINT32 vdev_id)
 			temp = peer;
 	}
 
-	cdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
+	qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
 }
 
 #ifdef QCA_IBSS_SUPPORT
@@ -2215,7 +2215,7 @@ QDF_STATUS wma_vdev_start(tp_wma_handle wma,
 				return QDF_STATUS_E_FAILURE;
 			}
 
-			cdf_spin_lock_bh(&wma->dfs_ic->chan_lock);
+			qdf_spin_lock_bh(&wma->dfs_ic->chan_lock);
 			if (isRestart)
 				wma->dfs_ic->disable_phy_err_processing = true;
 
@@ -2223,7 +2223,7 @@ QDF_STATUS wma_vdev_start(tp_wma_handle wma,
 			wma->dfs_ic->ic_curchan =
 				wma_dfs_configure_channel(wma->dfs_ic, chan,
 							  chanmode, req);
-			cdf_spin_unlock_bh(&wma->dfs_ic->chan_lock);
+			qdf_spin_unlock_bh(&wma->dfs_ic->chan_lock);
 
 			wma_unified_dfs_phyerr_filter_offload_enable(wma);
 			dfs->disable_dfs_ch_switch =
@@ -2460,9 +2460,9 @@ int wma_vdev_delete_handler(void *handle, uint8_t *cmd_param_info,
 				event->vdev_id);
 		return -EINVAL;
 	}
-	cdf_wake_lock_release(&wma->wmi_cmd_rsp_wake_lock,
+	qdf_wake_lock_release(&wma->wmi_cmd_rsp_wake_lock,
 				WIFI_POWER_EVENT_WAKELOCK_WMI_CMD_RSP);
-	cdf_runtime_pm_allow_suspend(wma->wmi_cmd_rsp_runtime_lock);
+	qdf_runtime_pm_allow_suspend(wma->wmi_cmd_rsp_runtime_lock);
 	/* Send response to upper layers */
 	wma_vdev_detach_callback(req_msg->user_data);
 	cdf_mc_timer_stop(&req_msg->event_timeout);
@@ -2513,9 +2513,9 @@ int wma_peer_delete_handler(void *handle, uint8_t *cmd_param_info,
 		return -EINVAL;
 	}
 
-	cdf_wake_lock_release(&wma->wmi_cmd_rsp_wake_lock,
+	qdf_wake_lock_release(&wma->wmi_cmd_rsp_wake_lock,
 				WIFI_POWER_EVENT_WAKELOCK_WMI_CMD_RSP);
-	cdf_runtime_pm_allow_suspend(wma->wmi_cmd_rsp_runtime_lock);
+	qdf_runtime_pm_allow_suspend(wma->wmi_cmd_rsp_runtime_lock);
 		/* Cleanup timeout handler */
 	cdf_mc_timer_stop(&req_msg->event_timeout);
 	cdf_mc_timer_destroy(&req_msg->event_timeout);
@@ -2633,15 +2633,15 @@ struct wma_target_req *wma_fill_hold_req(tp_wma_handle wma,
 	cdf_mc_timer_init(&req->event_timeout, QDF_TIMER_TYPE_SW,
 			  wma_hold_req_timer, req);
 	cdf_mc_timer_start(&req->event_timeout, timeout);
-	cdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
+	qdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
 	status = qdf_list_insert_back(&wma->wma_hold_req_queue, &req->node);
 	if (QDF_STATUS_SUCCESS != status) {
-		cdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+		qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
 		WMA_LOGE(FL("Failed add request in queue"));
 		cdf_mem_free(req);
 		return NULL;
 	}
-	cdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+	qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
 	return req;
 }
 
@@ -2815,9 +2815,9 @@ void wma_vdev_resp_timer(void *data)
 
 		if (WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap,
 			 WMI_SERVICE_SYNC_DELETE_CMDS)) {
-			cdf_wake_lock_release(&wma->wmi_cmd_rsp_wake_lock,
+			qdf_wake_lock_release(&wma->wmi_cmd_rsp_wake_lock,
 				WIFI_POWER_EVENT_WAKELOCK_WMI_CMD_RSP);
-			cdf_runtime_pm_allow_suspend(
+			qdf_runtime_pm_allow_suspend(
 				wma->wmi_cmd_rsp_runtime_lock);
 		}
 		params->status = QDF_STATUS_E_TIMEOUT;
@@ -2941,17 +2941,17 @@ struct wma_target_req *wma_fill_vdev_req(tp_wma_handle wma,
 	cdf_mc_timer_init(&req->event_timeout, QDF_TIMER_TYPE_SW,
 			  wma_vdev_resp_timer, req);
 	cdf_mc_timer_start(&req->event_timeout, timeout);
-	cdf_spin_lock_bh(&wma->vdev_respq_lock);
+	qdf_spin_lock_bh(&wma->vdev_respq_lock);
 	status = qdf_list_insert_back(&wma->vdev_resp_queue, &req->node);
 	if (QDF_STATUS_SUCCESS != status) {
-		cdf_spin_unlock_bh(&wma->vdev_respq_lock);
+		qdf_spin_unlock_bh(&wma->vdev_respq_lock);
 		WMA_LOGE(FL("Failed add request in queue for vdev_id %d type %d"),
 			 vdev_id, type);
 		cdf_mem_free(req);
 		return NULL;
 	}
 
-	cdf_spin_unlock_bh(&wma->vdev_respq_lock);
+	qdf_spin_unlock_bh(&wma->vdev_respq_lock);
 	return req;
 }
 
@@ -4385,10 +4385,10 @@ static void wma_delete_sta_req_ap_mode(tp_wma_handle wma,
 		 * Acquire wake lock and bus lock till
 		 * firmware sends the response
 		 */
-		cdf_wake_lock_timeout_acquire(&wma->wmi_cmd_rsp_wake_lock,
+		qdf_wake_lock_timeout_acquire(&wma->wmi_cmd_rsp_wake_lock,
 				      WMA_FW_RSP_EVENT_WAKE_LOCK_DURATION,
 				      WIFI_POWER_EVENT_WAKELOCK_WMI_CMD_RSP);
-		cdf_runtime_pm_prevent_suspend(wma->wmi_cmd_rsp_runtime_lock);
+		qdf_runtime_pm_prevent_suspend(wma->wmi_cmd_rsp_runtime_lock);
 		return;
 	}
 

+ 12 - 12
core/wma/src/wma_features.c

@@ -2034,12 +2034,12 @@ static int wma_unified_dfs_radar_rx_event_handler(void *handle,
 
 	radar_event = param_tlvs->fixed_param;
 
-	cdf_spin_lock_bh(&ic->chan_lock);
+	qdf_spin_lock_bh(&ic->chan_lock);
 	chan = ic->ic_curchan;
 	if (ic->disable_phy_err_processing) {
 		WMA_LOGD("%s: radar indication done,drop phyerror event",
 			__func__);
-		cdf_spin_unlock_bh(&ic->chan_lock);
+		qdf_spin_unlock_bh(&ic->chan_lock);
 		return 0;
 	}
 
@@ -2047,11 +2047,11 @@ static int wma_unified_dfs_radar_rx_event_handler(void *handle,
 		WMA_LOGE
 			("%s: Invalid DFS Phyerror event. Channel=%d is Non-DFS",
 			__func__, chan->ic_ieee);
-		cdf_spin_unlock_bh(&ic->chan_lock);
+		qdf_spin_unlock_bh(&ic->chan_lock);
 		return 0;
 	}
 
-	cdf_spin_unlock_bh(&ic->chan_lock);
+	qdf_spin_unlock_bh(&ic->chan_lock);
 	dfs->ath_dfs_stats.total_phy_errors++;
 
 	if (dfs->dfs_caps.ath_chip_is_bb_tlv) {
@@ -2890,7 +2890,7 @@ int wma_wow_wakeup_host_event(void *handle, uint8_t *event,
 		if (node) {
 			WMA_LOGD("NLO match happened");
 			node->nlo_match_evt_received = true;
-			cdf_wake_lock_timeout_acquire(&wma->pno_wake_lock,
+			qdf_wake_lock_timeout_acquire(&wma->pno_wake_lock,
 					WMA_PNO_MATCH_WAKE_LOCK_TIMEOUT,
 					WIFI_POWER_EVENT_WAKELOCK_PNO);
 		}
@@ -3065,7 +3065,7 @@ int wma_wow_wakeup_host_event(void *handle, uint8_t *event,
 	}
 
 	if (wake_lock_duration) {
-		cdf_wake_lock_timeout_acquire(&wma->wow_wake_lock,
+		qdf_wake_lock_timeout_acquire(&wma->wow_wake_lock,
 					      wake_lock_duration,
 					      WIFI_POWER_EVENT_WAKELOCK_WOW);
 		WMA_LOGA("Holding %d msec wake_lock", wake_lock_duration);
@@ -6634,7 +6634,7 @@ void wma_target_suspend_acknowledge(void *context)
 	wma->wow_nack = wow_nack;
 	qdf_event_set(&wma->target_suspend);
 	if (wow_nack)
-		cdf_wake_lock_timeout_acquire(&wma->wow_wake_lock,
+		qdf_wake_lock_timeout_acquire(&wma->wow_wake_lock,
 					      WMA_WAKE_LOCK_TIMEOUT,
 					      WIFI_POWER_EVENT_WAKELOCK_WOW);
 }
@@ -7301,7 +7301,7 @@ struct ieee80211com *wma_dfs_attach(struct ieee80211com *dfs_ic)
 	 * and shared DFS code
 	 */
 	dfs_ic->ic_dfs_notify_radar = ieee80211_mark_dfs;
-	cdf_spinlock_init(&dfs_ic->chan_lock);
+	qdf_spinlock_create(&dfs_ic->chan_lock);
 	/* Initializes DFS Data Structures and queues */
 	dfs_attach(dfs_ic);
 
@@ -7318,7 +7318,7 @@ void wma_dfs_detach(struct ieee80211com *dfs_ic)
 {
 	dfs_detach(dfs_ic);
 
-	cdf_spinlock_destroy(&dfs_ic->chan_lock);
+	qdf_spinlock_destroy(&dfs_ic->chan_lock);
 	if (NULL != dfs_ic->ic_curchan) {
 		OS_FREE(dfs_ic->ic_curchan);
 		dfs_ic->ic_curchan = NULL;
@@ -7636,7 +7636,7 @@ int wma_dfs_indicate_radar(struct ieee80211com *ic,
 	 * But, when DFS test mode is enabled, allow multiple dfs
 	 * radar events to be posted on the same channel.
 	 */
-	cdf_spin_lock_bh(&ic->chan_lock);
+	qdf_spin_lock_bh(&ic->chan_lock);
 	if (!pmac->sap.SapDfsInfo.disable_dfs_ch_switch)
 		wma->dfs_ic->disable_phy_err_processing = true;
 
@@ -7652,7 +7652,7 @@ int wma_dfs_indicate_radar(struct ieee80211com *ic,
 			WMA_LOGE("%s:Application triggered channel switch in progress!.. drop radar event indiaction to SAP",
 				__func__);
 			cdf_mem_free(radar_event);
-			cdf_spin_unlock_bh(&ic->chan_lock);
+			qdf_spin_unlock_bh(&ic->chan_lock);
 			return 0;
 		}
 
@@ -7670,7 +7670,7 @@ int wma_dfs_indicate_radar(struct ieee80211com *ic,
 		wma_send_msg(wma, WMA_DFS_RADAR_IND, (void *)radar_event, 0);
 		WMA_LOGE("%s:DFS- WMA_DFS_RADAR_IND Message Posted", __func__);
 	}
-	cdf_spin_unlock_bh(&ic->chan_lock);
+	qdf_spin_unlock_bh(&ic->chan_lock);
 
 	return 0;
 }

+ 27 - 27
core/wma/src/wma_main.c

@@ -1624,13 +1624,13 @@ QDF_STATUS wma_open(void *cds_context,
 
 	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE) {
 #ifdef FEATURE_WLAN_SCAN_PNO
-		cdf_wake_lock_init(&wma_handle->pno_wake_lock, "wlan_pno_wl");
+		qdf_wake_lock_create(&wma_handle->pno_wake_lock, "wlan_pno_wl");
 #endif /* FEATURE_WLAN_SCAN_PNO */
 #ifdef FEATURE_WLAN_EXTSCAN
-		cdf_wake_lock_init(&wma_handle->extscan_wake_lock,
+		qdf_wake_lock_create(&wma_handle->extscan_wake_lock,
 					"wlan_extscan_wl");
 #endif /* FEATURE_WLAN_EXTSCAN */
-		cdf_wake_lock_init(&wma_handle->wow_wake_lock, "wlan_wow_wl");
+		qdf_wake_lock_create(&wma_handle->wow_wake_lock, "wlan_wow_wl");
 	}
 
 	/* attach the wmi */
@@ -1836,10 +1836,10 @@ QDF_STATUS wma_open(void *cds_context,
 
 	qdf_list_create(&wma_handle->vdev_resp_queue,
 		      MAX_ENTRY_VDEV_RESP_QUEUE);
-	cdf_spinlock_init(&wma_handle->vdev_respq_lock);
+	qdf_spinlock_create(&wma_handle->vdev_respq_lock);
 	qdf_list_create(&wma_handle->wma_hold_req_queue,
 		      MAX_ENTRY_HOLD_REQ_QUEUE);
-	cdf_spinlock_init(&wma_handle->wma_hold_req_q_lock);
+	qdf_spinlock_create(&wma_handle->wma_hold_req_q_lock);
 	qdf_atomic_init(&wma_handle->is_wow_bus_suspended);
 	qdf_atomic_init(&wma_handle->scan_id_counter);
 
@@ -1992,10 +1992,10 @@ QDF_STATUS wma_open(void *cds_context,
 				WMI_RSSI_BREACH_EVENTID,
 				wma_rssi_breached_event_handler);
 
-	cdf_wake_lock_init(&wma_handle->wmi_cmd_rsp_wake_lock,
+	qdf_wake_lock_create(&wma_handle->wmi_cmd_rsp_wake_lock,
 				"wlan_fw_rsp_wakelock");
 	wma_handle->wmi_cmd_rsp_runtime_lock =
-			cdf_runtime_lock_init("wlan_fw_rsp_runtime_lock");
+			qdf_runtime_lock_init("wlan_fw_rsp_runtime_lock");
 
 	/* Register peer assoc conf event handler */
 	wmi_unified_register_event_handler(wma_handle->wmi_handle,
@@ -2010,10 +2010,10 @@ QDF_STATUS wma_open(void *cds_context,
 	return QDF_STATUS_SUCCESS;
 
 err_dbglog_init:
-	cdf_wake_lock_destroy(&wma_handle->wmi_cmd_rsp_wake_lock);
-	cdf_runtime_lock_deinit(wma_handle->wmi_cmd_rsp_runtime_lock);
-	cdf_spinlock_destroy(&wma_handle->vdev_respq_lock);
-	cdf_spinlock_destroy(&wma_handle->wma_hold_req_q_lock);
+	qdf_wake_lock_destroy(&wma_handle->wmi_cmd_rsp_wake_lock);
+	qdf_runtime_lock_deinit(wma_handle->wmi_cmd_rsp_runtime_lock);
+	qdf_spinlock_destroy(&wma_handle->vdev_respq_lock);
+	qdf_spinlock_destroy(&wma_handle->wma_hold_req_q_lock);
 err_event_init:
 	wmi_unified_unregister_event_handler(wma_handle->wmi_handle,
 					     WMI_DEBUG_PRINT_EVENTID);
@@ -2031,12 +2031,12 @@ err_wma_handle:
 
 	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE) {
 #ifdef FEATURE_WLAN_SCAN_PNO
-		cdf_wake_lock_destroy(&wma_handle->pno_wake_lock);
+		qdf_wake_lock_destroy(&wma_handle->pno_wake_lock);
 #endif /* FEATURE_WLAN_SCAN_PNO */
 #ifdef FEATURE_WLAN_EXTSCAN
-		cdf_wake_lock_destroy(&wma_handle->extscan_wake_lock);
+		qdf_wake_lock_destroy(&wma_handle->extscan_wake_lock);
 #endif /* FEATURE_WLAN_EXTSCAN */
-		cdf_wake_lock_destroy(&wma_handle->wow_wake_lock);
+		qdf_wake_lock_destroy(&wma_handle->wow_wake_lock);
 	}
 	cds_free_context(cds_context, QDF_MODULE_ID_WMA, wma_handle);
 
@@ -2918,9 +2918,9 @@ static void wma_cleanup_hold_req(tp_wma_handle wma)
 	qdf_list_node_t *node1 = NULL;
 	QDF_STATUS status;
 
-	cdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
+	qdf_spin_lock_bh(&wma->wma_hold_req_q_lock);
 	if (!qdf_list_size(&wma->wma_hold_req_queue)) {
-		cdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+		qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
 		WMA_LOGI(FL("request queue is empty"));
 		return;
 	}
@@ -2930,7 +2930,7 @@ static void wma_cleanup_hold_req(tp_wma_handle wma)
 		req_msg = cdf_container_of(node1, struct wma_target_req, node);
 		status = qdf_list_remove_node(&wma->wma_hold_req_queue, node1);
 		if (QDF_STATUS_SUCCESS != status) {
-			cdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+			qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
 			WMA_LOGE(FL("Failed to remove request for vdev_id %d type %d"),
 				 req_msg->vdev_id, req_msg->type);
 			return;
@@ -2938,7 +2938,7 @@ static void wma_cleanup_hold_req(tp_wma_handle wma)
 		cdf_mc_timer_destroy(&req_msg->event_timeout);
 		cdf_mem_free(req_msg);
 	}
-	cdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
+	qdf_spin_unlock_bh(&wma->wma_hold_req_q_lock);
 }
 
 /**
@@ -2953,9 +2953,9 @@ static void wma_cleanup_vdev_resp(tp_wma_handle wma)
 	qdf_list_node_t *node1 = NULL;
 	QDF_STATUS status;
 
-	cdf_spin_lock_bh(&wma->vdev_respq_lock);
+	qdf_spin_lock_bh(&wma->vdev_respq_lock);
 	if (!qdf_list_size(&wma->vdev_resp_queue)) {
-		cdf_spin_unlock_bh(&wma->vdev_respq_lock);
+		qdf_spin_unlock_bh(&wma->vdev_respq_lock);
 		WMA_LOGI(FL("request queue maybe empty"));
 		return;
 	}
@@ -2965,7 +2965,7 @@ static void wma_cleanup_vdev_resp(tp_wma_handle wma)
 		req_msg = cdf_container_of(node1, struct wma_target_req, node);
 		status = qdf_list_remove_node(&wma->vdev_resp_queue, node1);
 		if (QDF_STATUS_SUCCESS != status) {
-			cdf_spin_unlock_bh(&wma->vdev_respq_lock);
+			qdf_spin_unlock_bh(&wma->vdev_respq_lock);
 			WMA_LOGE(FL("Failed to remove request for vdev_id %d type %d"),
 				 req_msg->vdev_id, req_msg->type);
 			return;
@@ -2973,7 +2973,7 @@ static void wma_cleanup_vdev_resp(tp_wma_handle wma)
 		cdf_mc_timer_destroy(&req_msg->event_timeout);
 		cdf_mem_free(req_msg);
 	}
-	cdf_spin_unlock_bh(&wma->vdev_respq_lock);
+	qdf_spin_unlock_bh(&wma->vdev_respq_lock);
 }
 
 /**
@@ -3135,12 +3135,12 @@ QDF_STATUS wma_close(void *cds_ctx)
 
 	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE) {
 #ifdef FEATURE_WLAN_SCAN_PNO
-		cdf_wake_lock_destroy(&wma_handle->pno_wake_lock);
+		qdf_wake_lock_destroy(&wma_handle->pno_wake_lock);
 #endif /* FEATURE_WLAN_SCAN_PNO */
 #ifdef FEATURE_WLAN_EXTSCAN
-		cdf_wake_lock_destroy(&wma_handle->extscan_wake_lock);
+		qdf_wake_lock_destroy(&wma_handle->extscan_wake_lock);
 #endif /* FEATURE_WLAN_EXTSCAN */
-		cdf_wake_lock_destroy(&wma_handle->wow_wake_lock);
+		qdf_wake_lock_destroy(&wma_handle->wow_wake_lock);
 	}
 
 	/* unregister Firmware debug log */
@@ -3161,8 +3161,8 @@ QDF_STATUS wma_close(void *cds_ctx)
 	cdf_event_destroy(&wma_handle->recovery_event);
 	wma_cleanup_vdev_resp(wma_handle);
 	wma_cleanup_hold_req(wma_handle);
-	cdf_wake_lock_destroy(&wma_handle->wmi_cmd_rsp_wake_lock);
-	cdf_runtime_lock_deinit(wma_handle->wmi_cmd_rsp_runtime_lock);
+	qdf_wake_lock_destroy(&wma_handle->wmi_cmd_rsp_wake_lock);
+	qdf_runtime_lock_deinit(wma_handle->wmi_cmd_rsp_runtime_lock);
 	for (idx = 0; idx < wma_handle->num_mem_chunks; ++idx) {
 		cdf_os_mem_free_consistent(wma_handle->qdf_dev,
 					   wma_handle->mem_chunks[idx].len,

+ 7 - 7
core/wma/src/wma_mgmt.c

@@ -109,7 +109,7 @@ static void wma_send_bcn_buf_ll(tp_wma_handle wma,
 		return;
 	}
 
-	cdf_spin_lock_bh(&bcn->lock);
+	qdf_spin_lock_bh(&bcn->lock);
 
 	bcn_payload = cdf_nbuf_data(bcn->buf);
 
@@ -207,7 +207,7 @@ static void wma_send_bcn_buf_ll(tp_wma_handle wma,
 	if (ret != QDF_STATUS_SUCCESS) {
 		cdf_nbuf_free(wmi_buf);
 		WMA_LOGE("%s: failed map beacon buf to DMA region", __func__);
-		cdf_spin_unlock_bh(&bcn->lock);
+		qdf_spin_unlock_bh(&bcn->lock);
 		return;
 	}
 
@@ -237,7 +237,7 @@ static void wma_send_bcn_buf_ll(tp_wma_handle wma,
 		WMA_LOGE("Failed to send WMI_PDEV_SEND_BCN_CMDID command");
 		wmi_buf_free(wmi_buf);
 	}
-	cdf_spin_unlock_bh(&bcn->lock);
+	qdf_spin_unlock_bh(&bcn->lock);
 }
 
 /**
@@ -2320,7 +2320,7 @@ QDF_STATUS wma_store_bcn_tmpl(tp_wma_handle wma, uint8_t vdev_id,
 	}
 	WMA_LOGD("%s: Storing received beacon template buf to local buffer",
 		 __func__);
-	cdf_spin_lock_bh(&bcn->lock);
+	qdf_spin_lock_bh(&bcn->lock);
 
 	/*
 	 * Copy received beacon template content in local buffer.
@@ -2358,7 +2358,7 @@ QDF_STATUS wma_store_bcn_tmpl(tp_wma_handle wma, uint8_t vdev_id,
 	cdf_nbuf_put_tail(bcn->buf, len);
 	bcn->len = len;
 
-	cdf_spin_unlock_bh(&bcn->lock);
+	qdf_spin_unlock_bh(&bcn->lock);
 
 	return QDF_STATUS_SUCCESS;
 }
@@ -2419,13 +2419,13 @@ int wma_tbttoffset_update_event_handler(void *handle, uint8_t *event,
 		/* Save the adjusted TSF */
 		intf[if_id].tsfadjust = adjusted_tsf[if_id];
 
-		cdf_spin_lock_bh(&bcn->lock);
+		qdf_spin_lock_bh(&bcn->lock);
 		cdf_mem_zero(&bcn_info, sizeof(bcn_info));
 		bcn_info.beacon = cdf_nbuf_data(bcn->buf);
 		bcn_info.p2pIeOffset = bcn->p2p_ie_offset;
 		bcn_info.beaconLength = bcn->len;
 		bcn_info.timIeOffset = bcn->tim_ie_offset;
-		cdf_spin_unlock_bh(&bcn->lock);
+		qdf_spin_unlock_bh(&bcn->lock);
 
 		/* Update beacon template in firmware */
 		wmi_unified_bcn_tmpl_send(wma, if_id, &bcn_info, 0);

+ 5 - 5
core/wma/src/wma_scan_roam.c

@@ -4033,7 +4033,7 @@ int wma_nlo_match_evt_handler(void *handle, uint8_t *event,
 	if (node)
 		node->nlo_match_evt_received = true;
 
-	cdf_wake_lock_timeout_acquire(&wma->pno_wake_lock,
+	qdf_wake_lock_timeout_acquire(&wma->pno_wake_lock,
 				      WMA_PNO_MATCH_WAKE_LOCK_TIMEOUT,
 				      WIFI_POWER_EVENT_WAKELOCK_PNO);
 
@@ -4078,7 +4078,7 @@ int wma_nlo_scan_cmp_evt_handler(void *handle, uint8_t *event,
 		goto skip_pno_cmp_ind;
 	}
 
-	cdf_wake_lock_release(&wma->pno_wake_lock,
+	qdf_wake_lock_release(&wma->pno_wake_lock,
 		WIFI_POWER_EVENT_WAKELOCK_PNO);
 	scan_event =
 		(tSirScanOffloadEvent *)
@@ -4087,7 +4087,7 @@ int wma_nlo_scan_cmp_evt_handler(void *handle, uint8_t *event,
 		/* Posting scan completion msg would take scan cache result
 		 * from LIM module and update in scan cache maintained in SME.*/
 		WMA_LOGE("Posting PNO Scan completion to umac");
-		cdf_wake_lock_timeout_acquire(&wma->pno_wake_lock,
+		qdf_wake_lock_timeout_acquire(&wma->pno_wake_lock,
 				WMA_PNO_SCAN_COMPLETE_WAKE_LOCK_TIMEOUT,
 				WIFI_POWER_EVENT_WAKELOCK_PNO);
 		cdf_mem_zero(scan_event, sizeof(tSirScanOffloadEvent));
@@ -4313,14 +4313,14 @@ int wma_extscan_operations_event_handler(void *handle,
 	case WMI_EXTSCAN_CYCLE_STARTED_EVENT:
 		WMA_LOGD("%s: received WMI_EXTSCAN_CYCLE_STARTED_EVENT",
 			 __func__);
-		cdf_wake_lock_timeout_acquire(&wma->extscan_wake_lock,
+		qdf_wake_lock_timeout_acquire(&wma->extscan_wake_lock,
 				      WMA_EXTSCAN_CYCLE_WAKE_LOCK_DURATION,
 				      WIFI_POWER_EVENT_WAKELOCK_EXT_SCAN);
 		goto exit_handler;
 	case WMI_EXTSCAN_CYCLE_COMPLETED_EVENT:
 		WMA_LOGD("%s: received WMI_EXTSCAN_CYCLE_COMPLETED_EVENT",
 			 __func__);
-		cdf_wake_lock_release(&wma->extscan_wake_lock,
+		qdf_wake_lock_release(&wma->extscan_wake_lock,
 				      WIFI_POWER_EVENT_WAKELOCK_EXT_SCAN);
 		goto exit_handler;
 	default:

+ 3 - 3
core/wma/src/wma_utils.c

@@ -2170,20 +2170,20 @@ void *wma_get_beacon_buffer_by_vdev_id(uint8_t vdev_id, uint32_t *buffer_size)
 		return NULL;
 	}
 
-	cdf_spin_lock_bh(&beacon->lock);
+	qdf_spin_lock_bh(&beacon->lock);
 
 	buf_size = cdf_nbuf_len(beacon->buf);
 	buf = cdf_mem_malloc(buf_size);
 
 	if (!buf) {
-		cdf_spin_unlock_bh(&beacon->lock);
+		qdf_spin_unlock_bh(&beacon->lock);
 		WMA_LOGE("%s: alloc failed for beacon buf", __func__);
 		return NULL;
 	}
 
 	cdf_mem_copy(buf, cdf_nbuf_data(beacon->buf), buf_size);
 
-	cdf_spin_unlock_bh(&beacon->lock);
+	qdf_spin_unlock_bh(&beacon->lock);
 
 	if (buffer_size)
 		*buffer_size = buf_size;

+ 20 - 20
core/wmi/wmi_unified.c

@@ -863,7 +863,7 @@ int wmi_unified_cmd_send(wmi_unified_t wmi_handle, wmi_buf_t buf, int len,
 	wma_log_cmd_id(cmd_id);
 
 #ifdef WMI_INTERFACE_EVENT_LOGGING
-	cdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
+	qdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
 	/*Record 16 bytes of WMI cmd data - exclude TLV and WMI headers */
 	if (cmd_id == WMI_MGMT_TX_SEND_CMDID) {
 		WMI_MGMT_COMMAND_RECORD(cmd_id,
@@ -873,7 +873,7 @@ int wmi_unified_cmd_send(wmi_unified_t wmi_handle, wmi_buf_t buf, int len,
 					    2));
 	}
 
-	cdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
+	qdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
 #endif
 
 	status = htc_send_pkt(wmi_handle->htc_handle, pkt);
@@ -1027,13 +1027,13 @@ static void wmi_process_fw_event_worker_thread_ctx
 	id = WMI_GET_FIELD(cdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
 	data = cdf_nbuf_data(evt_buf);
 
-	cdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
+	qdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
 	/* Exclude 4 bytes of TLV header */
 	WMI_RX_EVENT_RECORD(id, ((uint8_t *) data + 4));
-	cdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
-	cdf_spin_lock_bh(&wmi_handle->eventq_lock);
+	qdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
+	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
 	cdf_nbuf_queue_add(&wmi_handle->event_queue, evt_buf);
-	cdf_spin_unlock_bh(&wmi_handle->eventq_lock);
+	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
 	schedule_work(&wmi_handle->rx_event_work);
 	return;
 }
@@ -1127,14 +1127,14 @@ void __wmi_control_rx(struct wmi_unified *wmi_handle, wmi_buf_t evt_buf)
 			goto end;
 		}
 #ifdef WMI_INTERFACE_EVENT_LOGGING
-		cdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
+		qdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
 		/* Exclude 4 bytes of TLV header */
 		if (id == WMI_MGMT_TX_COMPLETION_EVENTID) {
 			WMI_MGMT_EVENT_RECORD(id, ((uint8_t *) data + 4));
 		} else {
 			WMI_EVENT_RECORD(id, ((uint8_t *) data + 4));
 		}
-		cdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
+		qdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
 #endif
 		/* Call the WMI registered event handler */
 		wmi_handle->event_handler[idx] (wmi_handle->scn_handle,
@@ -1173,14 +1173,14 @@ void wmi_rx_event_work(struct work_struct *work)
 					       rx_event_work);
 	wmi_buf_t buf;
 
-	cdf_spin_lock_bh(&wmi->eventq_lock);
+	qdf_spin_lock_bh(&wmi->eventq_lock);
 	buf = cdf_nbuf_queue_remove(&wmi->event_queue);
-	cdf_spin_unlock_bh(&wmi->eventq_lock);
+	qdf_spin_unlock_bh(&wmi->eventq_lock);
 	while (buf) {
 		__wmi_control_rx(wmi, buf);
-		cdf_spin_lock_bh(&wmi->eventq_lock);
+		qdf_spin_lock_bh(&wmi->eventq_lock);
 		buf = cdf_nbuf_queue_remove(&wmi->event_queue);
-		cdf_spin_unlock_bh(&wmi->eventq_lock);
+		qdf_spin_unlock_bh(&wmi->eventq_lock);
 	}
 }
 
@@ -1218,7 +1218,7 @@ void *wmi_unified_attach(ol_scn_t scn_handle,
 	qdf_atomic_init(&wmi_handle->pending_cmds);
 	qdf_atomic_init(&wmi_handle->is_target_suspended);
 	wmi_runtime_pm_init(wmi_handle);
-	cdf_spinlock_init(&wmi_handle->eventq_lock);
+	qdf_spinlock_create(&wmi_handle->eventq_lock);
 	cdf_nbuf_queue_init(&wmi_handle->event_queue);
 #ifdef CONFIG_CNSS
 	cnss_init_work(&wmi_handle->rx_event_work, wmi_rx_event_work);
@@ -1226,7 +1226,7 @@ void *wmi_unified_attach(ol_scn_t scn_handle,
 	INIT_WORK(&wmi_handle->rx_event_work, wmi_rx_event_work);
 #endif
 #ifdef WMI_INTERFACE_EVENT_LOGGING
-	cdf_spinlock_init(&wmi_handle->wmi_record_lock);
+	qdf_spinlock_create(&wmi_handle->wmi_record_lock);
 #endif
 	wmi_handle->wma_process_fw_event_handler_cbk = func;
 	return wmi_handle;
@@ -1237,13 +1237,13 @@ void wmi_unified_detach(struct wmi_unified *wmi_handle)
 	wmi_buf_t buf;
 
 	cds_flush_work(&wmi_handle->rx_event_work);
-	cdf_spin_lock_bh(&wmi_handle->eventq_lock);
+	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
 	buf = cdf_nbuf_queue_remove(&wmi_handle->event_queue);
 	while (buf) {
 		cdf_nbuf_free(buf);
 		buf = cdf_nbuf_queue_remove(&wmi_handle->event_queue);
 	}
-	cdf_spin_unlock_bh(&wmi_handle->eventq_lock);
+	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
 	if (wmi_handle != NULL) {
 		OS_FREE(wmi_handle);
 		wmi_handle = NULL;
@@ -1269,13 +1269,13 @@ wmi_unified_remove_work(struct wmi_unified *wmi_handle)
 	CDF_TRACE(QDF_MODULE_ID_WMI, CDF_TRACE_LEVEL_INFO,
 		"Enter: %s", __func__);
 	cds_flush_work(&wmi_handle->rx_event_work);
-	cdf_spin_lock_bh(&wmi_handle->eventq_lock);
+	qdf_spin_lock_bh(&wmi_handle->eventq_lock);
 	buf = cdf_nbuf_queue_remove(&wmi_handle->event_queue);
 	while (buf) {
 		cdf_nbuf_free(buf);
 		buf = cdf_nbuf_queue_remove(&wmi_handle->event_queue);
 	}
-	cdf_spin_unlock_bh(&wmi_handle->eventq_lock);
+	qdf_spin_unlock_bh(&wmi_handle->eventq_lock);
 	CDF_TRACE(QDF_MODULE_ID_WMA, CDF_TRACE_LEVEL_INFO,
 		"Done: %s", __func__);
 }
@@ -1298,7 +1298,7 @@ void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
 		 get_wmi_cmd_string(cmd_id), cmd_id);
 #endif
 
-	cdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
+	qdf_spin_lock_bh(&wmi_handle->wmi_record_lock);
 	/* Record 16 bytes of WMI cmd tx complete data
 	   - exclude TLV and WMI headers */
 	if (cmd_id == WMI_MGMT_TX_SEND_CMDID) {
@@ -1309,7 +1309,7 @@ void wmi_htc_tx_complete(void *ctx, HTC_PACKET *htc_pkt)
 				((uint32_t *) cdf_nbuf_data(wmi_cmd_buf) + 2));
 	}
 
-	cdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
+	qdf_spin_unlock_bh(&wmi_handle->wmi_record_lock);
 #endif
 	cdf_nbuf_free(wmi_cmd_buf);
 	cdf_mem_free(htc_pkt);

+ 2 - 2
core/wmi/wmi_unified_priv.h

@@ -76,7 +76,7 @@ struct wmi_unified {
 	wmi_unified_event_handler event_handler[WMI_UNIFIED_MAX_EVENT];
 	uint32_t max_event_idx;
 	void *htc_handle;
-	cdf_spinlock_t eventq_lock;
+	qdf_spinlock_t eventq_lock;
 	cdf_nbuf_queue_t event_queue;
 	struct work_struct rx_event_work;
 #ifdef WLAN_OPEN_SOURCE
@@ -85,7 +85,7 @@ struct wmi_unified {
 #endif /* WLAN_OPEN_SOURCE */
 
 #ifdef WMI_INTERFACE_EVENT_LOGGING
-	cdf_spinlock_t wmi_record_lock;
+	qdf_spinlock_t wmi_record_lock;
 #endif /*WMI_INTERFACE_EVENT_LOGGING */
 
 	qdf_atomic_t is_target_suspended;