/*
 * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_mem
 * QCA driver framework (QDF) memory management APIs
 */
#if !defined(__QDF_MEMORY_H)
#define __QDF_MEMORY_H

/* Include Files */
#include <qdf_types.h>
#include <i_qdf_mem.h>
#include <i_qdf_trace.h>
#include <qdf_atomic.h>

#define QDF_CACHE_LINE_SZ __qdf_cache_line_sz
/**
 * qdf_align() - align to the given size.
 * @a: input that needs to be aligned.
 * @align_size: boundary on which 'a' has to be aligned.
 *
 * Return: aligned value.
 */
#define qdf_align(a, align_size) __qdf_align(a, align_size)
#define qdf_page_size __page_size
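
/*
 * Example (illustrative sketch only, not part of the API; 'buf_len' is a
 * hypothetical caller-side variable): rounding a buffer length up to the
 * cache-line boundary before allocating it.
 *
 *      uint32_t buf_len = 100;
 *      uint32_t aligned_len = qdf_align(buf_len, QDF_CACHE_LINE_SZ);
 *
 * aligned_len is now a multiple of the cache line size.
 */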

/**
 * struct qdf_mem_dma_page_t - Allocated dmaable page
 * @page_v_addr_start: Page start virtual address
 * @page_v_addr_end: Page end virtual address
 * @page_p_addr: Page start physical address
 */
struct qdf_mem_dma_page_t {
        char *page_v_addr_start;
        char *page_v_addr_end;
        qdf_dma_addr_t page_p_addr;
};

/**
 * struct qdf_mem_multi_page_t - multiple page allocation information storage
 * @num_element_per_page: Number of elements in a single page
 * @num_pages: Number of pages needed for the allocation
 * @dma_pages: page information storage in case of coherent memory
 * @cacheable_pages: page information storage in case of cacheable memory
 * @page_size: size of each allocated page
 * @is_mem_prealloc: flag indicating whether the pages are pre-allocated
 */
struct qdf_mem_multi_page_t {
        uint16_t num_element_per_page;
        uint16_t num_pages;
        struct qdf_mem_dma_page_t *dma_pages;
        void **cacheable_pages;
        qdf_size_t page_size;
#ifdef DP_MEM_PRE_ALLOC
        uint8_t is_mem_prealloc;
#endif
};

/* Preprocessor definitions and constants */
typedef __qdf_mempool_t qdf_mempool_t;

/**
 * qdf_mem_init() - Initialize QDF memory module
 *
 * Return: None
 */
void qdf_mem_init(void);

/**
 * qdf_mem_exit() - Exit QDF memory module
 *
 * Return: None
 */
void qdf_mem_exit(void);

#define QDF_MEM_FUNC_NAME_SIZE 48

#ifdef MEMORY_DEBUG
/**
 * qdf_mem_debug_config_get() - Get the user configuration of mem_debug_disabled
 *
 * Return: value of mem_debug_disabled qdf module argument
 */
bool qdf_mem_debug_config_get(void);

/**
 * qdf_mem_malloc_debug() - debug version of QDF memory allocation API
 * @size: Number of bytes of memory to allocate.
 * @func: Function name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function
 * @flag: GFP flag
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory and add it to the qdf tracking list to check for memory leaks and
 * corruptions
 *
 * Return: A valid memory location on success, or NULL on failure
 */
void *qdf_mem_malloc_debug(size_t size, const char *func, uint32_t line,
                           void *caller, uint32_t flag);

#define qdf_mem_malloc(size) \
        qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, 0)

#define qdf_mem_malloc_fl(size, func, line) \
        qdf_mem_malloc_debug(size, func, line, QDF_RET_IP, 0)

#define qdf_mem_malloc_atomic(size) \
        qdf_mem_malloc_debug(size, __func__, __LINE__, QDF_RET_IP, GFP_ATOMIC)
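
/*
 * Example (illustrative sketch; 'struct my_ctx' is a hypothetical caller-side
 * type): the usual allocate/check/free pattern. In MEMORY_DEBUG builds the
 * macro records the call site so leaks can be attributed.
 *
 *      struct my_ctx *ctx = qdf_mem_malloc(sizeof(*ctx));
 *
 *      if (!ctx)
 *              return QDF_STATUS_E_NOMEM;
 *      ...
 *      qdf_mem_free(ctx);
 */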

/**
 * qdf_mem_free_debug() - debug version of qdf_mem_free
 * @ptr: Pointer to the starting address of the memory to be freed.
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This function will free the memory pointed to by 'ptr'. It also checks for
 * memory corruption, underrun, overrun, double free, domain mismatch, etc.
 *
 * Return: none
 */
void qdf_mem_free_debug(void *ptr, const char *func, uint32_t line);

#define qdf_mem_free(ptr) \
        qdf_mem_free_debug(ptr, __func__, __LINE__)

void qdf_mem_multi_pages_alloc_debug(qdf_device_t osdev,
                                     struct qdf_mem_multi_page_t *pages,
                                     size_t element_size, uint16_t element_num,
                                     qdf_dma_context_t memctxt, bool cacheable,
                                     const char *func, uint32_t line,
                                     void *caller);

#define qdf_mem_multi_pages_alloc(osdev, pages, element_size, element_num,\
                                  memctxt, cacheable) \
        qdf_mem_multi_pages_alloc_debug(osdev, pages, element_size, \
                                        element_num, memctxt, cacheable, \
                                        __func__, __LINE__, QDF_RET_IP)

void qdf_mem_multi_pages_free_debug(qdf_device_t osdev,
                                    struct qdf_mem_multi_page_t *pages,
                                    qdf_dma_context_t memctxt, bool cacheable,
                                    const char *func, uint32_t line);

#define qdf_mem_multi_pages_free(osdev, pages, memctxt, cacheable) \
        qdf_mem_multi_pages_free_debug(osdev, pages, memctxt, cacheable, \
                                       __func__, __LINE__)
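
/*
 * Example (illustrative sketch; the element type, count, 'osdev' and
 * 'memctx' handles are hypothetical, and the failure check shown is only
 * one possible convention): allocating a cacheable descriptor pool spread
 * across multiple pages and freeing it again.
 *
 *      struct qdf_mem_multi_page_t desc_pages = {0};
 *
 *      qdf_mem_multi_pages_alloc(osdev, &desc_pages,
 *                                sizeof(struct my_desc), 1024,
 *                                memctx, true);
 *      if (!desc_pages.cacheable_pages)
 *              return QDF_STATUS_E_NOMEM;
 *      ...
 *      qdf_mem_multi_pages_free(osdev, &desc_pages, memctx, true);
 */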

/**
 * qdf_mem_check_for_leaks() - Assert that the current memory domain is empty
 *
 * Call this to ensure there are no active memory allocations being tracked
 * against the current debug domain. For example, one should call this function
 * immediately before a call to qdf_debug_domain_set() as a memory leak
 * detection mechanism.
 *
 * e.g.
 *      qdf_debug_domain_set(QDF_DEBUG_DOMAIN_ACTIVE);
 *
 *      ...
 *
 *      // memory is allocated and freed
 *
 *      ...
 *
 *      // before transitioning back to inactive state,
 *      // make sure all active memory has been freed
 *      qdf_mem_check_for_leaks();
 *      qdf_debug_domain_set(QDF_DEBUG_DOMAIN_INIT);
 *
 *      ...
 *
 *      // also, before program exit, make sure init time memory is freed
 *      qdf_mem_check_for_leaks();
 *      exit();
 *
 * Return: None
 */
void qdf_mem_check_for_leaks(void);

/**
 * qdf_mem_alloc_consistent_debug() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @paddr: Physical address
 * @func: Function name of the call site
 * @line: Line number of the call site
 * @caller: Address of the caller function
 *
 * Return: pointer to allocated memory, or NULL if the allocation fails
 */
void *qdf_mem_alloc_consistent_debug(qdf_device_t osdev, void *dev,
                                     qdf_size_t size, qdf_dma_addr_t *paddr,
                                     const char *func, uint32_t line,
                                     void *caller);

#define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
        qdf_mem_alloc_consistent_debug(osdev, dev, size, paddr, \
                                       __func__, __LINE__, QDF_RET_IP)

/**
 * qdf_mem_free_consistent_debug() - free consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size of the allocation
 * @vaddr: virtual address
 * @paddr: Physical address
 * @memctx: Pointer to DMA context
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * Return: none
 */
void qdf_mem_free_consistent_debug(qdf_device_t osdev, void *dev,
                                   qdf_size_t size, void *vaddr,
                                   qdf_dma_addr_t paddr,
                                   qdf_dma_context_t memctx,
                                   const char *func, uint32_t line);

#define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
        qdf_mem_free_consistent_debug(osdev, dev, size, vaddr, paddr, memctx, \
                                      __func__, __LINE__)
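
/*
 * Example (illustrative sketch; the 4096-byte ring size and the 'osdev' and
 * 'memctx' handles are hypothetical): allocating a DMA-coherent ring and
 * releasing it when the ring is torn down.
 *
 *      qdf_dma_addr_t ring_paddr;
 *      void *ring_vaddr;
 *
 *      ring_vaddr = qdf_mem_alloc_consistent(osdev, osdev->dev,
 *                                            4096, &ring_paddr);
 *      if (!ring_vaddr)
 *              return QDF_STATUS_E_NOMEM;
 *      ...
 *      qdf_mem_free_consistent(osdev, osdev->dev, 4096, ring_vaddr,
 *                              ring_paddr, memctx);
 */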
#else
static inline bool qdf_mem_debug_config_get(void)
{
        return false;
}

/**
 * qdf_mem_malloc() - allocate QDF memory
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
#define qdf_mem_malloc(size) \
        __qdf_mem_malloc(size, __func__, __LINE__)

#define qdf_mem_malloc_fl(size, func, line) \
        __qdf_mem_malloc(size, func, line)

/**
 * qdf_mem_malloc_atomic() - allocate QDF memory atomically
 * @size: Number of bytes of memory to allocate.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
#define qdf_mem_malloc_atomic(size) \
        qdf_mem_malloc_atomic_fl(size, __func__, __LINE__)

void *qdf_mem_malloc_atomic_fl(qdf_size_t size,
                               const char *func,
                               uint32_t line);

#define qdf_mem_free(ptr) \
        __qdf_mem_free(ptr)

static inline void qdf_mem_check_for_leaks(void) { }

#define qdf_mem_alloc_consistent(osdev, dev, size, paddr) \
        __qdf_mem_alloc_consistent(osdev, dev, size, paddr, __func__, __LINE__)

#define qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx) \
        __qdf_mem_free_consistent(osdev, dev, size, vaddr, paddr, memctx)

void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
                               struct qdf_mem_multi_page_t *pages,
                               size_t element_size, uint16_t element_num,
                               qdf_dma_context_t memctxt, bool cacheable);

void qdf_mem_multi_pages_free(qdf_device_t osdev,
                              struct qdf_mem_multi_page_t *pages,
                              qdf_dma_context_t memctxt, bool cacheable);
#endif /* MEMORY_DEBUG */

/**
 * qdf_mem_multi_pages_zero() - zero out each page memory
 * @pages: Multi page information storage
 * @cacheable: true for cacheable memory, false for coherent (DMA) memory
 *
 * This function will zero out each page memory
 *
 * Return: None
 */
void qdf_mem_multi_pages_zero(struct qdf_mem_multi_page_t *pages,
                              bool cacheable);

/**
 * qdf_aligned_malloc() - allocates aligned QDF memory.
 * @size: Size to be allocated
 * @vaddr_unaligned: Unaligned virtual address.
 * @paddr_unaligned: Unaligned physical address.
 * @paddr_aligned: Aligned physical address.
 * @align: Base address alignment.
 * @func: Function name of the call site.
 * @line: Line number of the call site.
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory. It checks whether the allocated base address is aligned to @align.
 * If not, it frees the allocated memory, adds @align to the allocation size
 * and re-allocates the memory.
 *
 * Return:
 * Upon successful allocation, returns an aligned base address of the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
#define qdf_aligned_malloc(size, vaddr_unaligned, paddr_unaligned, \
                           paddr_aligned, align) \
        qdf_aligned_malloc_fl(size, vaddr_unaligned, paddr_unaligned, \
                              paddr_aligned, align, __func__, __LINE__)

void *qdf_aligned_malloc_fl(uint32_t *size, void **vaddr_unaligned,
                            qdf_dma_addr_t *paddr_unaligned,
                            qdf_dma_addr_t *paddr_aligned,
                            uint32_t align,
                            const char *func, uint32_t line);
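
/*
 * Example (illustrative sketch; the 2048-byte ring size and the 8-byte
 * alignment are arbitrary): requesting a ring whose base address is aligned.
 * Note that @size is passed by pointer and may be updated by the call.
 *
 *      uint32_t ring_size = 2048;
 *      void *vaddr_unaligned;
 *      qdf_dma_addr_t paddr_unaligned, paddr_aligned;
 *      void *ring_base;
 *
 *      ring_base = qdf_aligned_malloc(&ring_size, &vaddr_unaligned,
 *                                     &paddr_unaligned, &paddr_aligned, 8);
 *      if (!ring_base)
 *              return QDF_STATUS_E_NOMEM;
 */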

/**
 * qdf_aligned_mem_alloc_consistent() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @size: Size to be allocated
 * @vaddr_unaligned: Unaligned virtual address.
 * @paddr_unaligned: Unaligned physical address.
 * @paddr_aligned: Aligned physical address.
 * @align: Base address alignment.
 * @func: Function name of the call site.
 * @line: Line number of the call site.
 *
 * Return: pointer to allocated memory, or NULL if the allocation fails.
 */
#define qdf_aligned_mem_alloc_consistent(osdev, size, vaddr_unaligned, \
                                         paddr_unaligned, paddr_aligned, \
                                         align) \
        qdf_aligned_mem_alloc_consistent_fl(osdev, size, vaddr_unaligned, \
                                            paddr_unaligned, paddr_aligned, \
                                            align, __func__, __LINE__)

void *qdf_aligned_mem_alloc_consistent_fl(qdf_device_t osdev, uint32_t *size,
                                          void **vaddr_unaligned,
                                          qdf_dma_addr_t *paddr_unaligned,
                                          qdf_dma_addr_t *paddr_aligned,
                                          uint32_t align, const char *func,
                                          uint32_t line);

#define qdf_mem_virt_to_phys(vaddr) virt_to_phys(vaddr)

void qdf_mem_set_io(void *ptr, uint32_t num_bytes, uint32_t value);

void qdf_mem_copy_toio(void *dst_addr, const void *src_addr,
                       uint32_t num_bytes);

/**
 * qdf_mem_set() - set (fill) memory with a specified byte value.
 * @ptr: Pointer to memory that will be set
 * @num_bytes: Number of bytes to be set
 * @value: Byte value to set in memory
 *
 * WARNING: the @num_bytes and @value parameters are swapped relative to the
 * standard C function memset(); please ensure correct usage of this function!
 *
 * Return: None
 */
void qdf_mem_set(void *ptr, uint32_t num_bytes, uint32_t value);
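
/*
 * Example (illustrative sketch; 'buf' is a hypothetical 64-byte buffer)
 * highlighting the argument order relative to the C library:
 *
 *      uint8_t buf[64];
 *
 *      qdf_mem_set(buf, sizeof(buf), 0xff);    // qdf:  (ptr, size, value)
 *      memset(buf, 0xff, sizeof(buf));         // libc: (ptr, value, size)
 */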

/**
 * qdf_mem_zero() - zero out memory
 * @ptr: pointer to memory that will be set to zero
 * @num_bytes: number of bytes to zero
 *
 * This function sets the memory location to all zeros, essentially clearing
 * the memory.
 *
 * Return: None
 */
static inline void qdf_mem_zero(void *ptr, uint32_t num_bytes)
{
        qdf_mem_set(ptr, num_bytes, 0);
}

/**
 * qdf_mem_copy() - copy memory
 * @dst_addr: Pointer to destination memory location (to copy to)
 * @src_addr: Pointer to source memory location (to copy from)
 * @num_bytes: Number of bytes to copy.
 *
 * Copy host memory from one location to another, similar to memcpy in
 * standard C. Note this function does not specifically handle overlapping
 * source and destination memory locations. Calling this function with
 * overlapping source and destination memory locations produces
 * unpredictable behavior. Use qdf_mem_move() if the memory locations
 * for the source and destination are overlapping (or could be overlapping!)
 *
 * Return: none
 */
void qdf_mem_copy(void *dst_addr, const void *src_addr, uint32_t num_bytes);

/**
 * qdf_mem_move() - move memory
 * @dst_addr: pointer to destination memory location (to move to)
 * @src_addr: pointer to source memory location (to move from)
 * @num_bytes: number of bytes to move.
 *
 * Move host memory from one location to another, similar to memmove in
 * standard C. Note this function *does* handle overlapping
 * source and destination memory locations.
 *
 * Return: None
 */
void qdf_mem_move(void *dst_addr, const void *src_addr, uint32_t num_bytes);

/**
 * qdf_mem_cmp() - memory compare
 * @left: pointer to one location in memory to compare
 * @right: pointer to second location in memory to compare
 * @size: the number of bytes to compare
 *
 * Function to compare two pieces of memory, similar to memcmp function
 * in standard C.
 *
 * Return:
 *      0 -- equal
 *      < 0 -- *@left is less than *@right
 *      > 0 -- *@left is greater than *@right
 */
int qdf_mem_cmp(const void *left, const void *right, size_t size);

void qdf_ether_addr_copy(void *dst_addr, const void *src_addr);
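
/*
 * Example (illustrative sketch; 'peer_mac' and 'bssid' are hypothetical
 * 6-byte MAC address arrays): copying and comparing raw buffers.
 *
 *      uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
 *      uint8_t bssid[QDF_MAC_ADDR_SIZE];
 *
 *      qdf_ether_addr_copy(peer_mac, bssid);
 *      if (!qdf_mem_cmp(peer_mac, bssid, QDF_MAC_ADDR_SIZE)) {
 *              // the two addresses now match
 *      }
 */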

/**
 * qdf_mem_map_nbytes_single() - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be dma mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped.
 * @phy_addr: pointer to receive physical address.
 *
 * Return: success/failure
 */
static inline uint32_t qdf_mem_map_nbytes_single(qdf_device_t osdev, void *buf,
                                                 qdf_dma_dir_t dir, int nbytes,
                                                 qdf_dma_addr_t *phy_addr)
{
#if defined(HIF_PCI) || defined(HIF_IPCI)
        return __qdf_mem_map_nbytes_single(osdev, buf, dir, nbytes, phy_addr);
#else
        return 0;
#endif
}

/**
 * qdf_mem_dma_cache_sync() - Synchronize the CPU cache for a DMA buffer
 * @osdev: pointer to OS device context
 * @buf: DMA address of the buffer to sync
 * @dir: DMA direction
 * @nbytes: number of bytes to sync
 *
 * Return: none
 */
static inline void qdf_mem_dma_cache_sync(qdf_device_t osdev,
                                          qdf_dma_addr_t buf,
                                          qdf_dma_dir_t dir,
                                          int nbytes)
{
        __qdf_mem_dma_cache_sync(osdev, buf, dir, nbytes);
}

/**
 * qdf_mem_unmap_nbytes_single() - unmap memory for DMA
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped.
 *
 * Return: none
 */
static inline void qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
                                               qdf_dma_addr_t phy_addr,
                                               qdf_dma_dir_t dir,
                                               int nbytes)
{
#if defined(HIF_PCI) || defined(HIF_IPCI)
        __qdf_mem_unmap_nbytes_single(osdev, phy_addr, dir, nbytes);
#endif
}
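
/*
 * Example (illustrative sketch; 'osdev', 'frame' and 'frame_len' are
 * hypothetical, and treating a non-zero return as failure is an assumed
 * convention): mapping a buffer for device DMA and unmapping it once the
 * hardware is done with it.
 *
 *      qdf_dma_addr_t frame_paddr;
 *      uint32_t status;
 *
 *      status = qdf_mem_map_nbytes_single(osdev, frame, QDF_DMA_TO_DEVICE,
 *                                         frame_len, &frame_paddr);
 *      if (status)
 *              return QDF_STATUS_E_FAILURE;
 *      ...
 *      qdf_mem_unmap_nbytes_single(osdev, frame_paddr, QDF_DMA_TO_DEVICE,
 *                                  frame_len);
 */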

/**
 * qdf_mempool_init() - Create and initialize memory pool
 * @osdev: platform device object
 * @pool_addr: address of the pool created
 * @elem_cnt: no. of elements in pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * Return: 0 on success, non-zero on failure; the created pool is returned
 * through @pool_addr
 */
static inline int qdf_mempool_init(qdf_device_t osdev,
                                   qdf_mempool_t *pool_addr, int elem_cnt,
                                   size_t elem_size, uint32_t flags)
{
        return __qdf_mempool_init(osdev, pool_addr, elem_cnt, elem_size,
                                  flags);
}

/**
 * qdf_mempool_destroy() - Destroy memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Return: none
 */
static inline void qdf_mempool_destroy(qdf_device_t osdev, qdf_mempool_t pool)
{
        __qdf_mempool_destroy(osdev, pool);
}

/**
 * qdf_mempool_alloc() - Allocate an element from the memory pool
 * @osdev: platform device object
 * @pool: Handle to memory pool
 *
 * Return: Pointer to the allocated element or NULL if the pool is empty
 */
static inline void *qdf_mempool_alloc(qdf_device_t osdev, qdf_mempool_t pool)
{
        return (void *)__qdf_mempool_alloc(osdev, pool);
}

/**
 * qdf_mempool_free() - Free a memory pool element
 * @osdev: Platform device object
 * @pool: Handle to memory pool
 * @buf: Element to be freed
 *
 * Return: none
 */
static inline void qdf_mempool_free(qdf_device_t osdev, qdf_mempool_t pool,
                                    void *buf)
{
        __qdf_mempool_free(osdev, pool, buf);
}
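
/*
 * Example (illustrative sketch; the element type, the 128-element count and
 * the 'osdev' handle are hypothetical, and the zero-on-success check follows
 * the Return description above): a typical pool lifecycle.
 *
 *      qdf_mempool_t tx_pool;
 *      struct my_elem *elem;
 *
 *      if (qdf_mempool_init(osdev, &tx_pool, 128,
 *                           sizeof(struct my_elem), 0))
 *              return QDF_STATUS_E_NOMEM;
 *
 *      elem = qdf_mempool_alloc(osdev, tx_pool);
 *      if (elem) {
 *              ...
 *              qdf_mempool_free(osdev, tx_pool, elem);
 *      }
 *
 *      qdf_mempool_destroy(osdev, tx_pool);
 */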

void qdf_mem_dma_sync_single_for_device(qdf_device_t osdev,
                                        qdf_dma_addr_t bus_addr,
                                        qdf_size_t size,
                                        __dma_data_direction direction);

void qdf_mem_dma_sync_single_for_cpu(qdf_device_t osdev,
                                     qdf_dma_addr_t bus_addr,
                                     qdf_size_t size,
                                     __dma_data_direction direction);

int qdf_mem_multi_page_link(qdf_device_t osdev,
                            struct qdf_mem_multi_page_t *pages,
                            uint32_t elem_size, uint32_t elem_count,
                            uint8_t cacheable);

/**
 * qdf_mem_kmalloc_inc() - increment kmalloc allocated bytes count
 * @size: number of bytes to increment by
 *
 * Return: None
 */
void qdf_mem_kmalloc_inc(qdf_size_t size);

/**
 * qdf_mem_kmalloc_dec() - decrement kmalloc allocated bytes count
 * @size: number of bytes to decrement by
 *
 * Return: None
 */
void qdf_mem_kmalloc_dec(qdf_size_t size);

#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * qdf_mem_skb_inc() - increment total skb allocation size
 * @size: size to be added
 *
 * Return: none
 */
void qdf_mem_skb_inc(qdf_size_t size);

/**
 * qdf_mem_skb_dec() - decrement total skb allocation size
 * @size: size to be decremented
 *
 * Return: none
 */
void qdf_mem_skb_dec(qdf_size_t size);

/**
 * qdf_mem_skb_total_inc() - increment total skb allocation size
 * in host driver in both debug and perf builds
 * @size: size to be added
 *
 * Return: none
 */
void qdf_mem_skb_total_inc(qdf_size_t size);

/**
 * qdf_mem_skb_total_dec() - decrement total skb allocation size
 * in the host driver in both debug and perf builds
 * @size: size to be decremented
 *
 * Return: none
 */
void qdf_mem_skb_total_dec(qdf_size_t size);

/**
 * qdf_mem_dp_tx_skb_inc() - Increment Tx skb allocation size
 * @size: size to be added
 *
 * Return: none
 */
void qdf_mem_dp_tx_skb_inc(qdf_size_t size);

/**
 * qdf_mem_dp_tx_skb_dec() - Decrement Tx skb allocation size
 * @size: size to be decreased
 *
 * Return: none
 */
void qdf_mem_dp_tx_skb_dec(qdf_size_t size);

/**
 * qdf_mem_dp_rx_skb_inc() - Increment Rx skb allocation size
 * @size: size to be added
 *
 * Return: none
 */
void qdf_mem_dp_rx_skb_inc(qdf_size_t size);

/**
 * qdf_mem_dp_rx_skb_dec() - Decrement Rx skb allocation size
 * @size: size to be decreased
 *
 * Return: none
 */
void qdf_mem_dp_rx_skb_dec(qdf_size_t size);

/**
 * qdf_mem_dp_tx_skb_cnt_inc() - Increment Tx buffer count
 *
 * Return: none
 */
void qdf_mem_dp_tx_skb_cnt_inc(void);

/**
 * qdf_mem_dp_tx_skb_cnt_dec() - Decrement Tx buffer count
 *
 * Return: none
 */
void qdf_mem_dp_tx_skb_cnt_dec(void);

/**
 * qdf_mem_dp_rx_skb_cnt_inc() - Increment Rx buffer count
 *
 * Return: none
 */
void qdf_mem_dp_rx_skb_cnt_inc(void);

/**
 * qdf_mem_dp_rx_skb_cnt_dec() - Decrement Rx buffer count
 *
 * Return: none
 */
void qdf_mem_dp_rx_skb_cnt_dec(void);
#else
static inline void qdf_mem_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_skb_total_inc(qdf_size_t size)
{
}

static inline void qdf_mem_skb_total_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_rx_skb_inc(qdf_size_t size)
{
}

static inline void qdf_mem_dp_rx_skb_dec(qdf_size_t size)
{
}

static inline void qdf_mem_dp_tx_skb_cnt_inc(void)
{
}

static inline void qdf_mem_dp_tx_skb_cnt_dec(void)
{
}

static inline void qdf_mem_dp_rx_skb_cnt_inc(void)
{
}

static inline void qdf_mem_dp_rx_skb_cnt_dec(void)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */

/**
 * qdf_mem_map_table_alloc() - Allocate shared memory info structure
 * @num: number of storage entries required
 *
 * Allocate mapping table for DMA memory allocation. This is needed for
 * IPA-WLAN buffer sharing when SMMU Stage1 Translation is enabled.
 *
 * Return: shared memory info storage table pointer
 */
static inline qdf_mem_info_t *qdf_mem_map_table_alloc(uint32_t num)
{
        qdf_mem_info_t *mem_info_arr;

        mem_info_arr = qdf_mem_malloc(num * sizeof(mem_info_arr[0]));

        return mem_info_arr;
}

/**
 * qdf_update_mem_map_table() - Update DMA memory map info
 * @osdev: Parent device instance
 * @mem_info: Pointer to shared memory information
 * @dma_addr: dma address
 * @mem_size: memory size allocated
 *
 * Store DMA shared memory information
 *
 * Return: none
 */
static inline void qdf_update_mem_map_table(qdf_device_t osdev,
                                            qdf_mem_info_t *mem_info,
                                            qdf_dma_addr_t dma_addr,
                                            uint32_t mem_size)
{
        if (!mem_info) {
                qdf_nofl_err("%s: NULL mem_info", __func__);
                return;
        }

        __qdf_update_mem_map_table(osdev, mem_info, dma_addr, mem_size);
}
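
/*
 * Example (illustrative sketch; 'osdev', the buffer DMA addresses and the
 * two-entry count are hypothetical): building a small DMA mapping table for
 * buffers shared with an external module.
 *
 *      qdf_mem_info_t *map_table;
 *
 *      map_table = qdf_mem_map_table_alloc(2);
 *      if (!map_table)
 *              return QDF_STATUS_E_NOMEM;
 *
 *      qdf_update_mem_map_table(osdev, &map_table[0], buf0_dma_addr, 4096);
 *      qdf_update_mem_map_table(osdev, &map_table[1], buf1_dma_addr, 4096);
 */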

/**
 * qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
 */
static inline bool qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
        return __qdf_mem_smmu_s1_enabled(osdev);
}

/**
 * qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma address
 * @osdev: Parent device instance
 * @dma_addr: DMA/IOVA address
 *
 * Get actual physical address from dma_addr based on SMMU enablement status.
 * If SMMU stage 1 translation is enabled, the DMA APIs return an IO virtual
 * address (IOVA); otherwise they return a physical address. So get the SMMU
 * physical address mapping from the IOVA.
 *
 * Return: dmaable physical address
 */
static inline qdf_dma_addr_t qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
                                                        qdf_dma_addr_t dma_addr)
{
        return __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
}

/**
 * qdf_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma address
 * @size: allocated memory size
 *
 * Return: 0 on success, or an error code on failure
 */
static inline int
qdf_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
                        qdf_dma_addr_t dma_addr, size_t size)
{
        return __qdf_os_mem_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

/**
 * qdf_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Return: None
 */
static inline void
qdf_mem_free_sgtable(struct sg_table *sgt)
{
        __qdf_os_mem_free_sgtable(sgt);
}

/**
 * qdf_dma_get_sgtable_dma_addr() - Assigns DMA address to scatterlist elements
 * @sgt: scatter gather table pointer
 *
 * Return: None
 */
static inline void
qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
        __qdf_dma_get_sgtable_dma_addr(sgt);
}

/**
 * qdf_mem_get_dma_addr() - Return dma address based on SMMU translation status.
 * @osdev: Parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Get dma address based on SMMU enablement status. If SMMU Stage 1
 * translation is enabled, DMA APIs return IO virtual address otherwise
 * returns physical address.
 *
 * Return: dma address
 */
static inline qdf_dma_addr_t qdf_mem_get_dma_addr(qdf_device_t osdev,
                                                  qdf_mem_info_t *mem_info)
{
        return __qdf_mem_get_dma_addr(osdev, mem_info);
}

/**
 * qdf_mem_get_dma_addr_ptr() - Return DMA address pointer from mem info struct
 * @osdev: Parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Based on smmu stage 1 translation enablement, return corresponding dma
 * address storage pointer.
 *
 * Return: dma address storage pointer
 */
static inline qdf_dma_addr_t *qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
                                                       qdf_mem_info_t *mem_info)
{
        return __qdf_mem_get_dma_addr_ptr(osdev, mem_info);
}

/**
 * qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
qdf_mem_get_dma_size(qdf_device_t osdev,
                     qdf_mem_info_t *mem_info)
{
        return __qdf_mem_get_dma_size(osdev, mem_info);
}

/**
 * qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
qdf_mem_set_dma_size(qdf_device_t osdev,
                     qdf_mem_info_t *mem_info,
                     uint32_t mem_size)
{
        __qdf_mem_set_dma_size(osdev, mem_info, mem_size);
}

/**
 * qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
qdf_mem_get_dma_pa(qdf_device_t osdev,
                   qdf_mem_info_t *mem_info)
{
        return __qdf_mem_get_dma_pa(osdev, mem_info);
}

/**
 * qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
qdf_mem_set_dma_pa(qdf_device_t osdev,
                   qdf_mem_info_t *mem_info,
                   qdf_dma_addr_t dma_pa)
{
        __qdf_mem_set_dma_pa(osdev, mem_info, dma_pa);
}

/**
 * qdf_mem_shared_mem_alloc() - Allocate DMA memory for shared resource
 * @osdev: parent device instance
 * @size: size to be allocated
 *
 * Allocate DMA memory which will be shared with external kernel module. This
 * information is needed for SMMU mapping.
 *
 * Return: pointer to the allocated shared memory information, or NULL on
 * failure
 */
qdf_shared_mem_t *qdf_mem_shared_mem_alloc(qdf_device_t osdev, uint32_t size);

/**
 * qdf_mem_shared_mem_free() - Free shared memory
 * @osdev: parent device instance
 * @shared_mem: shared memory information storage
 *
 * Free DMA shared memory resource
 *
 * Return: None
 */
static inline void qdf_mem_shared_mem_free(qdf_device_t osdev,
                                           qdf_shared_mem_t *shared_mem)
{
        if (!shared_mem) {
                qdf_nofl_err("%s: NULL shared mem struct passed",
                             __func__);
                return;
        }

        if (shared_mem->vaddr) {
                qdf_mem_free_consistent(osdev, osdev->dev,
                                        qdf_mem_get_dma_size(osdev,
                                                &shared_mem->mem_info),
                                        shared_mem->vaddr,
                                        qdf_mem_get_dma_addr(osdev,
                                                &shared_mem->mem_info),
                                        qdf_get_dma_mem_context(shared_mem,
                                                                memctx));
        }

        qdf_mem_free_sgtable(&shared_mem->sgtable);
        qdf_mem_free(shared_mem);
}
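
/*
 * Example (illustrative sketch; the 'osdev' handle and the 16 KB size are
 * hypothetical): allocating a region shared with an external module and
 * releasing it again.
 *
 *      qdf_shared_mem_t *shared_buf;
 *
 *      shared_buf = qdf_mem_shared_mem_alloc(osdev, 16384);
 *      if (!shared_buf)
 *              return QDF_STATUS_E_NOMEM;
 *      ...
 *      qdf_mem_shared_mem_free(osdev, shared_buf);
 */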

/**
 * qdf_dma_mem_stats_read() - Return the DMA memory allocated in
 * host driver
 *
 * Return: Total DMA memory allocated
 */
int32_t qdf_dma_mem_stats_read(void);

/**
 * qdf_heap_mem_stats_read() - Return the heap memory allocated
 * in host driver
 *
 * Return: Total heap memory allocated
 */
int32_t qdf_heap_mem_stats_read(void);

/**
 * qdf_skb_mem_stats_read() - Return the SKB memory allocated in
 * host driver
 *
 * Return: Total SKB memory allocated
 */
int32_t qdf_skb_mem_stats_read(void);

/**
 * qdf_skb_total_mem_stats_read() - Return the SKB memory allocated
 * in the host driver tracked in both debug and perf builds
 *
 * Return: Total SKB memory allocated
 */
int32_t qdf_skb_total_mem_stats_read(void);

/**
 * qdf_skb_max_mem_stats_read() - Return the max SKB memory
 * allocated in host driver. This is the high watermark for the
 * total SKB allocated in the host driver
 *
 * Return: Max SKB memory allocated
 */
int32_t qdf_skb_max_mem_stats_read(void);

/**
 * qdf_mem_tx_desc_cnt_read() - Return the outstanding Tx descs
 * which are waiting on Tx completions
 *
 * Return: Outstanding Tx desc count
 */
int32_t qdf_mem_tx_desc_cnt_read(void);

/**
 * qdf_mem_tx_desc_max_read() - Return the max outstanding Tx
 * descs which are waiting on Tx completions. This is the high
 * watermark for the pending desc count
 *
 * Return: Max outstanding Tx desc count
 */
int32_t qdf_mem_tx_desc_max_read(void);

/**
 * qdf_mem_stats_init() - Initialize the qdf memstats fields on
 * creating the sysfs node
 *
 * Return: None
 */
void qdf_mem_stats_init(void);

/**
 * qdf_dp_tx_skb_mem_stats_read() - Return the SKB memory
 * allocated for Tx data path
 *
 * Return: Tx SKB memory allocated
 */
int32_t qdf_dp_tx_skb_mem_stats_read(void);

/**
 * qdf_dp_rx_skb_mem_stats_read() - Return the SKB memory
 * allocated for Rx data path
 *
 * Return: Rx SKB memory allocated
 */
int32_t qdf_dp_rx_skb_mem_stats_read(void);

/**
 * qdf_dp_tx_skb_max_mem_stats_read() - Return the high
 * watermark for the SKB memory allocated for Tx data path
 *
 * Return: Max Tx SKB memory allocated
 */
int32_t qdf_dp_tx_skb_max_mem_stats_read(void);

/**
 * qdf_dp_rx_skb_max_mem_stats_read() - Return the high
 * watermark for the SKB memory allocated for Rx data path
 *
 * Return: Max Rx SKB memory allocated
 */
int32_t qdf_dp_rx_skb_max_mem_stats_read(void);

/**
 * qdf_mem_dp_tx_skb_cnt_read() - Return number of buffers
 * allocated in the Tx data path by the host driver or
 * buffers coming from the n/w stack
 *
 * Return: Number of DP Tx buffers allocated
 */
int32_t qdf_mem_dp_tx_skb_cnt_read(void);

/**
 * qdf_mem_dp_tx_skb_max_cnt_read() - Return max number of
 * buffers allocated in the Tx data path
 *
 * Return: Max number of DP Tx buffers allocated
 */
int32_t qdf_mem_dp_tx_skb_max_cnt_read(void);

/**
 * qdf_mem_dp_rx_skb_cnt_read() - Return number of buffers
 * allocated in the Rx data path
 *
 * Return: Number of DP Rx buffers allocated
 */
int32_t qdf_mem_dp_rx_skb_cnt_read(void);

/**
 * qdf_mem_dp_rx_skb_max_cnt_read() - Return max number of
 * buffers allocated in the Rx data path
 *
 * Return: Max number of DP Rx buffers allocated
 */
int32_t qdf_mem_dp_rx_skb_max_cnt_read(void);

/**
 * qdf_mem_tx_desc_cnt_update() - Update the pending tx desc
 * count and the high watermark for pending tx desc count
 * @pending_tx_descs: outstanding Tx desc count
 * @tx_descs_max: high watermark for outstanding Tx desc count
 *
 * Return: None
 */
void qdf_mem_tx_desc_cnt_update(qdf_atomic_t pending_tx_descs,
                                int32_t tx_descs_max);

#endif /* __QDF_MEMORY_H */