/* i_qdf_mem.h */
  1. /*
  2. * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. /**
  20. * DOC: i_qdf_mem.h
  21. * Linux-specific definitions for QDF memory API's
  22. */
  23. #ifndef __I_QDF_MEM_H
  24. #define __I_QDF_MEM_H
  25. #ifdef __KERNEL__
  26. #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
  27. #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
  28. #include <linux/autoconf.h>
  29. #else
  30. #include <generated/autoconf.h>
  31. #endif
  32. #endif
  33. #include <linux/slab.h>
  34. #include <linux/hardirq.h>
  35. #include <linux/vmalloc.h>
  36. #include <linux/pci.h> /* pci_alloc_consistent */
  37. #include <linux/cache.h> /* L1_CACHE_BYTES */
  38. #define __qdf_cache_line_sz L1_CACHE_BYTES
  39. #include "queue.h"
  40. #else
/*
 * Provide dummy defs for kernel data types, functions, and enums
 * used in this header file.
 *
 * These stubs let the header parse in non-kernel builds: allocators
 * evaluate to NULL and the free routines expand to nothing.
 */
#define GFP_KERNEL 0
#define GFP_ATOMIC 0
#define __GFP_KSWAPD_RECLAIM 0
#define __GFP_DIRECT_RECLAIM 0
#define kzalloc(size, flags) NULL
#define vmalloc(size) NULL
#define kfree(buf)
#define vfree(buf)
#define pci_alloc_consistent(dev, size, paddr) NULL
#define __qdf_mempool_t void*
#define QDF_RET_IP NULL
  56. #endif /* __KERNEL__ */
  57. #include <qdf_status.h>
  58. #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) && \
  59. (defined(MSM_PLATFORM) || defined(QCA_IPA_LL_TX_FLOW_CONTROL)))
  60. #include <linux/qcom-iommu-util.h>
  61. #endif
  62. #if IS_ENABLED(CONFIG_ARM_SMMU)
  63. #include <pld_common.h>
  64. #ifdef ENABLE_SMMU_S1_TRANSLATION
  65. #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
  66. #include <asm/dma-iommu.h>
  67. #endif
  68. #endif
  69. #include <linux/iommu.h>
  70. #endif
  71. #ifdef __KERNEL__
/**
 * typedef mempool_elem_t - single node on a memory pool's free list
 * @mempool_entry: STAILQ linkage used to chain free elements together
 */
typedef struct mempool_elem {
    STAILQ_ENTRY(mempool_elem) mempool_entry;
} mempool_elem_t;
/**
 * typedef __qdf_mempool_ctxt_t - Memory pool context
 * @pool_id: pool identifier
 * @flags: flags
 * @elem_size: size of each pool element in bytes
 * @pool_mem: address of the backing memory allocated for the pool
 * @mem_size: Total size of the pool in bytes
 * @free_list: free pool list (singly-linked tail queue of mempool_elem)
 * @lock: spinlock object protecting the free list
 * @max_elem: Maximum number of elements in the pool
 * @free_cnt: Number of free elements available
 */
typedef struct __qdf_mempool_ctxt {
    int pool_id;
    u_int32_t flags;
    size_t elem_size;
    void *pool_mem;
    u_int32_t mem_size;
    STAILQ_HEAD(, mempool_elem) free_list;
    spinlock_t lock;
    u_int32_t max_elem;
    u_int32_t free_cnt;
} __qdf_mempool_ctxt_t;
  98. typedef struct kmem_cache *qdf_kmem_cache_t;
  99. #endif /* __KERNEL__ */
/* OS page size as a size_t, and the OS alignment helper */
#define __page_size ((size_t)PAGE_SIZE)
#define __qdf_align(a, mask) ALIGN(a, mask)

/*
 * QDF_MEMDEBUG_PANIC() - panic on memory-debug violations.
 * Compiles to a no-op when DISABLE_MEMDEBUG_PANIC is defined so that
 * debug builds can log-and-continue instead of crashing.
 */
#ifdef DISABLE_MEMDEBUG_PANIC
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
    do { \
        /* no-op */ \
    } while (false)
#else
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
    QDF_DEBUG_PANIC(reason_fmt, ## args)
#endif
  111. /**
  112. * typedef __dma_data_direction - typedef for dma_data_direction
  113. */
  114. typedef enum dma_data_direction __dma_data_direction;
  115. /**
  116. * __qdf_dma_dir_to_os() - Convert DMA data direction to OS specific enum
  117. * @qdf_dir: QDF DMA data direction
  118. *
  119. * Return:
  120. * enum dma_data_direction
  121. */
  122. static inline
  123. enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
  124. {
  125. switch (qdf_dir) {
  126. case QDF_DMA_BIDIRECTIONAL:
  127. return DMA_BIDIRECTIONAL;
  128. case QDF_DMA_TO_DEVICE:
  129. return DMA_TO_DEVICE;
  130. case QDF_DMA_FROM_DEVICE:
  131. return DMA_FROM_DEVICE;
  132. default:
  133. return DMA_NONE;
  134. }
  135. }
  136. /**
  137. * __qdf_mem_map_nbytes_single - Map memory for DMA
  138. * @osdev: pomter OS device context
  139. * @buf: pointer to memory to be dma mapped
  140. * @dir: DMA map direction
  141. * @nbytes: number of bytes to be mapped.
  142. * @phy_addr: pointer to receive physical address.
  143. *
  144. * Return: success/failure
  145. */
  146. static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
  147. void *buf, qdf_dma_dir_t dir,
  148. int nbytes,
  149. qdf_dma_addr_t *phy_addr)
  150. {
  151. /* assume that the OS only provides a single fragment */
  152. *phy_addr = dma_map_single(osdev->dev, buf, nbytes,
  153. __qdf_dma_dir_to_os(dir));
  154. return dma_mapping_error(osdev->dev, *phy_addr) ?
  155. QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
  156. }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
/**
 * __qdf_mem_dma_cache_sync() - make a DMA buffer coherent with the CPU
 * @osdev: OS device context
 * @buf: DMA address of the buffer to sync
 * @dir: DMA data direction
 * @nbytes: number of bytes to sync
 *
 * Pre-2.6.20 kernels only expose dma_cache_sync() for this purpose.
 *
 * Return: none
 */
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
                                            qdf_dma_addr_t buf,
                                            qdf_dma_dir_t dir,
                                            int nbytes)
{
    dma_cache_sync(osdev->dev, buf, nbytes, __qdf_dma_dir_to_os(dir));
}
#else
/**
 * __qdf_mem_dma_cache_sync() - make a DMA buffer coherent with the CPU
 * @osdev: OS device context
 * @buf: DMA address of the buffer to sync
 * @dir: DMA data direction
 * @nbytes: number of bytes to sync
 *
 * Modern kernels use dma_sync_single_for_cpu() to hand the buffer back
 * to the CPU after device DMA.
 *
 * Return: none
 */
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
                                            qdf_dma_addr_t buf,
                                            qdf_dma_dir_t dir,
                                            int nbytes)
{
    dma_sync_single_for_cpu(osdev->dev, buf, nbytes,
                            __qdf_dma_dir_to_os(dir));
}
#endif
/**
 * __qdf_mem_unmap_nbytes_single() - un_map memory for DMA
 *
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be dma unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped.
 *
 * Return - none
 */
static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
                                                 qdf_dma_addr_t phy_addr,
                                                 qdf_dma_dir_t dir, int nbytes)
{
    dma_unmap_single(osdev->dev, phy_addr, nbytes,
                     __qdf_dma_dir_to_os(dir));
}
#ifdef __KERNEL__
typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;

/**
 * __qdf_mempool_init() - Create and initialize memory pool
 * @osdev: platform device object
 * @pool_addr: address of the pool created
 * @elem_cnt: no. of elements in pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * Return: 0 on success; the created pool is returned through @pool_addr.
 * NOTE(review): the return type is int, not a handle — confirm the
 * non-zero error convention against the implementation.
 */
int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
                       int elem_cnt, size_t elem_size, u_int32_t flags);

/**
 * __qdf_mempool_destroy() - Destroy memory pool
 * @osdev: platform device object
 * @pool: memory pool
 *
 * Return: none
 */
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);

/**
 * __qdf_mempool_alloc() - Allocate an element from the memory pool
 * @osdev: platform device object
 * @pool: memory pool
 *
 * Return: Pointer to the allocated element or NULL if the pool is empty
 */
void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);

/**
 * __qdf_mempool_free() - Free a memory pool element
 * @osdev: Platform device object
 * @pool: Handle to memory pool
 * @buf: Element to be freed
 *
 * Return: none
 */
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);

/**
 * __qdf_kmem_cache_create() - OS abstraction for cache creation
 * @cache_name: Cache name
 * @size: Size of the object to be created
 *
 * Return: Cache address on successful creation, else NULL
 */
qdf_kmem_cache_t __qdf_kmem_cache_create(const char *cache_name,
                                         qdf_size_t size);

/**
 * __qdf_kmem_cache_destroy() - OS abstraction for cache destruction
 * @cache: Cache pointer
 *
 * Return: void
 */
void __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache);

/**
 * __qdf_kmem_cache_alloc() - Function to allocate an object from a cache
 * @cache: Cache address
 *
 * Return: Object from cache
 *
 */
void *__qdf_kmem_cache_alloc(qdf_kmem_cache_t cache);

/**
 * __qdf_kmem_cache_free() - Function to free cache object
 * @cache: Cache address
 * @node: Object to be returned to cache
 *
 * Return: void
 */
void __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node);

/* Caller return address, used to tag allocations for leak tracking */
#define QDF_RET_IP ((void *)_RET_IP_)

/* Element size configured for the given pool */
#define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
#endif
/**
 * __qdf_ioremap() - map bus memory into cpu space
 * @HOST_CE_ADDRESS: bus address of the memory
 * @HOST_CE_SIZE: memory size to map
 *
 * ioremap_nocache() was removed in kernel 5.6; plain ioremap() is
 * non-cached by default there, so both branches behave the same.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
#define __qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE) \
    ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE)
#else
#define __qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE) \
    ioremap_nocache(HOST_CE_ADDRESS, HOST_CE_SIZE)
#endif
/**
 * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if smmu s1 enabled, false if smmu s1 is bypassed
 */
static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
    /* cached at device init; no IOMMU query needed here */
    return osdev->smmu_s1_enabled;
}
  288. #if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
/**
 * typedef __qdf_iommu_domain_t - abstraction for struct iommu_domain
 *
 * Opaque to QDF callers; only passed through to the kernel IOMMU API.
 */
typedef struct iommu_domain __qdf_iommu_domain_t;
  293. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0))
  294. #if IS_ENABLED(CONFIG_QCOM_IOMMU_UTIL)
  295. /**
  296. * __qdf_iommu_attr_to_os() - Convert qdf iommu attribute to OS mapping
  297. * configurations bitmap
  298. * @attr: QDF iommu attribute
  299. *
  300. * Return: IOMMU mapping configuration bitmaps
  301. */
  302. static inline int __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
  303. {
  304. switch (attr) {
  305. case QDF_DOMAIN_ATTR_S1_BYPASS:
  306. return QCOM_IOMMU_MAPPING_CONF_S1_BYPASS;
  307. case QDF_DOMAIN_ATTR_ATOMIC:
  308. return QCOM_IOMMU_MAPPING_CONF_ATOMIC;
  309. case QDF_DOMAIN_ATTR_FAST:
  310. return QCOM_IOMMU_MAPPING_CONF_FAST;
  311. default:
  312. return -EINVAL;
  313. }
  314. }
  315. /**
  316. * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
  317. *
  318. * @domain: iommu domain
  319. * @attr: iommu attribute
  320. * @data: data pointer
  321. *
  322. * Return: 0 for success, and negative values otherwise
  323. */
  324. static inline int
  325. __qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
  326. enum qdf_iommu_attr attr, void *data)
  327. {
  328. int mapping_config;
  329. int mapping_bitmap;
  330. int *value;
  331. mapping_bitmap = __qdf_iommu_attr_to_os(attr);
  332. if (mapping_bitmap < 0)
  333. return -EINVAL;
  334. mapping_config = qcom_iommu_get_mappings_configuration(domain);
  335. if (mapping_config < 0)
  336. return -EINVAL;
  337. value = data;
  338. *value = (mapping_config & mapping_bitmap) ? 1 : 0;
  339. return 0;
  340. }
  341. #else /* !CONFIG_QCOM_IOMMU_UTIL */
/**
 * __qdf_iommu_domain_get_attr() - stub when qcom-iommu-util is absent
 * @domain: iommu domain (unused)
 * @attr: iommu attribute (unused)
 * @data: data pointer (unused)
 *
 * Return: -ENOTSUPP always
 */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
                            enum qdf_iommu_attr attr, void *data)
{
    return -ENOTSUPP;
}
  348. #endif /* CONFIG_QCOM_IOMMU_UTIL */
  349. #else
/**
 * __qdf_iommu_attr_to_os() - Convert qdf iommu attribute to OS specific enum
 * @attr: QDF iommu attribute
 *
 * Pre-5.13 kernels expose per-attribute enums (enum iommu_attr) rather
 * than a mapping-configuration bitmap; this is a 1:1 translation table.
 *
 * Return: enum iommu_attr; DOMAIN_ATTR_EXTENDED_MAX for unknown input
 */
static inline
enum iommu_attr __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
{
    switch (attr) {
    case QDF_DOMAIN_ATTR_GEOMETRY:
        return DOMAIN_ATTR_GEOMETRY;
    case QDF_DOMAIN_ATTR_PAGING:
        return DOMAIN_ATTR_PAGING;
    case QDF_DOMAIN_ATTR_WINDOWS:
        return DOMAIN_ATTR_WINDOWS;
    case QDF_DOMAIN_ATTR_FSL_PAMU_STASH:
        return DOMAIN_ATTR_FSL_PAMU_STASH;
    case QDF_DOMAIN_ATTR_FSL_PAMU_ENABLE:
        return DOMAIN_ATTR_FSL_PAMU_ENABLE;
    case QDF_DOMAIN_ATTR_FSL_PAMUV1:
        return DOMAIN_ATTR_FSL_PAMUV1;
    case QDF_DOMAIN_ATTR_NESTING:
        return DOMAIN_ATTR_NESTING;
    case QDF_DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
        return DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE;
    case QDF_DOMAIN_ATTR_CONTEXT_BANK:
        return DOMAIN_ATTR_CONTEXT_BANK;
    case QDF_DOMAIN_ATTR_NON_FATAL_FAULTS:
        return DOMAIN_ATTR_NON_FATAL_FAULTS;
    case QDF_DOMAIN_ATTR_S1_BYPASS:
        return DOMAIN_ATTR_S1_BYPASS;
    case QDF_DOMAIN_ATTR_ATOMIC:
        return DOMAIN_ATTR_ATOMIC;
    case QDF_DOMAIN_ATTR_SECURE_VMID:
        return DOMAIN_ATTR_SECURE_VMID;
    case QDF_DOMAIN_ATTR_FAST:
        return DOMAIN_ATTR_FAST;
    case QDF_DOMAIN_ATTR_PGTBL_INFO:
        return DOMAIN_ATTR_PGTBL_INFO;
    case QDF_DOMAIN_ATTR_USE_UPSTREAM_HINT:
        return DOMAIN_ATTR_USE_UPSTREAM_HINT;
    case QDF_DOMAIN_ATTR_EARLY_MAP:
        return DOMAIN_ATTR_EARLY_MAP;
    case QDF_DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
        return DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT;
    case QDF_DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
        return DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
    case QDF_DOMAIN_ATTR_USE_LLC_NWA:
        return DOMAIN_ATTR_USE_LLC_NWA;
    case QDF_DOMAIN_ATTR_SPLIT_TABLES:
        return DOMAIN_ATTR_SPLIT_TABLES;
    case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_CFRE:
        return DOMAIN_ATTR_FAULT_MODEL_NO_CFRE;
    case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_STALL:
        return DOMAIN_ATTR_FAULT_MODEL_NO_STALL;
    case QDF_DOMAIN_ATTR_FAULT_MODEL_HUPCF:
        return DOMAIN_ATTR_FAULT_MODEL_HUPCF;
    default:
        return DOMAIN_ATTR_EXTENDED_MAX;
    }
}
  412. /**
  413. * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
  414. *
  415. * @domain: iommu domain
  416. * @attr: iommu attribute
  417. * @data: data pointer
  418. *
  419. * Return: iommu domain attr
  420. */
  421. static inline int
  422. __qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
  423. enum qdf_iommu_attr attr, void *data)
  424. {
  425. return iommu_domain_get_attr(domain, __qdf_iommu_attr_to_os(attr),
  426. data);
  427. }
  428. #endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * Return: iommu domain
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
    return osdev->domain;
}
#else
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * Pre-4.19 kernels store the domain inside the arm iommu mapping.
 *
 * Return: iommu domain, or NULL if no mapping is attached
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
    if (osdev->iommu_mapping)
        return osdev->iommu_mapping->domain;

    return NULL;
}
#endif
  450. /**
  451. * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
  452. * @osdev: parent device instance
  453. * @dma_addr: dma_addr
  454. *
  455. * Get actual physical address from dma_addr based on SMMU enablement status.
  456. * IF SMMU Stage 1 translation is enabled, DMA APIs return IO virtual address
  457. * (IOVA) otherwise returns physical address. So get SMMU physical address
  458. * mapping from IOVA.
  459. *
  460. * Return: dmaable physical address
  461. */
  462. static inline unsigned long
  463. __qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
  464. qdf_dma_addr_t dma_addr)
  465. {
  466. struct iommu_domain *domain;
  467. if (__qdf_mem_smmu_s1_enabled(osdev)) {
  468. domain = __qdf_dev_get_domain(osdev);
  469. if (domain)
  470. return iommu_iova_to_phys(domain, dma_addr);
  471. }
  472. return dma_addr;
  473. }
  474. #else
/**
 * __qdf_mem_paddr_from_dmaaddr() - get actual physical address from dma_addr
 * @osdev: parent device instance (unused)
 * @dma_addr: dma_addr
 *
 * Without SMMU support compiled in, the DMA address is already the
 * physical address.
 *
 * Return: @dma_addr unchanged
 */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
                             qdf_dma_addr_t dma_addr)
{
    return dma_addr;
}
  481. #endif
/**
 * __qdf_os_mem_dma_get_sgtable() - Returns DMA memory scatter gather table
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma/iova
 * @size: allocated memory size
 *
 * Return: status from dma_get_sgtable() — 0 on success, negative on
 * failure (the table itself is filled into @sgt, not returned)
 */
static inline int
__qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
                             qdf_dma_addr_t dma_addr, size_t size)
{
    return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr,
                           size);
}

/**
 * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Return: None
 */
static inline void
__qdf_os_mem_free_sgtable(struct sg_table *sgt)
{
    sg_free_table(sgt);
}
  510. /**
  511. * __qdf_dma_get_sgtable_dma_addr()-Assigns DMA address to scatterlist elements
  512. * @sgt: scatter gather table pointer
  513. *
  514. * Return: None
  515. */
  516. static inline void
  517. __qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
  518. {
  519. struct scatterlist *sg;
  520. int i;
  521. for_each_sg(sgt->sgl, sg, sgt->nents, i) {
  522. if (!sg)
  523. break;
  524. sg->dma_address = sg_phys(sg);
  525. }
  526. }
  527. /**
  528. * __qdf_mem_get_dma_addr() - Return dma addr based on SMMU translation status
  529. * @osdev: parent device instance
  530. * @mem_info: Pointer to allocated memory information
  531. *
  532. * Based on smmu stage 1 translation enablement status, return corresponding dma
  533. * address from qdf_mem_info_t. If stage 1 translation enabled, return
  534. * IO virtual address otherwise return physical address.
  535. *
  536. * Return: dma address
  537. */
  538. static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev,
  539. qdf_mem_info_t *mem_info)
  540. {
  541. if (__qdf_mem_smmu_s1_enabled(osdev))
  542. return (qdf_dma_addr_t)mem_info->iova;
  543. else
  544. return (qdf_dma_addr_t)mem_info->pa;
  545. }
  546. /**
  547. * __qdf_mem_get_dma_addr_ptr() - Return DMA address storage pointer
  548. * @osdev: parent device instance
  549. * @mem_info: Pointer to allocated memory information
  550. *
  551. * Based on smmu stage 1 translation enablement status, return corresponding
  552. * dma address pointer from qdf_mem_info_t structure. If stage 1 translation
  553. * enabled, return pointer to IO virtual address otherwise return pointer to
  554. * physical address
  555. *
  556. * Return: dma address storage pointer
  557. */
  558. static inline qdf_dma_addr_t *
  559. __qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
  560. qdf_mem_info_t *mem_info)
  561. {
  562. if (__qdf_mem_smmu_s1_enabled(osdev))
  563. return (qdf_dma_addr_t *)(&mem_info->iova);
  564. else
  565. return (qdf_dma_addr_t *)(&mem_info->pa);
  566. }
  567. /**
  568. * __qdf_update_mem_map_table() - Update DMA memory map info
  569. * @osdev: Parent device instance
  570. * @mem_info: Pointer to shared memory information
  571. * @dma_addr: dma address
  572. * @mem_size: memory size allocated
  573. *
  574. * Store DMA shared memory information
  575. *
  576. * Return: none
  577. */
  578. static inline void __qdf_update_mem_map_table(qdf_device_t osdev,
  579. qdf_mem_info_t *mem_info,
  580. qdf_dma_addr_t dma_addr,
  581. uint32_t mem_size)
  582. {
  583. mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
  584. mem_info->iova = dma_addr;
  585. mem_info->size = mem_size;
  586. }
/**
 * __qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
__qdf_mem_get_dma_size(qdf_device_t osdev,
                       qdf_mem_info_t *mem_info)
{
    return mem_info->size;
}

/**
 * __qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_size(qdf_device_t osdev,
                       qdf_mem_info_t *mem_info,
                       uint32_t mem_size)
{
    mem_info->size = mem_size;
}

/**
 * __qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
__qdf_mem_get_dma_pa(qdf_device_t osdev,
                     qdf_mem_info_t *mem_info)
{
    return mem_info->pa;
}

/**
 * __qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: Pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_pa(qdf_device_t osdev,
                     qdf_mem_info_t *mem_info,
                     qdf_dma_addr_t dma_pa)
{
    mem_info->pa = dma_pa;
}
/**
 * __qdf_mem_alloc_consistent() - allocates consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size to be allocated
 * @paddr: Physical address
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * Return: pointer of allocated memory or null if memory alloc fails
 */
void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
                                 qdf_size_t size, qdf_dma_addr_t *paddr,
                                 const char *func, uint32_t line);

/**
 * __qdf_mem_malloc() - allocates QDF memory
 * @size: Number of bytes of memory to allocate.
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocate, returns a non-NULL pointer to the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *__qdf_mem_malloc(qdf_size_t size, const char *func, uint32_t line);

/**
 * __qdf_mem_free() - free QDF memory
 * @ptr: Pointer to the starting address of the memory to be freed.
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void __qdf_mem_free(void *ptr);

/**
 * __qdf_mem_valloc() - QDF virtual memory allocation API
 * @size: Number of bytes of virtual memory to allocate.
 * @func: Caller function name
 * @line: Line number
 *
 * Return: A valid memory location on success, or NULL on failure
 */
void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line);

/**
 * __qdf_mem_vfree() - QDF API to free virtual memory
 * @ptr: Pointer to the virtual memory to free
 *
 * Return: None
 */
void __qdf_mem_vfree(void *ptr);

/**
 * __qdf_mem_virt_to_phys() - Convert virtual address to physical
 * @vaddr: virtual address
 *
 * Return: physical address
 */
#define __qdf_mem_virt_to_phys(vaddr) virt_to_phys(vaddr)

#ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
/**
 * __qdf_untracked_mem_malloc() - allocates non-QDF memory
 * @size: Number of bytes of memory to allocate.
 * @func: Function name of the call site
 * @line: Line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory. Memory allocated is not tracked by qdf memory debug framework.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *__qdf_untracked_mem_malloc(qdf_size_t size, const char *func,
                                 uint32_t line);

/**
 * __qdf_untracked_mem_free() - free non-QDF memory
 * @ptr: Pointer to the starting address of the memory to be freed.
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void __qdf_untracked_mem_free(void *ptr);
#endif

/**
 * __qdf_mem_free_consistent() - free consistent qdf memory
 * @osdev: OS device handle
 * @dev: Pointer to device handle
 * @size: Size of the memory block being freed
 * @vaddr: virtual address
 * @paddr: Physical address
 * @memctx: Pointer to DMA context
 *
 * Return: none
 */
void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
                               qdf_size_t size, void *vaddr,
                               qdf_dma_addr_t paddr, qdf_dma_context_t memctx);
  744. #endif /* __I_QDF_MEM_H */