/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *     * Neither the name of The Linux Foundation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _HAL_API_H_
#define _HAL_API_H_

#include "qdf_types.h"
#include "hal_internal.h"
#include "hif_io32.h"
#include "rx_msdu_link.h"
#include "rx_reo_queue.h"
#include "rx_reo_queue_ext.h"
/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *	   NULL on failure
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
extern void *hal_attach(void *hif_handle, qdf_device_t qdf_dev);
/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF detach
 *
 */
extern void hal_detach(void *hal_soc);
/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	REO_DST,
	REO_EXCEPTION,
	REO_REINJECT,
	REO_CMD,
	REO_STATUS,
	TCL_DATA,
	TCL_CMD,
	TCL_STATUS,
	CE_SRC,
	CE_DST,
	CE_DST_STATUS,
	WBM_IDLE_LINK,
	SW2WBM_RELEASE,
	WBM2SW_RELEASE,
	RXDMA_BUF,
	RXDMA_DST,
	RXDMA_MONITOR_BUF,
	RXDMA_MONITOR_STATUS,
	RXDMA_MONITOR_DST,
	MAX_RING_TYPES
};
/* SRNG flags passed in hal_srng_params.flags */
#define HAL_SRNG_MSI_SWAP			0x00000008
#define HAL_SRNG_RING_PTR_SWAP			0x00000010
#define HAL_SRNG_DATA_TLV_SWAP			0x00000020
#define HAL_SRNG_LOW_THRES_INTR_ENABLE		0x00010000
#define HAL_SRNG_MSI_INTR			0x00020000
/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
 * used by callers for calculating the size of memory to be allocated before
 * calling hal_srng_setup to setup the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 */
extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
};
/**
 * hal_srng_setup - Initialize HW SRNG ring.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of the same type
 *	(starting from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
 * structure. Ring base address should be 8 byte aligned and size of each ring
 * entry should be queried using the API hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *	   NULL on failure (if given ring is not available)
 */
extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params);
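/*
 * Illustrative sketch (not part of this API): setting up a REO destination
 * ring. It assumes 'hal_soc' was obtained from hal_attach() and that
 * 'ring_vaddr'/'ring_paddr' refer to an 8-byte aligned, DMA-coherent
 * allocation (e.g. from qdf_mem_alloc_consistent()).
 *
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *	uint32_t num_entries = 128;
 *	struct hal_srng_params ring_params = {0};
 *	void *srng;
 *
 *	// ring_vaddr/ring_paddr: DMA-coherent allocation of
 *	// num_entries * entry_size bytes, 8-byte aligned
 *	ring_params.ring_base_vaddr = ring_vaddr;
 *	ring_params.ring_base_paddr = ring_paddr;
 *	ring_params.num_entries = num_entries;
 *	ring_params.intr_timer_thres_us = 8;
 *	ring_params.intr_batch_cntr_thres_entries = 1;
 *
 *	srng = hal_srng_setup(hal_soc, REO_DST, 0, 0, &ring_params);
 *	if (!srng)
 *		goto fail;	// ring not available
 */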
/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_srng: Opaque HAL SRNG pointer
 */
extern void hal_srng_cleanup(void *hal_soc, void *hal_srng);
/**
 * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
 * hal_srng_access_start if locked access is required
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
	else
		srng->u.dst_ring.cached_hp =
			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

	return 0;
}
/**
 * hal_srng_access_start - Start (locked) ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	SRNG_LOCK(&(srng->lock));

	return hal_srng_access_start_unlocked(hal_soc, hal_ring);
}
/**
 * hal_srng_dst_get_next - Get next entry from a destination ring and move
 * cached tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
	uint32_t desc_loop_cnt;

	desc_loop_cnt = (desc[srng->entry_size - 1] & SRNG_LOOP_CNT_MASK)
		>> SRNG_LOOP_CNT_LSB;

	if (srng->u.dst_ring.loop_cnt == desc_loop_cnt) {
		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) &
			srng->ring_size_mask;
		srng->u.dst_ring.loop_cnt = (srng->u.dst_ring.loop_cnt +
			!srng->u.dst_ring.tp) &
			(SRNG_LOOP_CNT_MASK >> SRNG_LOOP_CNT_LSB);
		/* TODO: Confirm if loop count mask is same for all rings */
		return (void *)desc;
	}

	return NULL;
}
/**
 * hal_srng_dst_peek - Get next entry from a ring without moving tail pointer.
 * hal_srng_dst_get_next should be called subsequently to move the tail pointer
 * TODO: See if we need an optimized version of get_next that doesn't check for
 * loop_cnt
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
	uint32_t desc_loop_cnt;

	desc_loop_cnt = (desc[srng->entry_size - 1] & SRNG_LOOP_CNT_MASK)
		>> SRNG_LOOP_CNT_LSB;

	if (srng->u.dst_ring.loop_cnt == desc_loop_cnt)
		return (void *)desc;

	return NULL;
}
/**
 * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
 * by SW) in destination ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 * @sync_hw_ptr: Sync cached head pointer with HW
 *
 */
static inline uint32_t hal_srng_dst_num_valid(void *hal_soc, void *hal_ring,
	int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t hp;
	uint32_t tp = srng->u.dst_ring.tp;

	if (sync_hw_ptr) {
		hp = *(srng->u.dst_ring.hp_addr);
		srng->u.dst_ring.cached_hp = hp;
	} else {
		hp = srng->u.dst_ring.cached_hp;
	}

	if (hp >= tp)
		return (hp - tp) / srng->entry_size;
	else
		return (srng->ring_size - tp + hp) / srng->entry_size;
}
/**
 * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
 * pointer. This can be used to release any buffers associated with completed
 * ring entries. Note that this should not be used for posting new descriptor
 * entries. Posting of new entries should be done only using
 * hal_srng_src_get_next_reaped when this function is used for reaping.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_reap_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) &
		srng->ring_size_mask;

	if (next_reap_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}
/**
 * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
 * already reaped using hal_srng_src_reap_next, for posting new entries to
 * the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next_reaped(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) &
			srng->ring_size_mask;
		return (void *)desc;
	}

	return NULL;
}
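/*
 * Illustrative sketch (not part of this API): the reap-then-repost pattern.
 * Buffers for entries already consumed by HW are first released via
 * hal_srng_src_reap_next(), and the freed slots are then reused for posting
 * through hal_srng_src_get_next_reaped(). hal_srng_access_end() is defined
 * further below in this header; the other helpers shown are hypothetical
 * caller-side functions.
 *
 *	void *desc;
 *
 *	hal_srng_access_start(hal_soc, hal_ring);
 *
 *	// Phase 1: reap entries HW has completed and release their buffers
 *	while ((desc = hal_srng_src_reap_next(hal_soc, hal_ring)))
 *		release_tx_buffer(desc);	// hypothetical helper
 *
 *	// Phase 2: post new descriptors only into the slots reaped above
 *	while (have_pending_frames() &&		// hypothetical helper
 *	       (desc = hal_srng_src_get_next_reaped(hal_soc, hal_ring)))
 *		fill_tx_descriptor(desc);	// hypothetical helper
 *
 *	hal_srng_access_end(hal_soc, hal_ring);
 */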
/**
 * hal_srng_src_done_val - Returns the number of source ring entries that have
 * been consumed by HW but are yet to be reaped by SW
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Number of entries pending reap; 0 if none
 */
static inline uint32_t hal_srng_src_done_val(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) &
		srng->ring_size_mask;

	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return 0;

	if (srng->u.src_ring.cached_tp > next_reap_hp)
		return (srng->u.src_ring.cached_tp - next_reap_hp) /
			srng->entry_size;
	else
		return ((srng->ring_size - next_reap_hp) +
			srng->u.src_ring.cached_tp) / srng->entry_size;
}
/**
 * hal_srng_src_get_next - Get next entry from a source ring and move cached
 * head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;
	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) &
		srng->ring_size_mask;

	if (next_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = next_hp;
		/* TODO: Since reap function is not used by all rings, we can
		 * remove the following update of reap_hp in this function
		 * if we can ensure that only hal_srng_src_get_next_reaped
		 * is used for the rings requiring reap functionality
		 */
		srng->u.src_ring.reap_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}
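/*
 * Illustrative sketch (not part of this API): posting a batch of descriptors
 * to a source ring under the SRNG lock. hal_srng_access_end() is defined
 * further below in this header; fill_tcl_descriptor(), 'frames' and
 * 'num_frames' are hypothetical caller-side helpers/variables.
 *
 *	void *desc;
 *	int posted = 0;
 *
 *	hal_srng_access_start(hal_soc, hal_ring);
 *
 *	while (posted < num_frames &&
 *	       (desc = hal_srng_src_get_next(hal_soc, hal_ring))) {
 *		fill_tcl_descriptor(desc, frames[posted]);	// hypothetical
 *		posted++;
 *	}
 *
 *	// Writes the updated head pointer to HW and releases the lock
 *	hal_srng_access_end(hal_soc, hal_ring);
 */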
/**
 * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
 * hal_srng_src_get_next should be called subsequently to move the head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_peek(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	if (((srng->u.src_ring.hp + srng->entry_size) &
		srng->ring_size_mask) != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		return (void *)desc;
	}

	return NULL;
}
/**
 * hal_srng_src_num_avail - Returns number of available entries in src ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 * @sync_hw_ptr: Sync cached tail pointer with HW
 *
 */
static inline uint32_t hal_srng_src_num_avail(void *hal_soc,
	void *hal_ring, int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t tp;
	uint32_t hp = srng->u.src_ring.hp;

	if (sync_hw_ptr) {
		tp = *(srng->u.src_ring.tp_addr);
		srng->u.src_ring.cached_tp = tp;
	} else {
		tp = srng->u.src_ring.cached_tp;
	}

	if (tp > hp)
		return ((tp - hp) / srng->entry_size) - 1;
	else
		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}
/**
 * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
 * ring head/tail pointers to HW.
 * This should be used only if hal_srng_access_start_unlocked was used to
 * start ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_SRC_RING)
			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
		else
			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
	} else {
		if (srng->ring_dir == HAL_SRNG_SRC_RING)
			hif_write32_mb(srng->u.src_ring.hp_addr,
				srng->u.src_ring.hp);
		else
			hif_write32_mb(srng->u.dst_ring.tp_addr,
				srng->u.dst_ring.tp);
	}
}
/**
 * hal_srng_access_end - Unlock ring access and update cached ring head/tail
 * pointers to HW
 * This should be used only if hal_srng_access_start was used to start
 * ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	hal_srng_access_end_unlocked(hal_soc, hal_ring);
	SRNG_UNLOCK(&(srng->lock));
}
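/*
 * Illustrative sketch (not part of this API): a typical destination ring
 * processing loop using the locked access APIs above. process_rx_descriptor()
 * is a hypothetical caller-side helper and the quota-based budgeting is an
 * assumption for the example.
 *
 *	void *desc;
 *	uint32_t num_pending;
 *	int quota = 32;
 *
 *	hal_srng_access_start(hal_soc, hal_ring);
 *
 *	// Snapshot pending work; sync_hw_ptr=1 refreshes the cached HP
 *	num_pending = hal_srng_dst_num_valid(hal_soc, hal_ring, 1);
 *
 *	while (num_pending && quota &&
 *	       (desc = hal_srng_dst_get_next(hal_soc, hal_ring))) {
 *		process_rx_descriptor(desc);	// hypothetical helper
 *		num_pending--;
 *		quota--;
 *	}
 *
 *	// Writes the updated tail pointer to HW and releases the lock
 *	hal_srng_access_end(hal_soc, hal_ring);
 */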
/**
 * hal_srng_access_end_reap - Unlock ring access
 * This should be used only if hal_srng_access_start was used to start
 * ring access and should be used only while reaping SRC ring completions
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end_reap(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	SRNG_UNLOCK(&(srng->lock));
}
/* TODO: Check if the following definitions are available in HW headers */
#define WBM_IDLE_DESC_LIST 1
#define WBM_IDLE_SCATTER_BUF_SIZE 32704
#define NUM_MPDUS_PER_LINK_DESC 6
#define NUM_MSDUS_PER_LINK_DESC 7

#define REO_QUEUE_DESC_ALIGN 128

#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2)
#define LINK_DESC_ALIGN 128

/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
 * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
 */
#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12

/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
 * should be specified in 16 word units. But the number of bits defined for
 * this field in HW header files is 5.
 */
#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
/**
 * hal_set_link_desc_addr - Setup link descriptor in a buffer_addr_info
 * HW structure
 *
 * @desc: Descriptor entry (from WBM_IDLE_LINK ring)
 * @cookie: SW cookie for the buffer/descriptor
 * @link_desc_paddr: Physical address of link descriptor entry
 *
 */
static inline void hal_set_link_desc_addr(void *desc, uint32_t cookie,
	qdf_dma_addr_t link_desc_paddr)
{
	uint32_t *buf_addr = (uint32_t *)desc;

	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_0, BUFFER_ADDR_31_0,
		link_desc_paddr & 0xffffffff);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32,
		(uint64_t)link_desc_paddr >> 32);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, RETURN_BUFFER_MANAGER,
		WBM_IDLE_DESC_LIST);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, SW_BUFFER_COOKIE,
		cookie);
}
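/*
 * Illustrative sketch (not part of this API): populating WBM_IDLE_LINK ring
 * entries with link descriptor addresses. The link descriptor memory layout
 * ('link_desc_paddr', 'num_link_descs') and the use of the index as the SW
 * cookie are caller-side assumptions for the example; hal_get_link_desc_size()
 * is defined just below.
 *
 *	void *desc;
 *	uint32_t i = 0;
 *
 *	hal_srng_access_start(hal_soc, wbm_idle_link_ring);
 *
 *	while (i < num_link_descs &&
 *	       (desc = hal_srng_src_get_next(hal_soc, wbm_idle_link_ring))) {
 *		hal_set_link_desc_addr(desc, i,
 *			link_desc_paddr + i * hal_get_link_desc_size(hal_soc));
 *		i++;
 *	}
 *
 *	hal_srng_access_end(hal_soc, wbm_idle_link_ring);
 */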
/**
 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
 * in an idle list
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_idle_list_scatter_buf_size(void *hal_soc)
{
	return WBM_IDLE_SCATTER_BUF_SIZE;
}
/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_link_desc_size(void *hal_soc)
{
	return LINK_DESC_SIZE;
}
/**
 * hal_get_link_desc_align - Get the required start address alignment for
 * link descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_link_desc_align(void *hal_soc)
{
	return LINK_DESC_ALIGN;
}
/**
 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_mpdus_per_link_desc(void *hal_soc)
{
	return NUM_MPDUS_PER_LINK_DESC;
}
/**
 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_msdus_per_link_desc(void *hal_soc)
{
	return NUM_MSDUS_PER_LINK_DESC;
}
/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_mpdu_links_per_queue_desc(void *hal_soc)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}
/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that fit in the given scatter buffer size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 *
 */
static inline uint32_t hal_idle_scatter_buf_num_entries(void *hal_soc,
	uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK);
}
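/*
 * Illustrative sketch (not part of this API): sizing the idle-list scatter
 * buffers for a given total number of link descriptors. 'total_link_descs'
 * is an assumed caller-side quantity.
 *
 *	uint32_t scatter_buf_size = hal_idle_list_scatter_buf_size(hal_soc);
 *	uint32_t entries_per_buf =
 *		hal_idle_scatter_buf_num_entries(hal_soc, scatter_buf_size);
 *	uint32_t num_scatter_bufs =
 *		(total_link_descs + entries_per_buf - 1) / entries_per_buf;
 *
 *	// Each of the num_scatter_bufs buffers (scatter_buf_size bytes,
 *	// DMA-coherent) is then passed to hal_setup_link_idle_list(),
 *	// declared below.
 */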
/**
 * hal_setup_link_idle_list - Setup scattered idle list using the buffer list
 * provided
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_bufs_base_paddr: Array of physical base addresses
 * @scatter_bufs_base_vaddr: Array of virtual base addresses
 * @num_scatter_bufs: Number of scatter buffers in the above lists
 * @scatter_buf_size: Size of each scatter buffer
 * @last_buf_end_offset: Offset to the last entry in the last scatter buffer
 *
 */
extern void hal_setup_link_idle_list(void *hal_soc,
	qdf_dma_addr_t scatter_bufs_base_paddr[],
	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
	uint32_t scatter_buf_size, uint32_t last_buf_end_offset);
/**
 * hal_reo_setup - Initialize HW REO block
 *
 * @hal_soc: Opaque HAL SOC handle
 */
extern void hal_reo_setup(void *hal_soc);

enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};
#define HAL_RX_MAX_BA_WINDOW 256

/**
 * hal_get_reo_qdesc_size - Get size of reo queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ba_window_size: BlockAck window size
 *
 */
static inline uint32_t hal_get_reo_qdesc_size(void *hal_soc,
	uint32_t ba_window_size)
{
	if (ba_window_size <= 1)
		return sizeof(struct rx_reo_queue);

	if (ba_window_size <= 105)
		return sizeof(struct rx_reo_queue) +
			sizeof(struct rx_reo_queue_ext);

	if (ba_window_size <= 210)
		return sizeof(struct rx_reo_queue) +
			(2 * sizeof(struct rx_reo_queue_ext));

	return sizeof(struct rx_reo_queue) +
		(3 * sizeof(struct rx_reo_queue_ext));
}
/**
 * hal_get_reo_qdesc_align - Get start address alignment for reo
 * queue descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_reo_qdesc_align(void *hal_soc)
{
	return REO_QUEUE_DESC_ALIGN;
}
/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID for which the queue descriptor is being set up
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 *
 */
extern void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
	int pn_type);
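/*
 * Illustrative sketch (not part of this API): allocating and initializing a
 * REO queue descriptor for one TID. The aligned DMA-coherent allocation and
 * the 'tid', 'ba_window_size' and 'start_seq' values are caller-side
 * assumptions for the example.
 *
 *	uint32_t qdesc_size = hal_get_reo_qdesc_size(hal_soc, ba_window_size);
 *	uint32_t qdesc_align = hal_get_reo_qdesc_align(hal_soc);
 *	void *qdesc_vaddr;
 *	qdf_dma_addr_t qdesc_paddr;
 *
 *	// Obtain qdesc_size bytes of DMA-coherent memory whose start address
 *	// is aligned to qdesc_align (e.g. by over-allocating and rounding up),
 *	// yielding qdesc_vaddr and qdesc_paddr.
 *
 *	hal_reo_qdesc_setup(hal_soc, tid, ba_window_size, start_seq,
 *		qdesc_vaddr, qdesc_paddr, HAL_PN_NONE);
 */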
/**
 * hal_srng_get_hp_addr - Get head pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 */
static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (!(srng->flags & HAL_SRNG_LMAC_RING)) {
		/* Currently this interface is required only for LMAC rings */
		return (qdf_dma_addr_t)NULL;
	}

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.hp_addr) -
			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
	} else {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.hp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
	}
}
/**
 * hal_srng_get_tp_addr - Get tail pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 */
static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (!(srng->flags & HAL_SRNG_LMAC_RING)) {
		/* Currently this interface is required only for LMAC rings */
		return (qdf_dma_addr_t)NULL;
	}

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.tp_addr) -
			(unsigned long)(hal->shadow_rdptr_mem_vaddr));
	} else {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.tp_addr) -
			(unsigned long)(hal->shadow_wrptr_mem_vaddr));
	}
}
/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
	struct hal_srng_params *ring_params);

#endif /* _HAL_API_H_ */