hal_api.h

/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *     * Neither the name of The Linux Foundation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _HAL_API_H_
#define _HAL_API_H_

#include "qdf_types.h"
#include "qdf_util.h"
#include "hal_internal.h"
#include "rx_msdu_link.h"
#include "rx_reo_queue.h"
#include "rx_reo_queue_ext.h"

#define MAX_UNWINDOWED_ADDRESS 0x80000
#define WINDOW_ENABLE_BIT 0x80000000
#define WINDOW_REG_ADDRESS 0x310C
#define WINDOW_SHIFT 19
#define WINDOW_VALUE_MASK 0x1F
#define WINDOW_START MAX_UNWINDOWED_ADDRESS
#define WINDOW_RANGE_MASK 0x7FFFF
static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;

	if (window != hal_soc->register_window) {
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
			      WINDOW_ENABLE_BIT | window);
		hal_soc->register_window = window;
	}
}
/**
 * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) - 1
 * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
 * note3: WINDOW_VALUE_MASK = big enough that trying to write past that window
 *	  would be a bug
 */
static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
				  uint32_t value)
{
	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
	} else {
		qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
		hal_select_window(hal_soc, offset);
		qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
			      (offset & WINDOW_RANGE_MASK), value);
		qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
	}
}
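
/*
 * Illustrative sketch of how a windowed access decomposes (hypothetical
 * offset, not part of this API): with WINDOW_SHIFT = 19, an access to offset
 * 0xA3010 selects window (0xA3010 >> 19) & WINDOW_VALUE_MASK = 1 and then
 * touches dev_base_addr + WINDOW_START + (0xA3010 & WINDOW_RANGE_MASK), i.e.
 * dev_base_addr + 0x80000 + 0x23010. Callers do not perform this
 * decomposition themselves; they simply use the accessors:
 *
 *	hal_write32_mb(hal_soc, reg_offset, value);
 *	value = hal_read32_mb(hal_soc, reg_offset);
 */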
/**
 * hal_write_address_32_mb - write a value to a register
 *
 */
static inline void hal_write_address_32_mb(struct hal_soc *hal_soc,
					   void __iomem *addr, uint32_t value)
{
	uint32_t offset;

	if (!hal_soc->use_register_windowing)
		return qdf_iowrite32(addr, value);

	offset = addr - hal_soc->dev_base_addr;
	hal_write32_mb(hal_soc, offset, value);
}
static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
{
	uint32_t ret;

	if (!hal_soc->use_register_windowing ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		return qdf_ioread32(hal_soc->dev_base_addr + offset);
	}

	qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
	hal_select_window(hal_soc, offset);
	ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
			   (offset & WINDOW_RANGE_MASK));
	qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);

	return ret;
}

#include "hif_io32.h"
/**
 * hal_attach - Initialize HAL layer
 * @hif_handle: Opaque HIF handle
 * @qdf_dev: QDF device
 *
 * Return: Opaque HAL SOC handle
 *	   NULL on failure
 *
 * This function should be called as part of HIF initialization (for accessing
 * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
 */
extern void *hal_attach(void *hif_handle, qdf_device_t qdf_dev);
/**
 * hal_detach - Detach HAL layer
 * @hal_soc: HAL SOC handle
 *
 * This function should be called as part of HIF detach
 *
 */
extern void hal_detach(void *hal_soc);
/* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	REO_DST,
	REO_EXCEPTION,
	REO_REINJECT,
	REO_CMD,
	REO_STATUS,
	TCL_DATA,
	TCL_CMD,
	TCL_STATUS,
	CE_SRC,
	CE_DST,
	CE_DST_STATUS,
	WBM_IDLE_LINK,
	SW2WBM_RELEASE,
	WBM2SW_RELEASE,
	RXDMA_BUF,
	RXDMA_DST,
	RXDMA_MONITOR_BUF,
	RXDMA_MONITOR_STATUS,
	RXDMA_MONITOR_DST,
	RXDMA_MONITOR_DESC,
	MAX_RING_TYPES
};

/* SRNG flags passed in hal_srng_params.flags */
#define HAL_SRNG_MSI_SWAP		0x00000008
#define HAL_SRNG_RING_PTR_SWAP		0x00000010
#define HAL_SRNG_DATA_TLV_SWAP		0x00000020
#define HAL_SRNG_LOW_THRES_INTR_ENABLE	0x00010000
#define HAL_SRNG_MSI_INTR		0x00020000
/**
 * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
 * used by callers for calculating the size of memory to be allocated before
 * calling hal_srng_setup to setup the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 */
extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);

/**
 * hal_srng_max_entries - Returns maximum possible number of ring entries
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 *
 * Return: Maximum number of entries for the given ring_type
 */
uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
/* HAL memory information */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};
/* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;
	/* Size of ring entry */
	uint32_t entry_size;
	/* hw register base address */
	void *hwreg_base[MAX_SRNG_REG_GROUPS];
};
/**
 * hal_construct_shadow_config() - initialize the shadow registers for dp rings
 * @hal_soc: hal handle
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);
/**
 * hal_set_one_shadow_config() - add a config for the specified ring
 * @hal_soc: hal handle
 * @ring_type: ring type
 * @ring_num: ring num
 *
 * The ring type and ring num uniquely specify the ring. After this call,
 * the hp/tp will be added as the next entry in the shadow register
 * configuration table. The hal code will use the shadow register address
 * in place of the hp/tp address.
 *
 * This function is exposed, so that the CE module can skip configuring shadow
 * registers for unused rings and rings assigned to the firmware.
 *
 * Return: QDF_STATUS_OK on success
 */
extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
	int ring_num);
/**
 * hal_get_shadow_config() - retrieve the config table
 * @hal_soc: hal handle
 * @shadow_config: will point to the shadow register configuration table after
 *		   this call
 * @num_shadow_registers_configured: will contain the number of valid entries
 */
extern void hal_get_shadow_config(void *hal_soc,
	struct pld_shadow_reg_v2_cfg **shadow_config,
	int *num_shadow_registers_configured);
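
/*
 * Illustrative configuration-flow sketch (hypothetical caller code, not part
 * of this API). A caller such as the CE module might add shadow entries only
 * for the rings it actually uses, let the DP rings be covered by
 * hal_construct_shadow_config(), and finally retrieve the resulting table:
 *
 *	struct pld_shadow_reg_v2_cfg *shadow_config;
 *	int num_shadow_registers_configured;
 *
 *	hal_set_one_shadow_config(hal_soc, CE_SRC, 0);
 *	hal_set_one_shadow_config(hal_soc, CE_DST, 0);
 *	hal_construct_shadow_config(hal_soc);
 *	hal_get_shadow_config(hal_soc, &shadow_config,
 *			      &num_shadow_registers_configured);
 */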
/**
 * hal_srng_setup - Initialize HW SRNG ring.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ring_type: one of the types from hal_ring_type
 * @ring_num: Ring number if there are multiple rings of
 *	      same type (starting from 0)
 * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
 * @ring_params: SRNG ring params in hal_srng_params structure.
 *
 * Callers are expected to allocate contiguous ring memory of size
 * 'num_entries * entry_size' bytes and pass the physical and virtual base
 * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
 * structure. Ring base address should be 8 byte aligned and size of each ring
 * entry should be queried using the API hal_srng_get_entrysize
 *
 * Return: Opaque pointer to ring on success
 *	   NULL on failure (if given ring is not available)
 */
extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
	int mac_id, struct hal_srng_params *ring_params);
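
/*
 * Illustrative usage sketch (hypothetical caller code, not part of this API).
 * The caller queries the entry size, allocates 8 byte aligned contiguous ring
 * memory of num_entries * entry_size bytes (ring_vaddr/ring_paddr below are
 * assumed to come from the caller's DMA allocator) and passes both addresses
 * through hal_srng_params:
 *
 *	struct hal_srng_params params = {0};
 *	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, REO_DST);
 *	uint32_t num_entries = 1024;
 *	void *hal_ring;
 *
 *	params.ring_base_vaddr = ring_vaddr;
 *	params.ring_base_paddr = ring_paddr;
 *	params.num_entries = num_entries;
 *	hal_ring = hal_srng_setup(hal_soc, REO_DST, 0, 0, &params);
 *
 * A NULL return means the requested ring is not available.
 */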
/**
 * hal_srng_cleanup - Deinitialize HW SRNG ring.
 * @hal_soc: Opaque HAL SOC handle
 * @hal_srng: Opaque HAL SRNG pointer
 */
extern void hal_srng_cleanup(void *hal_soc, void *hal_srng);
/**
 * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
 * hal_srng_access_start if locked access is required
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start_unlocked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
	else
		srng->u.dst_ring.cached_hp =
			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

	return 0;
}
/**
 * hal_srng_access_start - Start (locked) ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: 0 on success; error on failure
 */
static inline int hal_srng_access_start(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	SRNG_LOCK(&(srng->lock));

	return hal_srng_access_start_unlocked(hal_soc, hal_ring);
}
/**
 * hal_srng_dst_get_next - Get next entry from a destination ring and move
 * cached tail pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	volatile uint32_t *desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
	uint32_t desc_loop_cnt;

	desc_loop_cnt = (desc[srng->entry_size - 1] & SRNG_LOOP_CNT_MASK)
		>> SRNG_LOOP_CNT_LSB;

	if (srng->u.dst_ring.loop_cnt == desc_loop_cnt) {
		/* TODO: Using % is expensive, but we have to do this since
		 * size of some SRNG rings is not power of 2 (due to descriptor
		 * sizes). Need to create separate API for rings used
		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
		 * SW2RXDMA and CE rings)
		 */
		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
			srng->ring_size;

		srng->u.dst_ring.loop_cnt = (srng->u.dst_ring.loop_cnt +
			!srng->u.dst_ring.tp) &
			(SRNG_LOOP_CNT_MASK >> SRNG_LOOP_CNT_LSB);
		/* TODO: Confirm if loop count mask is same for all rings */
		return (void *)desc;
	}

	return NULL;
}
/**
 * hal_srng_dst_peek - Get next entry from a ring without moving tail pointer.
 * hal_srng_dst_get_next should be called subsequently to move the tail pointer
 * TODO: See if we need an optimized version of get_next that doesn't check for
 * loop_cnt
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_dst_peek(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
	uint32_t desc_loop_cnt;

	desc_loop_cnt = (desc[srng->entry_size - 1] & SRNG_LOOP_CNT_MASK)
		>> SRNG_LOOP_CNT_LSB;

	if (srng->u.dst_ring.loop_cnt == desc_loop_cnt)
		return (void *)desc;

	return NULL;
}
/**
 * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
 * by SW) in destination ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Destination ring pointer
 * @sync_hw_ptr: Sync cached head pointer with HW
 *
 */
static inline uint32_t hal_srng_dst_num_valid(void *hal_soc, void *hal_ring,
					      int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t hp;
	uint32_t tp = srng->u.dst_ring.tp;

	if (sync_hw_ptr) {
		hp = *(srng->u.dst_ring.hp_addr);
		srng->u.dst_ring.cached_hp = hp;
	} else {
		hp = srng->u.dst_ring.cached_hp;
	}

	if (hp >= tp)
		return (hp - tp) / srng->entry_size;
	else
		return (srng->ring_size - tp + hp) / srng->entry_size;
}
/**
 * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
 * pointer. This can be used to release any buffers associated with completed
 * ring entries. Note that this should not be used for posting new descriptor
 * entries. Posting of new entries should be done only using
 * hal_srng_src_get_next_reaped when this function is used for reaping.
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_reap_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[next_reap_hp]);
		srng->u.src_ring.reap_hp = next_reap_hp;
		return (void *)desc;
	}

	return NULL;
}
/**
 * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
 * already reaped using hal_srng_src_reap_next, for posting new entries to
 * the ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next (reaped) source ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next_reaped(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
			srng->ring_size;
		return (void *)desc;
	}

	return NULL;
}
/**
 * hal_srng_src_done_val - Returns the number of source ring entries that have
 * been consumed by the target and are not yet reaped by SW
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Number of ring entries that can be reaped
 */
static inline uint32_t hal_srng_src_done_val(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
		srng->ring_size;

	if (next_reap_hp == srng->u.src_ring.cached_tp)
		return 0;

	if (srng->u.src_ring.cached_tp > next_reap_hp)
		return (srng->u.src_ring.cached_tp - next_reap_hp) /
			srng->entry_size;
	else
		return ((srng->ring_size - next_reap_hp) +
			srng->u.src_ring.cached_tp) / srng->entry_size;
}
/**
 * hal_srng_src_get_next - Get next entry from a source ring and move cached
 * head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_get_next(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
		srng->ring_size;

	if (next_hp != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		srng->u.src_ring.hp = next_hp;
		/* TODO: Since reap function is not used by all rings, we can
		 * remove the following update of reap_hp in this function
		 * if we can ensure that only hal_srng_src_get_next_reaped
		 * is used for the rings requiring reap functionality
		 */
		srng->u.src_ring.reap_hp = next_hp;
		return (void *)desc;
	}

	return NULL;
}
/**
 * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
 * hal_srng_src_get_next should be called subsequently to move the head pointer
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 *
 * Return: Opaque pointer for next ring entry; NULL on failure
 */
static inline void *hal_srng_src_peek(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t *desc;

	/* TODO: Using % is expensive, but we have to do this since
	 * size of some SRNG rings is not power of 2 (due to descriptor
	 * sizes). Need to create separate API for rings used
	 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
	 * SW2RXDMA and CE rings)
	 */
	if (((srng->u.src_ring.hp + srng->entry_size) %
	     srng->ring_size) != srng->u.src_ring.cached_tp) {
		desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
		return (void *)desc;
	}

	return NULL;
}
/**
 * hal_srng_src_num_avail - Returns number of available entries in src ring
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Source ring pointer
 * @sync_hw_ptr: Sync cached tail pointer with HW
 *
 */
static inline uint32_t hal_srng_src_num_avail(void *hal_soc,
					      void *hal_ring, int sync_hw_ptr)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	uint32_t tp;
	uint32_t hp = srng->u.src_ring.hp;

	if (sync_hw_ptr) {
		tp = *(srng->u.src_ring.tp_addr);
		srng->u.src_ring.cached_tp = tp;
	} else {
		tp = srng->u.src_ring.cached_tp;
	}

	if (tp > hp)
		return ((tp - hp) / srng->entry_size) - 1;
	else
		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
}
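
/*
 * Illustrative source-ring posting sketch (hypothetical caller code, not part
 * of this API). Descriptor field encoding is elided; hal_srng_access_end() is
 * declared further below:
 *
 *	hal_srng_access_start(hal_soc, hal_ring);
 *	while (num_to_post && hal_srng_src_num_avail(hal_soc, hal_ring, 0)) {
 *		uint32_t *desc = hal_srng_src_get_next(hal_soc, hal_ring);
 *
 *		if (!desc)
 *			break;
 *		(fill in the descriptor fields here)
 *		num_to_post--;
 *	}
 *	hal_srng_access_end(hal_soc, hal_ring);
 */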
/**
 * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
 * ring head/tail pointers to HW.
 * This should be used only if hal_srng_access_start_unlocked was used to
 * start ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end_unlocked(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	/* TODO: See if we need a write memory barrier here */
	if (srng->flags & HAL_SRNG_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_SRC_RING) {
			*(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
		} else {
			*(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_SRC_RING)
			hal_write_address_32_mb(hal_soc,
						srng->u.src_ring.hp_addr,
						srng->u.src_ring.hp);
		else
			hal_write_address_32_mb(hal_soc,
						srng->u.dst_ring.tp_addr,
						srng->u.dst_ring.tp);
	}
}
/**
 * hal_srng_access_end - Unlock ring access and update cached ring head/tail
 * pointers to HW
 * This should be used only if hal_srng_access_start was used to start
 * ring access
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	hal_srng_access_end_unlocked(hal_soc, hal_ring);
	SRNG_UNLOCK(&(srng->lock));
}
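
/*
 * Illustrative destination-ring processing sketch (hypothetical caller code,
 * not part of this API):
 *
 *	void *desc;
 *
 *	hal_srng_access_start(hal_soc, hal_ring);
 *	while ((desc = hal_srng_dst_get_next(hal_soc, hal_ring)) != NULL) {
 *		(process the completed/received descriptor here)
 *	}
 *	hal_srng_access_end(hal_soc, hal_ring);
 */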
/**
 * hal_srng_access_end_reap - Unlock ring access
 * This should be used only if hal_srng_access_start was used to start
 * ring access and should be used only while reaping SRC ring completions
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 * Return: None
 */
static inline void hal_srng_access_end_reap(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;

	SRNG_UNLOCK(&(srng->lock));
}
/* TODO: Check if the following definitions are available in HW headers */
#define WBM_IDLE_DESC_LIST 1
#define WBM_IDLE_SCATTER_BUF_SIZE 32704
#define NUM_MPDUS_PER_LINK_DESC 6
#define NUM_MSDUS_PER_LINK_DESC 7
#define REO_QUEUE_DESC_ALIGN 128

#define LINK_DESC_SIZE (NUM_OF_DWORDS_RX_MSDU_LINK << 2)
#define LINK_DESC_ALIGN 128

/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
 * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
 */
#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12

/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
 * should be specified in 16 word units. But the number of bits defined for
 * this field in HW header files is 5.
 */
#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
/**
 * hal_set_link_desc_addr - Setup link descriptor in a buffer_addr_info
 * HW structure
 *
 * @desc: Descriptor entry (from WBM_IDLE_LINK ring)
 * @cookie: SW cookie for the buffer/descriptor
 * @link_desc_paddr: Physical address of link descriptor entry
 *
 */
static inline void hal_set_link_desc_addr(void *desc, uint32_t cookie,
					  qdf_dma_addr_t link_desc_paddr)
{
	uint32_t *buf_addr = (uint32_t *)desc;

	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_0, BUFFER_ADDR_31_0,
			   link_desc_paddr & 0xffffffff);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, BUFFER_ADDR_39_32,
			   (uint64_t)link_desc_paddr >> 32);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, RETURN_BUFFER_MANAGER,
			   WBM_IDLE_DESC_LIST);
	HAL_DESC_SET_FIELD(buf_addr, BUFFER_ADDR_INFO_1, SW_BUFFER_COOKIE,
			   cookie);
}
/**
 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
 * in an idle list
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_idle_list_scatter_buf_size(void *hal_soc)
{
	return WBM_IDLE_SCATTER_BUF_SIZE;
}

/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_link_desc_size(void *hal_soc)
{
	return LINK_DESC_SIZE;
}

/**
 * hal_get_link_desc_align - Get the required start address alignment for
 * link descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_link_desc_align(void *hal_soc)
{
	return LINK_DESC_ALIGN;
}

/**
 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_mpdus_per_link_desc(void *hal_soc)
{
	return NUM_MPDUS_PER_LINK_DESC;
}

/**
 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_msdus_per_link_desc(void *hal_soc)
{
	return NUM_MSDUS_PER_LINK_DESC;
}

/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_num_mpdu_links_per_queue_desc(void *hal_soc)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}
/**
 * hal_idle_scatter_buf_num_entries - Get the number of link desc entries
 * that can be held in a scatter buffer of the given size
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_buf_size: Size of scatter buffer
 *
 */
static inline uint32_t hal_idle_scatter_buf_num_entries(void *hal_soc,
	uint32_t scatter_buf_size)
{
	return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
		hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK);
}
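
/*
 * For illustration only: with WBM_IDLE_SCATTER_BUF_SIZE = 32704 and
 * WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE = 8, and assuming (hypothetically) a
 * WBM_IDLE_LINK entry size of 8 bytes, each scatter buffer would hold
 * (32704 - 8) / 8 = 4087 link descriptor entries. The actual entry size must
 * always be taken from hal_srng_get_entrysize(hal_soc, WBM_IDLE_LINK), as the
 * function above does.
 */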
/**
 * hal_setup_link_idle_list - Setup scattered idle list using the buffer list
 * provided
 *
 * @hal_soc: Opaque HAL SOC handle
 * @scatter_bufs_base_paddr: Array of physical base addresses
 * @scatter_bufs_base_vaddr: Array of virtual base addresses
 * @num_scatter_bufs: Number of scatter buffers in the above lists
 * @scatter_buf_size: Size of each scatter buffer
 * @last_buf_end_offset: Offset to the last entry in the last scatter buffer
 *
 */
extern void hal_setup_link_idle_list(void *hal_soc,
	qdf_dma_addr_t scatter_bufs_base_paddr[],
	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
	uint32_t scatter_buf_size, uint32_t last_buf_end_offset);
/* REO parameters to be passed to hal_reo_setup */
struct hal_reo_params {
	bool rx_hash_enabled;
};

/**
 * hal_reo_setup - Initialize HW REO block
 *
 * @hal_soc: Opaque HAL SOC handle
 * @reo_params: parameters needed by HAL for REO config
 */
extern void hal_reo_setup(void *hal_soc,
	struct hal_reo_params *reo_params);

enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};
#define HAL_RX_MAX_BA_WINDOW 256

/**
 * hal_get_reo_qdesc_size - Get size of reo queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ba_window_size: BlockAck window size
 *
 */
static inline uint32_t hal_get_reo_qdesc_size(void *hal_soc,
					      uint32_t ba_window_size)
{
	if (ba_window_size <= 1)
		return sizeof(struct rx_reo_queue);

	if (ba_window_size <= 105)
		return sizeof(struct rx_reo_queue) +
			sizeof(struct rx_reo_queue_ext);

	if (ba_window_size <= 210)
		return sizeof(struct rx_reo_queue) +
			(2 * sizeof(struct rx_reo_queue_ext));

	return sizeof(struct rx_reo_queue) +
		(3 * sizeof(struct rx_reo_queue_ext));
}
/**
 * hal_get_reo_qdesc_align - Get start address alignment for reo
 * queue descriptors
 *
 * @hal_soc: Opaque HAL SOC handle
 *
 */
static inline uint32_t hal_get_reo_qdesc_align(void *hal_soc)
{
	return REO_QUEUE_DESC_ALIGN;
}
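
/*
 * Illustrative sketch (hypothetical caller code, not part of this API): a
 * caller sizing the REO queue descriptor for a given BlockAck window would
 * typically allocate hal_get_reo_qdesc_size() bytes of DMA memory aligned to
 * hal_get_reo_qdesc_align() and then hand the memory to hal_reo_qdesc_setup(),
 * declared below:
 *
 *	uint32_t qdesc_size = hal_get_reo_qdesc_size(hal_soc, ba_window_size);
 *	uint32_t qdesc_align = hal_get_reo_qdesc_align(hal_soc);
 *
 *	(allocate qdesc_size bytes aligned to qdesc_align, yielding
 *	 hw_qdesc_vaddr and hw_qdesc_paddr)
 *
 *	hal_reo_qdesc_setup(hal_soc, tid, ba_window_size, start_seq,
 *			    hw_qdesc_vaddr, hw_qdesc_paddr, HAL_PN_NONE);
 */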
/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID for which the queue descriptor is being set up
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 *
 */
extern void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
	int pn_type);
/**
 * hal_srng_get_hp_addr - Get head pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 */
static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (!(srng->flags & HAL_SRNG_LMAC_RING)) {
		/* Currently this interface is required only for LMAC rings */
		return (qdf_dma_addr_t)NULL;
	}

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.hp_addr) -
			 (unsigned long)(hal->shadow_wrptr_mem_vaddr));
	} else {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.hp_addr) -
			 (unsigned long)(hal->shadow_rdptr_mem_vaddr));
	}
}
/**
 * hal_srng_get_tp_addr - Get tail pointer physical address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 *
 */
static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring;
	struct hal_soc *hal = (struct hal_soc *)hal_soc;

	if (!(srng->flags & HAL_SRNG_LMAC_RING)) {
		/* Currently this interface is required only for LMAC rings */
		return (qdf_dma_addr_t)NULL;
	}

	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
		return hal->shadow_rdptr_mem_paddr +
			((unsigned long)(srng->u.src_ring.tp_addr) -
			 (unsigned long)(hal->shadow_rdptr_mem_vaddr));
	} else {
		return hal->shadow_wrptr_mem_paddr +
			((unsigned long)(srng->u.dst_ring.tp_addr) -
			 (unsigned long)(hal->shadow_wrptr_mem_vaddr));
	}
}
/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc: Opaque HAL SOC handle
 * @hal_ring: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
extern void hal_get_srng_params(void *hal_soc, void *hal_ring,
	struct hal_srng_params *ring_params);

/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
extern void hal_get_meminfo(void *hal_soc, struct hal_mem_info *mem);

#endif /* _HAL_API_H_ */