hal_api.h 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641
  1. /*
  2. * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #ifndef _HAL_API_H_
  19. #define _HAL_API_H_
  20. #include "qdf_types.h"
  21. #include "qdf_util.h"
  22. #include "qdf_atomic.h"
  23. #include "hal_internal.h"
  24. #define MAX_UNWINDOWED_ADDRESS 0x80000
  25. #if defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490)
  26. #define WINDOW_ENABLE_BIT 0x40000000
  27. #else
  28. #define WINDOW_ENABLE_BIT 0x80000000
  29. #endif
  30. #define WINDOW_REG_ADDRESS 0x310C
  31. #define WINDOW_SHIFT 19
  32. #define WINDOW_VALUE_MASK 0x3F
  33. #define WINDOW_START MAX_UNWINDOWED_ADDRESS
  34. #define WINDOW_RANGE_MASK 0x7FFFF
  35. /*
  36. * BAR + 4K is always accessible, any access outside this
  37. * space requires force wake procedure.
* OFFSET = 0x4063 (NOTE(review): the original comment said "4K - 32 bytes",
* but 4K - 32 is 0xFE0, not 0x4063 — the derivation looks stale; verify the
* constant against the target's register-map documentation)
  39. */
  40. #define MAPPED_REF_OFF 0x4063
  41. #define FORCE_WAKE_DELAY_TIMEOUT 50
  42. #define FORCE_WAKE_DELAY_MS 5
  43. /**
  44. * hal_ring_desc - opaque handle for DP ring descriptor
  45. */
  46. struct hal_ring_desc;
  47. typedef struct hal_ring_desc *hal_ring_desc_t;
  48. /**
  49. * hal_link_desc - opaque handle for DP link descriptor
  50. */
  51. struct hal_link_desc;
  52. typedef struct hal_link_desc *hal_link_desc_t;
  53. /**
  54. * hal_rxdma_desc - opaque handle for DP rxdma dst ring descriptor
  55. */
  56. struct hal_rxdma_desc;
  57. typedef struct hal_rxdma_desc *hal_rxdma_desc_t;
  58. #ifdef ENABLE_VERBOSE_DEBUG
  59. static inline void
  60. hal_set_verbose_debug(bool flag)
  61. {
  62. is_hal_verbose_debug_enabled = flag;
  63. }
  64. #endif
  65. #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
  66. static inline int hal_force_wake_request(struct hal_soc *soc)
  67. {
  68. return 0;
  69. }
  70. static inline int hal_force_wake_release(struct hal_soc *soc)
  71. {
  72. return 0;
  73. }
  74. #else
  75. static inline int hal_force_wake_request(struct hal_soc *soc)
  76. {
  77. uint32_t timeout = 0;
  78. if (pld_force_wake_request(soc->qdf_dev->dev)) {
  79. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  80. "%s: Request send failed \n", __func__);
  81. return -EINVAL;
  82. }
  83. while (!pld_is_device_awake(soc->qdf_dev->dev) &&
  84. timeout <= FORCE_WAKE_DELAY_TIMEOUT) {
  85. mdelay(FORCE_WAKE_DELAY_MS);
  86. timeout += FORCE_WAKE_DELAY_MS;
  87. }
  88. if (pld_is_device_awake(soc->qdf_dev->dev) == true)
  89. return 0;
  90. else
  91. return -ETIMEDOUT;
  92. }
  93. static inline int hal_force_wake_release(struct hal_soc *soc)
  94. {
  95. return pld_force_wake_release(soc->qdf_dev->dev);
  96. }
  97. #endif
  98. #ifdef PCIE_REG_WINDOW_LOCAL_NO_CACHE
  99. static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
  100. {
  101. uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
  102. qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
  103. WINDOW_ENABLE_BIT | window);
  104. hal_soc->register_window = window;
  105. }
  106. #else
  107. static inline void hal_select_window(struct hal_soc *hal_soc, uint32_t offset)
  108. {
  109. uint32_t window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
  110. if (window != hal_soc->register_window) {
  111. qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_REG_ADDRESS,
  112. WINDOW_ENABLE_BIT | window);
  113. hal_soc->register_window = window;
  114. }
  115. }
  116. #endif
  117. /**
  118. * note1: WINDOW_RANGE_MASK = (1 << WINDOW_SHIFT) -1
  119. * note2: 1 << WINDOW_SHIFT = MAX_UNWINDOWED_ADDRESS
  120. * note3: WINDOW_VALUE_MASK = big enough that trying to write past that window
  121. * would be a bug
  122. */
  123. #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
  124. static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
  125. uint32_t value)
  126. {
  127. if (!hal_soc->use_register_windowing ||
  128. offset < MAX_UNWINDOWED_ADDRESS) {
  129. qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
  130. } else {
  131. qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
  132. hal_select_window(hal_soc, offset);
  133. qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
  134. (offset & WINDOW_RANGE_MASK), value);
  135. qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
  136. }
  137. }
  138. #else
  139. static inline void hal_write32_mb(struct hal_soc *hal_soc, uint32_t offset,
  140. uint32_t value)
  141. {
  142. if ((offset > MAPPED_REF_OFF) &&
  143. hal_force_wake_request(hal_soc)) {
  144. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  145. "%s: Wake up request failed\n", __func__);
  146. return;
  147. }
  148. if (!hal_soc->use_register_windowing ||
  149. offset < MAX_UNWINDOWED_ADDRESS) {
  150. qdf_iowrite32(hal_soc->dev_base_addr + offset, value);
  151. } else {
  152. qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
  153. hal_select_window(hal_soc, offset);
  154. qdf_iowrite32(hal_soc->dev_base_addr + WINDOW_START +
  155. (offset & WINDOW_RANGE_MASK), value);
  156. qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
  157. }
  158. if ((offset > MAPPED_REF_OFF) &&
  159. hal_force_wake_release(hal_soc))
  160. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  161. "%s: Wake up release failed\n", __func__);
  162. }
  163. #endif
  164. /**
  165. * hal_write_address_32_mb - write a value to a register
  166. *
  167. */
  168. static inline void hal_write_address_32_mb(struct hal_soc *hal_soc,
  169. void __iomem *addr, uint32_t value)
  170. {
  171. uint32_t offset;
  172. if (!hal_soc->use_register_windowing)
  173. return qdf_iowrite32(addr, value);
  174. offset = addr - hal_soc->dev_base_addr;
  175. hal_write32_mb(hal_soc, offset, value);
  176. }
  177. #if !defined(QCA_WIFI_QCA6390) && !defined(QCA_WIFI_QCA6490)
  178. static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
  179. {
  180. uint32_t ret;
  181. if (!hal_soc->use_register_windowing ||
  182. offset < MAX_UNWINDOWED_ADDRESS) {
  183. return qdf_ioread32(hal_soc->dev_base_addr + offset);
  184. }
  185. qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
  186. hal_select_window(hal_soc, offset);
  187. ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
  188. (offset & WINDOW_RANGE_MASK));
  189. qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
  190. return ret;
  191. }
  192. /**
  193. * hal_read_address_32_mb() - Read 32-bit value from the register
  194. * @soc: soc handle
  195. * @addr: register address to read
  196. *
  197. * Return: 32-bit value
  198. */
  199. static inline uint32_t hal_read_address_32_mb(struct hal_soc *soc,
  200. void __iomem *addr)
  201. {
  202. uint32_t offset;
  203. uint32_t ret;
  204. if (!soc->use_register_windowing)
  205. return qdf_ioread32(addr);
  206. offset = addr - soc->dev_base_addr;
  207. ret = hal_read32_mb(soc, offset);
  208. return ret;
  209. }
  210. #else
  211. static inline uint32_t hal_read32_mb(struct hal_soc *hal_soc, uint32_t offset)
  212. {
  213. uint32_t ret;
  214. if ((offset > MAPPED_REF_OFF) &&
  215. hal_force_wake_request(hal_soc)) {
  216. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  217. "%s: Wake up request failed\n", __func__);
  218. return -EINVAL;
  219. }
  220. if (!hal_soc->use_register_windowing ||
  221. offset < MAX_UNWINDOWED_ADDRESS) {
  222. return qdf_ioread32(hal_soc->dev_base_addr + offset);
  223. }
  224. qdf_spin_lock_irqsave(&hal_soc->register_access_lock);
  225. hal_select_window(hal_soc, offset);
  226. ret = qdf_ioread32(hal_soc->dev_base_addr + WINDOW_START +
  227. (offset & WINDOW_RANGE_MASK));
  228. qdf_spin_unlock_irqrestore(&hal_soc->register_access_lock);
  229. if ((offset > MAPPED_REF_OFF) &&
  230. hal_force_wake_release(hal_soc))
  231. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  232. "%s: Wake up release failed\n", __func__);
  233. return ret;
  234. }
  235. static inline uint32_t hal_read_address_32_mb(struct hal_soc *soc,
  236. void __iomem *addr)
  237. {
  238. uint32_t offset;
  239. uint32_t ret;
  240. if (!soc->use_register_windowing)
  241. return qdf_ioread32(addr);
  242. offset = addr - soc->dev_base_addr;
  243. ret = hal_read32_mb(soc, offset);
  244. return ret;
  245. }
  246. #endif
  247. #include "hif_io32.h"
  248. /**
  249. * hal_attach - Initialize HAL layer
  250. * @hif_handle: Opaque HIF handle
  251. * @qdf_dev: QDF device
  252. *
  253. * Return: Opaque HAL SOC handle
  254. * NULL on failure (if given ring is not available)
  255. *
  256. * This function should be called as part of HIF initialization (for accessing
  257. * copy engines). DP layer will get hal_soc handle using hif_get_hal_handle()
  258. */
  259. void *hal_attach(struct hif_opaque_softc *hif_handle, qdf_device_t qdf_dev);
  260. /**
  261. * hal_detach - Detach HAL layer
  262. * @hal_soc: HAL SOC handle
  263. *
  264. * This function should be called as part of HIF detach
  265. *
  266. */
  267. extern void hal_detach(void *hal_soc);
  268. /* SRNG type to be passed in APIs hal_srng_get_entrysize and hal_srng_setup */
enum hal_ring_type {
	/* REO (reorder engine) rings */
	REO_DST = 0,
	REO_EXCEPTION = 1,
	REO_REINJECT = 2,
	REO_CMD = 3,
	REO_STATUS = 4,
	/* TCL (transmit classifier) rings */
	TCL_DATA = 5,
	TCL_CMD = 6,
	TCL_STATUS = 7,
	/* Copy-engine rings */
	CE_SRC = 8,
	CE_DST = 9,
	CE_DST_STATUS = 10,
	/* WBM (buffer manager) rings */
	WBM_IDLE_LINK = 11,
	SW2WBM_RELEASE = 12,
	WBM2SW_RELEASE = 13,
	/* RXDMA rings */
	RXDMA_BUF = 14,
	RXDMA_DST = 15,
	RXDMA_MONITOR_BUF = 16,
	RXDMA_MONITOR_STATUS = 17,
	RXDMA_MONITOR_DST = 18,
	RXDMA_MONITOR_DESC = 19,
	DIR_BUF_RX_DMA_SRC = 20,
#ifdef WLAN_FEATURE_CIF_CFR
	/* Channel-capture (CFR) source ring; no explicit value, follows 20 */
	WIFI_POS_SRC,
#endif
	MAX_RING_TYPES
};
  296. #define HAL_SRNG_LMAC_RING 0x80000000
  297. /* SRNG flags passed in hal_srng_params.flags */
  298. #define HAL_SRNG_MSI_SWAP 0x00000008
  299. #define HAL_SRNG_RING_PTR_SWAP 0x00000010
  300. #define HAL_SRNG_DATA_TLV_SWAP 0x00000020
  301. #define HAL_SRNG_LOW_THRES_INTR_ENABLE 0x00010000
  302. #define HAL_SRNG_MSI_INTR 0x00020000
  303. #define HAL_SRNG_CACHED_DESC 0x00040000
  304. #define PN_SIZE_24 0
  305. #define PN_SIZE_48 1
  306. #define PN_SIZE_128 2
  307. /**
  308. * hal_srng_get_entrysize - Returns size of ring entry in bytes. Should be
  309. * used by callers for calculating the size of memory to be allocated before
  310. * calling hal_srng_setup to setup the ring
  311. *
  312. * @hal_soc: Opaque HAL SOC handle
  313. * @ring_type: one of the types from hal_ring_type
  314. *
  315. */
  316. extern uint32_t hal_srng_get_entrysize(void *hal_soc, int ring_type);
  317. /**
  318. * hal_srng_max_entries - Returns maximum possible number of ring entries
  319. * @hal_soc: Opaque HAL SOC handle
  320. * @ring_type: one of the types from hal_ring_type
  321. *
  322. * Return: Maximum number of entries for the given ring_type
  323. */
  324. uint32_t hal_srng_max_entries(void *hal_soc, int ring_type);
  325. /**
  326. * hal_srng_dump - Dump ring status
  327. * @srng: hal srng pointer
  328. */
  329. void hal_srng_dump(struct hal_srng *srng);
  330. /**
  331. * hal_srng_get_dir - Returns the direction of the ring
  332. * @hal_soc: Opaque HAL SOC handle
  333. * @ring_type: one of the types from hal_ring_type
  334. *
  335. * Return: Ring direction
  336. */
  337. enum hal_srng_dir hal_srng_get_dir(void *hal_soc, int ring_type);
  338. /* HAL memory information */
struct hal_mem_info {
	/* dev base virtual addr */
	void *dev_base_addr;
	/* dev base physical addr */
	void *dev_base_paddr;
	/* Remote virtual pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_vaddr;
	/* Remote physical pointer memory for HW/FW updates */
	void *shadow_rdptr_mem_paddr;
	/* Shared memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_vaddr;
	/* Shared physical memory for ring pointer updates from host to FW */
	void *shadow_wrptr_mem_paddr;
};
  353. /* SRNG parameters to be passed to hal_srng_setup */
struct hal_srng_params {
	/* Physical base address of the ring */
	qdf_dma_addr_t ring_base_paddr;
	/* Virtual base address of the ring */
	void *ring_base_vaddr;
	/* Number of entries in ring */
	uint32_t num_entries;
	/* max transfer length */
	uint16_t max_buffer_length;
	/* MSI Address */
	qdf_dma_addr_t msi_addr;
	/* MSI data */
	uint32_t msi_data;
	/* Interrupt timer threshold - in micro seconds */
	uint32_t intr_timer_thres_us;
	/* Interrupt batch counter threshold - in number of ring entries */
	uint32_t intr_batch_cntr_thres_entries;
	/* Low threshold - in number of ring entries
	 * (valid for src rings only)
	 */
	uint32_t low_threshold;
	/* Misc flags (HAL_SRNG_* flag bits defined above) */
	uint32_t flags;
	/* Unique ring id */
	uint8_t ring_id;
	/* Source or Destination ring */
	enum hal_srng_dir ring_dir;
	/* Size of ring entry */
	uint32_t entry_size;
	/* hw register base address */
	void *hwreg_base[MAX_SRNG_REG_GROUPS];
};
  386. /* hal_construct_shadow_config() - initialize the shadow registers for dp rings
  387. * @hal_soc: hal handle
  388. *
  389. * Return: QDF_STATUS_OK on success
  390. */
  391. extern QDF_STATUS hal_construct_shadow_config(void *hal_soc);
  392. /* hal_set_one_shadow_config() - add a config for the specified ring
  393. * @hal_soc: hal handle
  394. * @ring_type: ring type
  395. * @ring_num: ring num
  396. *
  397. * The ring type and ring num uniquely specify the ring. After this call,
  398. * the hp/tp will be added as the next entry int the shadow register
  399. * configuration table. The hal code will use the shadow register address
  400. * in place of the hp/tp address.
  401. *
  402. * This function is exposed, so that the CE module can skip configuring shadow
  403. * registers for unused ring and rings assigned to the firmware.
  404. *
  405. * Return: QDF_STATUS_OK on success
  406. */
  407. extern QDF_STATUS hal_set_one_shadow_config(void *hal_soc, int ring_type,
  408. int ring_num);
  409. /**
  410. * hal_get_shadow_config() - retrieve the config table
  411. * @hal_soc: hal handle
  412. * @shadow_config: will point to the table after
  413. * @num_shadow_registers_configured: will contain the number of valid entries
  414. */
  415. extern void hal_get_shadow_config(void *hal_soc,
  416. struct pld_shadow_reg_v2_cfg **shadow_config,
  417. int *num_shadow_registers_configured);
  418. /**
  419. * hal_srng_setup - Initialize HW SRNG ring.
  420. *
  421. * @hal_soc: Opaque HAL SOC handle
  422. * @ring_type: one of the types from hal_ring_type
  423. * @ring_num: Ring number if there are multiple rings of
* same type (starting from 0)
  425. * @mac_id: valid MAC Id should be passed if ring type is one of lmac rings
  426. * @ring_params: SRNG ring params in hal_srng_params structure.
  427. * Callers are expected to allocate contiguous ring memory of size
  428. * 'num_entries * entry_size' bytes and pass the physical and virtual base
  429. * addresses through 'ring_base_paddr' and 'ring_base_vaddr' in hal_srng_params
  430. * structure. Ring base address should be 8 byte aligned and size of each ring
  431. * entry should be queried using the API hal_srng_get_entrysize
  432. *
  433. * Return: Opaque pointer to ring on success
  434. * NULL on failure (if given ring is not available)
  435. */
  436. extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
  437. int mac_id, struct hal_srng_params *ring_params);
  438. /* Remapping ids of REO rings */
  439. #define REO_REMAP_TCL 0
  440. #define REO_REMAP_SW1 1
  441. #define REO_REMAP_SW2 2
  442. #define REO_REMAP_SW3 3
  443. #define REO_REMAP_SW4 4
  444. #define REO_REMAP_RELEASE 5
  445. #define REO_REMAP_FW 6
  446. #define REO_REMAP_UNUSED 7
  447. /*
  448. * currently this macro only works for IX0 since all the rings we are remapping
  449. * can be remapped from HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
  450. */
  451. #define HAL_REO_REMAP_VAL(_ORIGINAL_DEST, _NEW_DEST) \
  452. HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST)
  453. /* allow the destination macros to be expanded */
  454. #define HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST) \
  455. (_NEW_DEST << \
  456. (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
  457. _ORIGINAL_DEST ## _SHFT))
  458. /**
  459. * hal_reo_read_write_ctrl_ix - Read or write REO_DESTINATION_RING_CTRL_IX
  460. * @hal_soc_hdl: HAL SOC handle
  461. * @read: boolean value to indicate if read or write
  462. * @ix0: pointer to store IX0 reg value
  463. * @ix1: pointer to store IX1 reg value
  464. * @ix2: pointer to store IX2 reg value
  465. * @ix3: pointer to store IX3 reg value
  466. */
  467. void hal_reo_read_write_ctrl_ix(hal_soc_handle_t hal_soc_hdl, bool read,
  468. uint32_t *ix0, uint32_t *ix1,
  469. uint32_t *ix2, uint32_t *ix3);
  470. /**
  471. * hal_srng_set_hp_paddr() - Set physical address to dest SRNG head pointer
  472. * @sring: sring pointer
  473. * @paddr: physical address
  474. */
  475. extern void hal_srng_dst_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);
  476. /**
* hal_srng_dst_init_hp() - Initialize head pointer with cached head pointer
  478. * @srng: sring pointer
  479. * @vaddr: virtual address
  480. */
  481. extern void hal_srng_dst_init_hp(struct hal_srng *srng, uint32_t *vaddr);
  482. /**
  483. * hal_srng_cleanup - Deinitialize HW SRNG ring.
  484. * @hal_soc: Opaque HAL SOC handle
  485. * @hal_srng: Opaque HAL SRNG pointer
  486. */
  487. void hal_srng_cleanup(void *hal_soc, hal_ring_handle_t hal_ring_hdl);
  488. static inline bool hal_srng_initialized(hal_ring_handle_t hal_ring_hdl)
  489. {
  490. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  491. return !!srng->initialized;
  492. }
  493. /**
  494. * hal_srng_dst_peek - Check if there are any entries in the ring (peek)
  495. * @hal_soc: Opaque HAL SOC handle
  496. * @hal_ring_hdl: Destination ring pointer
  497. *
  498. * Caller takes responsibility for any locking needs.
  499. *
* Return: Opaque pointer for next ring entry; NULL on failure
  501. */
  502. static inline
  503. void *hal_srng_dst_peek(hal_soc_handle_t hal_soc_hdl,
  504. hal_ring_handle_t hal_ring_hdl)
  505. {
  506. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  507. if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
  508. return (void *)(&srng->ring_base_vaddr[srng->u.dst_ring.tp]);
  509. return NULL;
  510. }
  511. /**
  512. * hal_srng_access_start_unlocked - Start ring access (unlocked). Should use
  513. * hal_srng_access_start if locked access is required
  514. *
  515. * @hal_soc: Opaque HAL SOC handle
  516. * @hal_ring_hdl: Ring pointer (Source or Destination ring)
  517. *
* Return: 0 on success; error on failure
  519. */
static inline int
hal_srng_access_start_unlocked(hal_soc_handle_t hal_soc_hdl,
			       hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *desc;

	/* Snapshot the HW/FW-updated pointer; volatile read because the
	 * pointed-to location is written outside this CPU's control.
	 */
	if (srng->ring_dir == HAL_SRNG_SRC_RING)
		srng->u.src_ring.cached_tp =
			*(volatile uint32_t *)(srng->u.src_ring.tp_addr);
	else {
		srng->u.dst_ring.cached_hp =
			*(volatile uint32_t *)(srng->u.dst_ring.hp_addr);

		if (srng->flags & HAL_SRNG_CACHED_DESC) {
			/* Pre-sync and prefetch the first pending descriptor
			 * so the subsequent get_next sees fresh data.
			 */
			desc = hal_srng_dst_peek(hal_soc_hdl, hal_ring_hdl);
			if (qdf_likely(desc)) {
				qdf_mem_dma_cache_sync(soc->qdf_dev,
						       qdf_mem_virt_to_phys
						       (desc),
						       QDF_DMA_FROM_DEVICE,
						       (srng->entry_size *
							sizeof(uint32_t)));
				qdf_prefetch(desc);
			}
		}
	}

	return 0;
}
  548. /**
  549. * hal_srng_access_start - Start (locked) ring access
  550. *
  551. * @hal_soc: Opaque HAL SOC handle
  552. * @hal_ring_hdl: Ring pointer (Source or Destination ring)
  553. *
* Return: 0 on success; error on failure
  555. */
  556. static inline int hal_srng_access_start(hal_soc_handle_t hal_soc_hdl,
  557. hal_ring_handle_t hal_ring_hdl)
  558. {
  559. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  560. if (qdf_unlikely(!hal_ring_hdl)) {
  561. qdf_print("Error: Invalid hal_ring\n");
  562. return -EINVAL;
  563. }
  564. SRNG_LOCK(&(srng->lock));
  565. return hal_srng_access_start_unlocked(hal_soc_hdl, hal_ring_hdl);
  566. }
  567. /**
  568. * hal_srng_dst_get_next - Get next entry from a destination ring and move
  569. * cached tail pointer
  570. *
  571. * @hal_soc: Opaque HAL SOC handle
  572. * @hal_ring_hdl: Destination ring pointer
  573. *
* Return: Opaque pointer for next ring entry; NULL on failure
  575. */
static inline
void *hal_srng_dst_get_next(void *hal_soc,
			    hal_ring_handle_t hal_ring_hdl)
{
	struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
	struct hal_soc *soc = (struct hal_soc *)hal_soc;
	uint32_t *desc;
	uint32_t *desc_next;
	uint32_t tp;

	/* Non-empty when the tail has not caught up with the cached head */
	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
		desc = &(srng->ring_base_vaddr[srng->u.dst_ring.tp]);
		/* TODO: Using % is expensive, but we have to do this since
		 * size of some SRNG rings is not power of 2 (due to descriptor
		 * sizes). Need to create separate API for rings used
		 * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
		 * SW2RXDMA and CE rings)
		 */
		srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
			srng->ring_size;
		if (srng->flags & HAL_SRNG_CACHED_DESC) {
			/* Sync + prefetch the descriptor that will be
			 * returned on the NEXT call, hiding DMA latency.
			 */
			tp = srng->u.dst_ring.tp;
			desc_next = &srng->ring_base_vaddr[tp];
			qdf_mem_dma_cache_sync(soc->qdf_dev,
					       qdf_mem_virt_to_phys(desc_next),
					       QDF_DMA_FROM_DEVICE,
					       (srng->entry_size *
						sizeof(uint32_t)));
			qdf_prefetch(desc_next);
		}
		return (void *)desc;
	}

	return NULL;
}
  609. /**
  610. * hal_srng_dst_get_next_hp - Get next entry from a destination ring and move
  611. * cached head pointer
  612. *
  613. * @hal_soc: Opaque HAL SOC handle
  614. * @hal_ring_hdl: Destination ring pointer
  615. *
* Return: Opaque pointer for next ring entry; NULL on failure
  617. */
  618. static inline void *
  619. hal_srng_dst_get_next_hp(hal_soc_handle_t hal_soc_hdl,
  620. hal_ring_handle_t hal_ring_hdl)
  621. {
  622. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  623. uint32_t *desc;
  624. /* TODO: Using % is expensive, but we have to do this since
  625. * size of some SRNG rings is not power of 2 (due to descriptor
  626. * sizes). Need to create separate API for rings used
  627. * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
  628. * SW2RXDMA and CE rings)
  629. */
  630. uint32_t next_hp = (srng->u.dst_ring.cached_hp + srng->entry_size) %
  631. srng->ring_size;
  632. if (next_hp != srng->u.dst_ring.tp) {
  633. desc = &(srng->ring_base_vaddr[srng->u.dst_ring.cached_hp]);
  634. srng->u.dst_ring.cached_hp = next_hp;
  635. return (void *)desc;
  636. }
  637. return NULL;
  638. }
  639. /**
  640. * hal_srng_dst_peek_sync - Check if there are any entries in the ring (peek)
  641. * @hal_soc: Opaque HAL SOC handle
  642. * @hal_ring_hdl: Destination ring pointer
  643. *
  644. * Sync cached head pointer with HW.
  645. * Caller takes responsibility for any locking needs.
  646. *
* Return: Opaque pointer for next ring entry; NULL on failure
  648. */
  649. static inline
  650. void *hal_srng_dst_peek_sync(hal_soc_handle_t hal_soc_hdl,
  651. hal_ring_handle_t hal_ring_hdl)
  652. {
  653. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  654. srng->u.dst_ring.cached_hp =
  655. *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
  656. if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
  657. return (void *)(&(srng->ring_base_vaddr[srng->u.dst_ring.tp]));
  658. return NULL;
  659. }
  660. /**
  661. * hal_srng_dst_peek_sync_locked - Peek for any entries in the ring
  662. * @hal_soc: Opaque HAL SOC handle
  663. * @hal_ring_hdl: Destination ring pointer
  664. *
  665. * Sync cached head pointer with HW.
  666. * This function takes up SRNG_LOCK. Should not be called with SRNG lock held.
  667. *
* Return: Opaque pointer for next ring entry; NULL on failure
  669. */
  670. static inline
  671. void *hal_srng_dst_peek_sync_locked(hal_soc_handle_t hal_soc_hdl,
  672. hal_ring_handle_t hal_ring_hdl)
  673. {
  674. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  675. void *ring_desc_ptr = NULL;
  676. if (qdf_unlikely(!hal_ring_hdl)) {
  677. qdf_print("Error: Invalid hal_ring\n");
  678. return NULL;
  679. }
  680. SRNG_LOCK(&srng->lock);
  681. ring_desc_ptr = hal_srng_dst_peek_sync(hal_soc_hdl, hal_ring_hdl);
  682. SRNG_UNLOCK(&srng->lock);
  683. return ring_desc_ptr;
  684. }
  685. /**
  686. * hal_srng_dst_num_valid - Returns number of valid entries (to be processed
  687. * by SW) in destination ring
  688. *
  689. * @hal_soc: Opaque HAL SOC handle
  690. * @hal_ring_hdl: Destination ring pointer
  691. * @sync_hw_ptr: Sync cached head pointer with HW
  692. *
  693. */
  694. static inline uint32_t hal_srng_dst_num_valid(void *hal_soc,
  695. hal_ring_handle_t hal_ring_hdl,
  696. int sync_hw_ptr)
  697. {
  698. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  699. uint32_t hp;
  700. uint32_t tp = srng->u.dst_ring.tp;
  701. if (sync_hw_ptr) {
  702. hp = *(volatile uint32_t *)(srng->u.dst_ring.hp_addr);
  703. srng->u.dst_ring.cached_hp = hp;
  704. } else {
  705. hp = srng->u.dst_ring.cached_hp;
  706. }
  707. if (hp >= tp)
  708. return (hp - tp) / srng->entry_size;
  709. else
  710. return (srng->ring_size - tp + hp) / srng->entry_size;
  711. }
  712. /**
  713. * hal_srng_src_reap_next - Reap next entry from a source ring and move reap
  714. * pointer. This can be used to release any buffers associated with completed
  715. * ring entries. Note that this should not be used for posting new descriptor
  716. * entries. Posting of new entries should be done only using
  717. * hal_srng_src_get_next_reaped when this function is used for reaping.
  718. *
  719. * @hal_soc: Opaque HAL SOC handle
  720. * @hal_ring_hdl: Source ring pointer
  721. *
  722. * Return: Opaque pointer for next ring entry; NULL on failire
  723. */
  724. static inline void *
  725. hal_srng_src_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
  726. {
  727. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  728. uint32_t *desc;
  729. /* TODO: Using % is expensive, but we have to do this since
  730. * size of some SRNG rings is not power of 2 (due to descriptor
  731. * sizes). Need to create separate API for rings used
  732. * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
  733. * SW2RXDMA and CE rings)
  734. */
  735. uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
  736. srng->ring_size;
  737. if (next_reap_hp != srng->u.src_ring.cached_tp) {
  738. desc = &(srng->ring_base_vaddr[next_reap_hp]);
  739. srng->u.src_ring.reap_hp = next_reap_hp;
  740. return (void *)desc;
  741. }
  742. return NULL;
  743. }
  744. /**
  745. * hal_srng_src_get_next_reaped - Get next entry from a source ring that is
  746. * already reaped using hal_srng_src_reap_next, for posting new entries to
  747. * the ring
  748. *
  749. * @hal_soc: Opaque HAL SOC handle
  750. * @hal_ring_hdl: Source ring pointer
  751. *
  752. * Return: Opaque pointer for next (reaped) source ring entry; NULL on failire
  753. */
  754. static inline void *
  755. hal_srng_src_get_next_reaped(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
  756. {
  757. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  758. uint32_t *desc;
  759. if (srng->u.src_ring.hp != srng->u.src_ring.reap_hp) {
  760. desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
  761. srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
  762. srng->ring_size;
  763. return (void *)desc;
  764. }
  765. return NULL;
  766. }
  767. /**
  768. * hal_srng_src_pending_reap_next - Reap next entry from a source ring and
  769. * move reap pointer. This API is used in detach path to release any buffers
  770. * associated with ring entries which are pending reap.
  771. *
  772. * @hal_soc: Opaque HAL SOC handle
  773. * @hal_ring_hdl: Source ring pointer
  774. *
  775. * Return: Opaque pointer for next ring entry; NULL on failire
  776. */
  777. static inline void *
  778. hal_srng_src_pending_reap_next(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
  779. {
  780. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  781. uint32_t *desc;
  782. uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
  783. srng->ring_size;
  784. if (next_reap_hp != srng->u.src_ring.hp) {
  785. desc = &(srng->ring_base_vaddr[next_reap_hp]);
  786. srng->u.src_ring.reap_hp = next_reap_hp;
  787. return (void *)desc;
  788. }
  789. return NULL;
  790. }
  791. /**
  792. * hal_srng_src_done_val -
  793. *
  794. * @hal_soc: Opaque HAL SOC handle
  795. * @hal_ring_hdl: Source ring pointer
  796. *
  797. * Return: Opaque pointer for next ring entry; NULL on failire
  798. */
  799. static inline uint32_t
  800. hal_srng_src_done_val(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
  801. {
  802. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  803. /* TODO: Using % is expensive, but we have to do this since
  804. * size of some SRNG rings is not power of 2 (due to descriptor
  805. * sizes). Need to create separate API for rings used
  806. * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
  807. * SW2RXDMA and CE rings)
  808. */
  809. uint32_t next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
  810. srng->ring_size;
  811. if (next_reap_hp == srng->u.src_ring.cached_tp)
  812. return 0;
  813. if (srng->u.src_ring.cached_tp > next_reap_hp)
  814. return (srng->u.src_ring.cached_tp - next_reap_hp) /
  815. srng->entry_size;
  816. else
  817. return ((srng->ring_size - next_reap_hp) +
  818. srng->u.src_ring.cached_tp) / srng->entry_size;
  819. }
  820. /**
  821. * hal_get_entrysize_from_srng() - Retrieve ring entry size
  822. * @hal_ring_hdl: Source ring pointer
  823. *
  824. * Return: uint8_t
  825. */
  826. static inline
  827. uint8_t hal_get_entrysize_from_srng(hal_ring_handle_t hal_ring_hdl)
  828. {
  829. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  830. return srng->entry_size;
  831. }
  832. /**
  833. * hal_get_sw_hptp - Get SW head and tail pointer location for any ring
  834. * @hal_soc: Opaque HAL SOC handle
  835. * @hal_ring_hdl: Source ring pointer
  836. * @tailp: Tail Pointer
  837. * @headp: Head Pointer
  838. *
  839. * Return: Update tail pointer and head pointer in arguments.
  840. */
  841. static inline
  842. void hal_get_sw_hptp(void *hal_soc, hal_ring_handle_t hal_ring_hdl,
  843. uint32_t *tailp, uint32_t *headp)
  844. {
  845. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  846. if (srng->ring_dir == HAL_SRNG_SRC_RING) {
  847. *headp = srng->u.src_ring.hp;
  848. *tailp = *srng->u.src_ring.tp_addr;
  849. } else {
  850. *tailp = srng->u.dst_ring.tp;
  851. *headp = *srng->u.dst_ring.hp_addr;
  852. }
  853. }
  854. /**
  855. * hal_srng_src_get_next - Get next entry from a source ring and move cached tail pointer
  856. *
  857. * @hal_soc: Opaque HAL SOC handle
  858. * @hal_ring_hdl: Source ring pointer
  859. *
  860. * Return: Opaque pointer for next ring entry; NULL on failire
  861. */
  862. static inline
  863. void *hal_srng_src_get_next(void *hal_soc,
  864. hal_ring_handle_t hal_ring_hdl)
  865. {
  866. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  867. uint32_t *desc;
  868. /* TODO: Using % is expensive, but we have to do this since
  869. * size of some SRNG rings is not power of 2 (due to descriptor
  870. * sizes). Need to create separate API for rings used
  871. * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
  872. * SW2RXDMA and CE rings)
  873. */
  874. uint32_t next_hp = (srng->u.src_ring.hp + srng->entry_size) %
  875. srng->ring_size;
  876. if (next_hp != srng->u.src_ring.cached_tp) {
  877. desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
  878. srng->u.src_ring.hp = next_hp;
  879. /* TODO: Since reap function is not used by all rings, we can
  880. * remove the following update of reap_hp in this function
  881. * if we can ensure that only hal_srng_src_get_next_reaped
  882. * is used for the rings requiring reap functionality
  883. */
  884. srng->u.src_ring.reap_hp = next_hp;
  885. return (void *)desc;
  886. }
  887. return NULL;
  888. }
  889. /**
  890. * hal_srng_src_peek - Get next entry from a ring without moving head pointer.
  891. * hal_srng_src_get_next should be called subsequently to move the head pointer
  892. *
  893. * @hal_soc: Opaque HAL SOC handle
  894. * @hal_ring_hdl: Source ring pointer
  895. *
  896. * Return: Opaque pointer for next ring entry; NULL on failire
  897. */
  898. static inline
  899. void *hal_srng_src_peek(hal_soc_handle_t hal_soc_hdl,
  900. hal_ring_handle_t hal_ring_hdl)
  901. {
  902. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  903. uint32_t *desc;
  904. /* TODO: Using % is expensive, but we have to do this since
  905. * size of some SRNG rings is not power of 2 (due to descriptor
  906. * sizes). Need to create separate API for rings used
  907. * per-packet, with sizes power of 2 (TCL2SW, REO2SW,
  908. * SW2RXDMA and CE rings)
  909. */
  910. if (((srng->u.src_ring.hp + srng->entry_size) %
  911. srng->ring_size) != srng->u.src_ring.cached_tp) {
  912. desc = &(srng->ring_base_vaddr[srng->u.src_ring.hp]);
  913. return (void *)desc;
  914. }
  915. return NULL;
  916. }
  917. /**
  918. * hal_srng_src_num_avail - Returns number of available entries in src ring
  919. *
  920. * @hal_soc: Opaque HAL SOC handle
  921. * @hal_ring_hdl: Source ring pointer
  922. * @sync_hw_ptr: Sync cached tail pointer with HW
  923. *
  924. */
  925. static inline uint32_t
  926. hal_srng_src_num_avail(void *hal_soc,
  927. hal_ring_handle_t hal_ring_hdl, int sync_hw_ptr)
  928. {
  929. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  930. uint32_t tp;
  931. uint32_t hp = srng->u.src_ring.hp;
  932. if (sync_hw_ptr) {
  933. tp = *(srng->u.src_ring.tp_addr);
  934. srng->u.src_ring.cached_tp = tp;
  935. } else {
  936. tp = srng->u.src_ring.cached_tp;
  937. }
  938. if (tp > hp)
  939. return ((tp - hp) / srng->entry_size) - 1;
  940. else
  941. return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
  942. }
  943. /**
  944. * hal_srng_access_end_unlocked - End ring access (unlocked) - update cached
  945. * ring head/tail pointers to HW.
  946. * This should be used only if hal_srng_access_start_unlocked to start ring
  947. * access
  948. *
  949. * @hal_soc: Opaque HAL SOC handle
  950. * @hal_ring_hdl: Ring pointer (Source or Destination ring)
  951. *
  952. * Return: 0 on success; error on failire
  953. */
  954. static inline void
  955. hal_srng_access_end_unlocked(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
  956. {
  957. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  958. /* TODO: See if we need a write memory barrier here */
  959. if (srng->flags & HAL_SRNG_LMAC_RING) {
  960. /* For LMAC rings, ring pointer updates are done through FW and
  961. * hence written to a shared memory location that is read by FW
  962. */
  963. if (srng->ring_dir == HAL_SRNG_SRC_RING) {
  964. *(srng->u.src_ring.hp_addr) = srng->u.src_ring.hp;
  965. } else {
  966. *(srng->u.dst_ring.tp_addr) = srng->u.dst_ring.tp;
  967. }
  968. } else {
  969. if (srng->ring_dir == HAL_SRNG_SRC_RING)
  970. hal_write_address_32_mb(hal_soc,
  971. srng->u.src_ring.hp_addr,
  972. srng->u.src_ring.hp);
  973. else
  974. hal_write_address_32_mb(hal_soc,
  975. srng->u.dst_ring.tp_addr,
  976. srng->u.dst_ring.tp);
  977. }
  978. }
  979. /**
  980. * hal_srng_access_end - Unlock ring access and update cached ring head/tail
  981. * pointers to HW
  982. * This should be used only if hal_srng_access_start to start ring access
  983. *
  984. * @hal_soc: Opaque HAL SOC handle
  985. * @hal_ring_hdl: Ring pointer (Source or Destination ring)
  986. *
  987. * Return: 0 on success; error on failire
  988. */
  989. static inline void
  990. hal_srng_access_end(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
  991. {
  992. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  993. if (qdf_unlikely(!hal_ring_hdl)) {
  994. qdf_print("Error: Invalid hal_ring\n");
  995. return;
  996. }
  997. hal_srng_access_end_unlocked(hal_soc, hal_ring_hdl);
  998. SRNG_UNLOCK(&(srng->lock));
  999. }
  1000. /**
  1001. * hal_srng_access_end_reap - Unlock ring access
  1002. * This should be used only if hal_srng_access_start to start ring access
  1003. * and should be used only while reaping SRC ring completions
  1004. *
  1005. * @hal_soc: Opaque HAL SOC handle
  1006. * @hal_ring_hdl: Ring pointer (Source or Destination ring)
  1007. *
  1008. * Return: 0 on success; error on failire
  1009. */
  1010. static inline void
  1011. hal_srng_access_end_reap(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
  1012. {
  1013. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  1014. SRNG_UNLOCK(&(srng->lock));
  1015. }
/* TODO: Check if the following definitions are available in HW headers */
#define WBM_IDLE_SCATTER_BUF_SIZE 32704
#define NUM_MPDUS_PER_LINK_DESC 6
#define NUM_MSDUS_PER_LINK_DESC 7
#define REO_QUEUE_DESC_ALIGN 128
#define LINK_DESC_ALIGN 128
#define ADDRESS_MATCH_TAG_VAL 0x5
/* Number of mpdu link pointers is 9 in case of TX_MPDU_QUEUE_HEAD and 14 in
 * case of TX_MPDU_QUEUE_EXT. We are defining a common average count here
 */
#define NUM_MPDU_LINKS_PER_QUEUE_DESC 12
/* TODO: Check with HW team on the scatter buffer size supported. As per WBM
 * MLD, scatter_buffer_size in IDLE_LIST_CONTROL register is 9 bits and size
 * should be specified in 16 word units. But the number of bits defined for
 * this field in HW header files is 5.
 */
#define WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE 8
/**
 * hal_idle_list_scatter_buf_size - Get the size of each scatter buffer
 * in an idle list
 *
 * @hal_soc_hdl: Opaque HAL SOC handle (unused; kept for API uniformity)
 *
 * Return: scatter buffer size in bytes (WBM_IDLE_SCATTER_BUF_SIZE)
 */
static inline
uint32_t hal_idle_list_scatter_buf_size(hal_soc_handle_t hal_soc_hdl)
{
	return WBM_IDLE_SCATTER_BUF_SIZE;
}
/**
 * hal_get_link_desc_size - Get the size of each link descriptor
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 *
 * Return: link descriptor size from the target-specific ops callback;
 * -EINVAL when the ops table or callback is missing.
 * NOTE(review): the return type is uint32_t, so -EINVAL reaches callers as
 * a large unsigned value — confirm callers treat it as an error sentinel.
 */
static inline uint32_t hal_get_link_desc_size(hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	if (!hal_soc || !hal_soc->ops) {
		qdf_print("Error: Invalid ops\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	if (!hal_soc->ops->hal_get_link_desc_size) {
		qdf_print("Error: Invalid function pointer\n");
		QDF_BUG(0);
		return -EINVAL;
	}
	return hal_soc->ops->hal_get_link_desc_size();
}
/**
 * hal_get_link_desc_align - Get the required start address alignment for
 * link descriptors
 *
 * @hal_soc_hdl: Opaque HAL SOC handle (unused; kept for API uniformity)
 *
 * Return: required alignment in bytes (LINK_DESC_ALIGN)
 */
static inline
uint32_t hal_get_link_desc_align(hal_soc_handle_t hal_soc_hdl)
{
	return LINK_DESC_ALIGN;
}
/**
 * hal_num_mpdus_per_link_desc - Get number of mpdus each link desc can hold
 *
 * @hal_soc_hdl: Opaque HAL SOC handle (unused; kept for API uniformity)
 *
 * Return: NUM_MPDUS_PER_LINK_DESC
 */
static inline
uint32_t hal_num_mpdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
{
	return NUM_MPDUS_PER_LINK_DESC;
}
/**
 * hal_num_msdus_per_link_desc - Get number of msdus each link desc can hold
 *
 * @hal_soc_hdl: Opaque HAL SOC handle (unused; kept for API uniformity)
 *
 * Return: NUM_MSDUS_PER_LINK_DESC
 */
static inline
uint32_t hal_num_msdus_per_link_desc(hal_soc_handle_t hal_soc_hdl)
{
	return NUM_MSDUS_PER_LINK_DESC;
}
/**
 * hal_num_mpdu_links_per_queue_desc - Get number of mpdu links each queue
 * descriptor can hold
 *
 * @hal_soc_hdl: Opaque HAL SOC handle (unused; kept for API uniformity)
 *
 * Return: NUM_MPDU_LINKS_PER_QUEUE_DESC
 */
static inline
uint32_t hal_num_mpdu_links_per_queue_desc(hal_soc_handle_t hal_soc_hdl)
{
	return NUM_MPDU_LINKS_PER_QUEUE_DESC;
}
  1112. /**
  1113. * hal_idle_list_scatter_buf_num_entries - Get the number of link desc entries
  1114. * that the given buffer size
  1115. *
  1116. * @hal_soc: Opaque HAL SOC handle
  1117. * @scatter_buf_size: Size of scatter buffer
  1118. *
  1119. */
  1120. static inline
  1121. uint32_t hal_idle_scatter_buf_num_entries(hal_soc_handle_t hal_soc_hdl,
  1122. uint32_t scatter_buf_size)
  1123. {
  1124. return (scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE) /
  1125. hal_srng_get_entrysize(hal_soc_hdl, WBM_IDLE_LINK);
  1126. }
  1127. /**
  1128. * hal_idle_list_num_scatter_bufs - Get the number of sctater buffer
  1129. * each given buffer size
  1130. *
  1131. * @hal_soc: Opaque HAL SOC handle
  1132. * @total_mem: size of memory to be scattered
  1133. * @scatter_buf_size: Size of scatter buffer
  1134. *
  1135. */
  1136. static inline
  1137. uint32_t hal_idle_list_num_scatter_bufs(hal_soc_handle_t hal_soc_hdl,
  1138. uint32_t total_mem,
  1139. uint32_t scatter_buf_size)
  1140. {
  1141. uint8_t rem = (total_mem % (scatter_buf_size -
  1142. WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) ? 1 : 0;
  1143. uint32_t num_scatter_bufs = (total_mem / (scatter_buf_size -
  1144. WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE)) + rem;
  1145. return num_scatter_bufs;
  1146. }
/* PN (packet number) check variants used when setting up a REO queue
 * descriptor; see hal_reo_qdesc_setup()
 */
enum hal_pn_type {
	HAL_PN_NONE,
	HAL_PN_WPA,
	HAL_PN_WAPI_EVEN,
	HAL_PN_WAPI_UNEVEN,
};

/* Maximum RX BlockAck window size supported */
#define HAL_RX_MAX_BA_WINDOW 256
/**
 * hal_get_reo_qdesc_align - Get start address alignment for reo
 * queue descriptors
 *
 * @hal_soc_hdl: Opaque HAL SOC handle (unused; kept for API uniformity)
 *
 * Return: required alignment in bytes (REO_QUEUE_DESC_ALIGN)
 */
static inline
uint32_t hal_get_reo_qdesc_align(hal_soc_handle_t hal_soc_hdl)
{
	return REO_QUEUE_DESC_ALIGN;
}
/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @tid: TID for which the queue descriptor is being set up
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (one of the types defined in 'enum hal_pn_type')
 *
 */
void hal_reo_qdesc_setup(hal_soc_handle_t hal_soc_hdl,
			 int tid, uint32_t ba_window_size,
			 uint32_t start_seq, void *hw_qdesc_vaddr,
			 qdf_dma_addr_t hw_qdesc_paddr,
			 int pn_type);
  1182. /**
  1183. * hal_srng_get_hp_addr - Get head pointer physical address
  1184. *
  1185. * @hal_soc: Opaque HAL SOC handle
  1186. * @hal_ring_hdl: Ring pointer (Source or Destination ring)
  1187. *
  1188. */
  1189. static inline qdf_dma_addr_t
  1190. hal_srng_get_hp_addr(void *hal_soc,
  1191. hal_ring_handle_t hal_ring_hdl)
  1192. {
  1193. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  1194. struct hal_soc *hal = (struct hal_soc *)hal_soc;
  1195. if (srng->ring_dir == HAL_SRNG_SRC_RING) {
  1196. return hal->shadow_wrptr_mem_paddr +
  1197. ((unsigned long)(srng->u.src_ring.hp_addr) -
  1198. (unsigned long)(hal->shadow_wrptr_mem_vaddr));
  1199. } else {
  1200. return hal->shadow_rdptr_mem_paddr +
  1201. ((unsigned long)(srng->u.dst_ring.hp_addr) -
  1202. (unsigned long)(hal->shadow_rdptr_mem_vaddr));
  1203. }
  1204. }
  1205. /**
  1206. * hal_srng_get_tp_addr - Get tail pointer physical address
  1207. *
  1208. * @hal_soc: Opaque HAL SOC handle
  1209. * @hal_ring_hdl: Ring pointer (Source or Destination ring)
  1210. *
  1211. */
  1212. static inline qdf_dma_addr_t
  1213. hal_srng_get_tp_addr(void *hal_soc, hal_ring_handle_t hal_ring_hdl)
  1214. {
  1215. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  1216. struct hal_soc *hal = (struct hal_soc *)hal_soc;
  1217. if (srng->ring_dir == HAL_SRNG_SRC_RING) {
  1218. return hal->shadow_rdptr_mem_paddr +
  1219. ((unsigned long)(srng->u.src_ring.tp_addr) -
  1220. (unsigned long)(hal->shadow_rdptr_mem_vaddr));
  1221. } else {
  1222. return hal->shadow_wrptr_mem_paddr +
  1223. ((unsigned long)(srng->u.dst_ring.tp_addr) -
  1224. (unsigned long)(hal->shadow_wrptr_mem_vaddr));
  1225. }
  1226. }
/**
 * hal_get_srng_params - Retrieve SRNG parameters for a given ring from HAL
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @hal_ring_hdl: Ring pointer (Source or Destination ring)
 * @ring_params: SRNG parameters will be returned through this structure
 */
void hal_get_srng_params(hal_soc_handle_t hal_soc_hdl,
			 hal_ring_handle_t hal_ring_hdl,
			 struct hal_srng_params *ring_params);
/**
 * hal_get_meminfo - Retrieve hal memory base address
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @mem: pointer to structure to be updated with hal mem info
 */
void hal_get_meminfo(hal_soc_handle_t hal_soc_hdl, struct hal_mem_info *mem);
/**
 * hal_get_target_type - Return target type
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 *
 * Return: target type identifier
 */
uint32_t hal_get_target_type(hal_soc_handle_t hal_soc_hdl);
/**
 * hal_get_ba_aging_timeout - Retrieve BA aging timeout
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: Access category
 * @value: Filled with the timeout duration in millisec
 */
void hal_get_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
			      uint32_t *value);
/**
 * hal_set_ba_aging_timeout - Set BA aging timeout
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: Access category
 * @value: timeout duration in millisec
 */
void hal_set_ba_aging_timeout(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
			      uint32_t value);
  1268. /**
  1269. * hal_srng_dst_hw_init - Private function to initialize SRNG
  1270. * destination ring HW
  1271. * @hal_soc: HAL SOC handle
  1272. * @srng: SRNG ring pointer
  1273. */
  1274. static inline void hal_srng_dst_hw_init(struct hal_soc *hal,
  1275. struct hal_srng *srng)
  1276. {
  1277. hal->ops->hal_srng_dst_hw_init(hal, srng);
  1278. }
  1279. /**
  1280. * hal_srng_src_hw_init - Private function to initialize SRNG
  1281. * source ring HW
  1282. * @hal_soc: HAL SOC handle
  1283. * @srng: SRNG ring pointer
  1284. */
  1285. static inline void hal_srng_src_hw_init(struct hal_soc *hal,
  1286. struct hal_srng *srng)
  1287. {
  1288. hal->ops->hal_srng_src_hw_init(hal, srng);
  1289. }
  1290. /**
  1291. * hal_get_hw_hptp() - Get HW head and tail pointer value for any ring
  1292. * @hal_soc: Opaque HAL SOC handle
  1293. * @hal_ring_hdl: Source ring pointer
  1294. * @headp: Head Pointer
  1295. * @tailp: Tail Pointer
  1296. * @ring_type: Ring
  1297. *
  1298. * Return: Update tail pointer and head pointer in arguments.
  1299. */
  1300. static inline
  1301. void hal_get_hw_hptp(hal_soc_handle_t hal_soc_hdl,
  1302. hal_ring_handle_t hal_ring_hdl,
  1303. uint32_t *headp, uint32_t *tailp,
  1304. uint8_t ring_type)
  1305. {
  1306. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  1307. hal_soc->ops->hal_get_hw_hptp(hal_soc, hal_ring_hdl,
  1308. headp, tailp, ring_type);
  1309. }
  1310. /**
  1311. * hal_reo_setup - Initialize HW REO block
  1312. *
  1313. * @hal_soc: Opaque HAL SOC handle
  1314. * @reo_params: parameters needed by HAL for REO config
  1315. */
  1316. static inline void hal_reo_setup(hal_soc_handle_t hal_soc_hdl,
  1317. void *reoparams)
  1318. {
  1319. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  1320. hal_soc->ops->hal_reo_setup(hal_soc, reoparams);
  1321. }
  1322. /**
  1323. * hal_setup_link_idle_list - Setup scattered idle list using the
  1324. * buffer list provided
  1325. *
  1326. * @hal_soc: Opaque HAL SOC handle
  1327. * @scatter_bufs_base_paddr: Array of physical base addresses
  1328. * @scatter_bufs_base_vaddr: Array of virtual base addresses
  1329. * @num_scatter_bufs: Number of scatter buffers in the above lists
  1330. * @scatter_buf_size: Size of each scatter buffer
  1331. * @last_buf_end_offset: Offset to the last entry
  1332. * @num_entries: Total entries of all scatter bufs
  1333. *
  1334. */
  1335. static inline
  1336. void hal_setup_link_idle_list(hal_soc_handle_t hal_soc_hdl,
  1337. qdf_dma_addr_t scatter_bufs_base_paddr[],
  1338. void *scatter_bufs_base_vaddr[],
  1339. uint32_t num_scatter_bufs,
  1340. uint32_t scatter_buf_size,
  1341. uint32_t last_buf_end_offset,
  1342. uint32_t num_entries)
  1343. {
  1344. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  1345. hal_soc->ops->hal_setup_link_idle_list(hal_soc, scatter_bufs_base_paddr,
  1346. scatter_bufs_base_vaddr, num_scatter_bufs,
  1347. scatter_buf_size, last_buf_end_offset,
  1348. num_entries);
  1349. }
  1350. /**
  1351. * hal_srng_dump_ring_desc() - Dump ring descriptor info
  1352. *
  1353. * @hal_soc: Opaque HAL SOC handle
  1354. * @hal_ring_hdl: Source ring pointer
  1355. * @ring_desc: Opaque ring descriptor handle
  1356. */
  1357. static inline void hal_srng_dump_ring_desc(hal_soc_handle_t hal_soc_hdl,
  1358. hal_ring_handle_t hal_ring_hdl,
  1359. hal_ring_desc_t ring_desc)
  1360. {
  1361. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  1362. QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
  1363. ring_desc, (srng->entry_size << 2));
  1364. }
  1365. /**
  1366. * hal_srng_dump_ring() - Dump last 128 descs of the ring
  1367. *
  1368. * @hal_soc: Opaque HAL SOC handle
  1369. * @hal_ring_hdl: Source ring pointer
  1370. */
  1371. static inline void hal_srng_dump_ring(hal_soc_handle_t hal_soc_hdl,
  1372. hal_ring_handle_t hal_ring_hdl)
  1373. {
  1374. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  1375. uint32_t *desc;
  1376. uint32_t tp, i;
  1377. tp = srng->u.dst_ring.tp;
  1378. for (i = 0; i < 128; i++) {
  1379. if (!tp)
  1380. tp = srng->ring_size;
  1381. desc = &srng->ring_base_vaddr[tp - srng->entry_size];
  1382. QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
  1383. QDF_TRACE_LEVEL_DEBUG,
  1384. desc, (srng->entry_size << 2));
  1385. tp -= srng->entry_size;
  1386. }
  1387. }
/**
 * hal_rxdma_desc_to_hal_ring_desc - API to convert rxdma ring desc
 * to opaque dp_ring desc type
 * @ring_desc: rxdma ring desc
 *
 * Return: the same pointer, viewed as hal_ring_desc_t
 */
static inline
hal_ring_desc_t hal_rxdma_desc_to_hal_ring_desc(hal_rxdma_desc_t ring_desc)
{
	return (hal_ring_desc_t)ring_desc;
}
  1400. /**
  1401. * hal_srng_set_event() - Set hal_srng event
  1402. * @hal_ring_hdl: Source ring pointer
  1403. * @event: SRNG ring event
  1404. *
  1405. * Return: None
  1406. */
  1407. static inline void hal_srng_set_event(hal_ring_handle_t hal_ring_hdl, int event)
  1408. {
  1409. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  1410. qdf_atomic_set_bit(event, &srng->srng_event);
  1411. }
  1412. /**
  1413. * hal_srng_clear_event() - Clear hal_srng event
  1414. * @hal_ring_hdl: Source ring pointer
  1415. * @event: SRNG ring event
  1416. *
  1417. * Return: None
  1418. */
  1419. static inline
  1420. void hal_srng_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
  1421. {
  1422. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  1423. qdf_atomic_clear_bit(event, &srng->srng_event);
  1424. }
  1425. /**
  1426. * hal_srng_get_clear_event() - Clear srng event and return old value
  1427. * @hal_ring_hdl: Source ring pointer
  1428. * @event: SRNG ring event
  1429. *
  1430. * Return: Return old event value
  1431. */
  1432. static inline
  1433. int hal_srng_get_clear_event(hal_ring_handle_t hal_ring_hdl, int event)
  1434. {
  1435. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  1436. return qdf_atomic_test_and_clear_bit(event, &srng->srng_event);
  1437. }
  1438. /**
  1439. * hal_srng_set_flush_last_ts() - Record last flush time stamp
  1440. * @hal_ring_hdl: Source ring pointer
  1441. *
  1442. * Return: None
  1443. */
  1444. static inline void hal_srng_set_flush_last_ts(hal_ring_handle_t hal_ring_hdl)
  1445. {
  1446. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  1447. srng->last_flush_ts = qdf_get_log_timestamp();
  1448. }
  1449. /**
  1450. * hal_srng_inc_flush_cnt() - Increment flush counter
  1451. * @hal_ring_hdl: Source ring pointer
  1452. *
  1453. * Return: None
  1454. */
  1455. static inline void hal_srng_inc_flush_cnt(hal_ring_handle_t hal_ring_hdl)
  1456. {
  1457. struct hal_srng *srng = (struct hal_srng *)hal_ring_hdl;
  1458. srng->flush_count++;
  1459. }
  1460. #endif /* _HAL_APIH_ */