/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "qdf_module.h"
#include "hal_hw_headers.h"
#include "hal_be_hw_headers.h"
#include "hal_reo.h"
#include "hal_be_reo.h"
#include "hal_be_api.h"

uint32_t hal_get_reo_reg_base_offset_be(void)
{
	return REO_REG_REG_BASE;
}

/**
 * hal_reo_qdesc_setup_be - Setup HW REO queue descriptor
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @tid: TID
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 * @pn_type: PN type (HAL_PN_WPA, HAL_PN_WAPI_EVEN or HAL_PN_WAPI_UNEVEN)
 * @vdev_stats_id: vdev stats counter index
 */
void hal_reo_qdesc_setup_be(hal_soc_handle_t hal_soc_hdl, int tid,
			    uint32_t ba_window_size,
			    uint32_t start_seq, void *hw_qdesc_vaddr,
			    qdf_dma_addr_t hw_qdesc_paddr,
			    int pn_type, uint8_t vdev_stats_id)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));

	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_DESC);

	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER,
			   RESERVED_0A, 0xDDBEEF);

	/* This is just SW metadata and will be copied to REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   RECEIVE_QUEUE_NUMBER, tid);

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   VLD, 1);

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   HAL_RX_LINK_DESC_CNTR);

	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */
	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, AC, reg_val);

	if (ba_window_size < 1)
		ba_window_size = 1;

	/* WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size);

	/* Set RTY bit for non-BA case. Duplicate detection is currently not
	 * done by HW in non-BA case if RTY bit is not set.
	 * TODO: This is a temporary WAR and should be removed once HW fix is
	 * made to check and discard duplicates even if RTY bit is not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, RTY, 1);

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, BA_WINDOW_SIZE,
			   ba_window_size - 1);

	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, PN_CHECK_NEEDED,
			   pn_enable);

	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
				   PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
				   PN_SHALL_BE_UNEVEN, 1);

	/*
	 * TODO: Need to check if PN handling in SW needs to be enabled.
	 * So far this is not a requirement.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, PN_SIZE,
			   pn_size);

	/* TODO: Check if RX_REO_QUEUE_IGNORE_AMPDU_FLAG need to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   IGNORE_AMPDU_FLAG, 1);

	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, SSN,
				   start_seq);

	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, SVLD, 0);

	hal_update_stats_counter_index(reo_queue_desc, vdev_stats_id);

	/* TODO: Check if we should set start PN for WAPI */

	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that the same descriptor can be
	 * used later when an ADDBA request is received. This should be
	 * changed to allocate HW queue descriptors based on the BA window
	 * size being negotiated (0 for non-BA cases), reallocate when the
	 * BA window size changes, and also send a WMI message to FW to
	 * change the REO queue descriptor in the Rx peer entry as part of
	 * dp_rx_tid_update.
	 */
	if (tid == HAL_NON_QOS_TID)
		return;

	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue *)reo_queue_desc) + 1);
	qdf_mem_zero(reo_queue_ext_desc, 3 *
		     sizeof(struct rx_reo_queue_ext));

	/* Initialize first reo queue extension descriptor */
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xADBEEF);

	/* Initialize second reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xBDBEEF);

	/* Initialize third reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xCDBEEF);
}

qdf_export_symbol(hal_reo_qdesc_setup_be);
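
/*
 * Caller sketch (illustrative; real callers live in the DP layer, e.g. the
 * rx TID setup path): the queue descriptor memory is expected to be
 * DMA-coherent, with hw_qdesc_vaddr/hw_qdesc_paddr referring to the same
 * buffer. The variables below are assumptions for illustration only:
 *
 *	void *qdesc_vaddr;		// CPU address of descriptor memory
 *	qdf_dma_addr_t qdesc_paddr;	// DMA address of the same memory
 *
 *	hal_reo_qdesc_setup_be(hal_soc_hdl, tid, ba_window_size, start_seq,
 *			       qdesc_vaddr, qdesc_paddr, HAL_PN_WPA,
 *			       vdev_stats_id);
 */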

/**
 * hal_get_ba_aging_timeout_be - Get BA Aging timeout
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: Access category
 * @value: Filled with the current aging timeout (in ms) for the AC
 */
void hal_get_ba_aging_timeout_be(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
				 uint32_t *value)
{
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;

	switch (ac) {
	case WME_AC_BE:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
				      REO_REG_REG_BASE)) / 1000;
		break;
	case WME_AC_BK:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
				      REO_REG_REG_BASE)) / 1000;
		break;
	case WME_AC_VI:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
				      REO_REG_REG_BASE)) / 1000;
		break;
	case WME_AC_VO:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
				      REO_REG_REG_BASE)) / 1000;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid AC: %d\n", ac);
	}
}

qdf_export_symbol(hal_get_ba_aging_timeout_be);

/**
 * hal_set_ba_aging_timeout_be - Set BA Aging timeout
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: Access category (WME_AC_BE/WME_AC_BK/WME_AC_VI/WME_AC_VO)
 * @value: Timeout value (in ms) to set
 */
void hal_set_ba_aging_timeout_be(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
				 uint32_t value)
{
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;

	switch (ac) {
	case WME_AC_BE:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
			      REO_REG_REG_BASE),
			      value * 1000);
		break;
	case WME_AC_BK:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
			      REO_REG_REG_BASE),
			      value * 1000);
		break;
	case WME_AC_VI:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
			      REO_REG_REG_BASE),
			      value * 1000);
		break;
	case WME_AC_VO:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
			      REO_REG_REG_BASE),
			      value * 1000);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid AC: %d\n", ac);
	}
}

qdf_export_symbol(hal_set_ba_aging_timeout_be);
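
/*
 * Usage sketch: the /1000 and *1000 scaling above suggests the aging
 * threshold registers count microseconds while this API works in
 * milliseconds. A caller doubling the VO timeout might look like:
 *
 *	uint32_t timeout_ms;
 *
 *	hal_get_ba_aging_timeout_be(hal_soc_hdl, WME_AC_VO, &timeout_ms);
 *	hal_set_ba_aging_timeout_be(hal_soc_hdl, WME_AC_VO, timeout_ms * 2);
 */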

static inline void
hal_reo_cmd_set_descr_addr_be(uint32_t *reo_desc,
			      enum hal_reo_cmd_type type,
			      uint32_t paddr_lo,
			      uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS,
				      RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS,
				      RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
				      FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
				      FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
				      FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
				      FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
				      RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
				      RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid REO command type", __func__);
		break;
	}
}
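
/*
 * Address split sketch: the _31_0/_39_32 field pairs above show that the
 * queue descriptor address is 40 bits wide. A caller filling
 * struct hal_reo_cmd_params would derive the two halves from a
 * qdf_dma_addr_t roughly as follows (qdesc_paddr is a hypothetical
 * variable):
 *
 *	cmd.std.addr_lo = qdesc_paddr & 0xffffffff;
 *	cmd.std.addr_hi = (qdesc_paddr >> 32) & 0xff;
 */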

static inline int
hal_reo_cmd_queue_stats_be(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			sizeof(struct reo_get_queue_stats));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_GET_QUEUE_STATS,
				      cmd->std.addr_lo,
				      cmd->std.addr_hi);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS, CLEAR_STATS,
			      cmd->u.stats_params.clear);

	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD, true) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}
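
/*
 * Layout note (sketch): every REO command written to the ring is a TLV:
 * a 32-bit TLV header followed by the command body. The field offsets in
 * the generated HW headers are relative to the body, which is why each
 * command builder advances reo_desc past the header before calling
 * HAL_DESC_64_SET_FIELD():
 *
 *	+------------------------+ <- reo_desc from hal_srng_src_get_next()
 *	| struct tlv_32_hdr      |
 *	+------------------------+ <- reo_desc += sizeof(struct tlv_32_hdr) >> 2
 *	| uniform REO cmd header |
 *	| command-specific words |
 *	+------------------------+
 */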

static inline int
hal_reo_cmd_flush_queue_be(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			sizeof(struct reo_flush_queue));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_FLUSH_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
			      BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
			      cmd->u.fl_queue_params.block_use_after_flush);

	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
				      BLOCK_RESOURCE_INDEX,
				      cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}

static inline int
hal_reo_cmd_flush_cache_be(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("No blocking resource available!");
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			return -EBUSY;
		}
		hal_soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			sizeof(struct reo_flush_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_FLUSH_CACHE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      FORWARD_ALL_MPDUS_IN_QUEUE,
			      cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
				      CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      BLOCK_CACHE_USAGE_AFTER_FLUSH,
			      cp->block_use_after_flush);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE, FLUSH_ENTIRE_CACHE,
			      cp->flush_entire_cache);

	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD, true) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}
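
/*
 * Note on cache blocking (derived from the flows in this file): REO has
 * only four cache block resources, tracked in hal_soc->reo_res_bitmap.
 * When block_use_after_flush is requested, hal_reo_cmd_flush_cache_be()
 * picks a free index here, but the bitmap bit is only set once
 * hal_reo_flush_cache_status_be() sees a status TLV without a block error,
 * and it is cleared again by hal_reo_unblock_cache_status_be(). If all
 * four resources are claimed, the flush returns -EBUSY to the caller.
 */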

static inline int
hal_reo_cmd_unblock_cache_be(hal_ring_handle_t hal_ring_hdl,
			     hal_soc_handle_t hal_soc_hdl,
			     struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			qdf_print("No blocking resource to unblock!");
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			sizeof(struct reo_unblock_cache));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE,
			      UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_64_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE,
				      CACHE_BLOCK_RESOURCE_INDEX,
				      cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}

static inline int
hal_reo_cmd_flush_timeout_list_be(hal_ring_handle_t hal_ring_hdl,
				  hal_soc_handle_t hal_soc_hdl,
				  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			sizeof(struct reo_flush_timeout_list));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST, AC_TIMOUT_LIST,
			      cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST,
			      MINIMUM_RELEASE_DESC_COUNT,
			      cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST,
			      MINIMUM_FORWARD_BUF_COUNT,
			      cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}

static inline int
hal_reo_cmd_update_rx_queue_be(hal_ring_handle_t hal_ring_hdl,
			       hal_soc_handle_t hal_soc_hdl,
			       struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_update_queue_params *p;

	p = &cmd->u.upd_queue_params;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
			sizeof(struct reo_update_rx_reo_queue));

	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_update_rx_reo_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_RECEIVE_QUEUE_NUMBER,
			      p->update_rx_queue_num);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, UPDATE_VLD,
			      p->update_vld);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			      p->update_assoc_link_desc);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_DISABLE_DUPLICATE_DETECTION,
			      p->update_disable_dup_detect);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_SOFT_REORDER_ENABLE,
			      p->update_soft_reorder_enab);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_AC, p->update_ac);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_BAR, p->update_bar);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_RTY, p->update_rty);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_OOR_MODE, p->update_oor_mode);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_PN_CHECK_NEEDED,
			      p->update_pn_check_needed);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_PN_HANDLING_ENABLE,
			      p->update_pn_hand_enab);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_PN_SIZE, p->update_pn_size);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_SVLD, p->update_svld);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_SSN, p->update_ssn);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
			      p->update_seq_2k_err_detect);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_PN_VALID, p->update_pn_valid);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_PN, p->update_pn);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      VLD, p->vld);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			      p->assoc_link_desc);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      DISABLE_DUPLICATE_DETECTION,
			      p->disable_dup_detect);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      SOFT_REORDER_ENABLE, p->soft_reorder_enab);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, AC, p->ac);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      BAR, p->bar);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      CHK_2K_MODE, p->chk_2k_mode);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      RTY, p->rty);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      OOR_MODE, p->oor_mode);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_CHECK_NEEDED, p->pn_check_needed);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_SHALL_BE_EVEN, p->pn_even);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_SHALL_BE_UNEVEN, p->pn_uneven);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_HANDLING_ENABLE, p->pn_hand_enab);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      IGNORE_AMPDU_FLAG, p->ignore_ampdu);

	if (p->ba_window_size < 1)
		p->ba_window_size = 1;

	/*
	 * WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	if (p->ba_window_size == 1)
		p->ba_window_size++;

	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      BA_WINDOW_SIZE, p->ba_window_size - 1);

	if (p->pn_size == 24)
		p->pn_size = PN_SIZE_24;
	else if (p->pn_size == 48)
		p->pn_size = PN_SIZE_48;
	else if (p->pn_size == 128)
		p->pn_size = PN_SIZE_128;

	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_SIZE, p->pn_size);

	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      SVLD, p->svld);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      SSN, p->ssn);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_31_0, p->pn_31_0);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_63_32, p->pn_63_32);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_95_64, p->pn_95_64);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_127_96, p->pn_127_96);

	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD, false) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}
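
/*
 * Caller sketch (illustrative only; variable names are assumptions): to
 * update just the BA window size of an existing queue, set the matching
 * update_* flag and the new value, leaving the rest of the params zeroed:
 *
 *	struct hal_reo_cmd_params cmd = {0};
 *
 *	cmd.std.need_status = 1;
 *	cmd.std.addr_lo = qdesc_paddr & 0xffffffff;
 *	cmd.std.addr_hi = (qdesc_paddr >> 32) & 0xff;
 *	cmd.u.upd_queue_params.update_ba_window_size = 1;
 *	cmd.u.upd_queue_params.ba_window_size = new_win_sz;
 *	hal_reo_send_cmd_be(hal_soc_hdl, reo_cmd_ring_hdl,
 *			    CMD_UPDATE_RX_REO_QUEUE, &cmd);
 */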

int hal_reo_send_cmd_be(hal_soc_handle_t hal_soc_hdl,
			hal_ring_handle_t hal_ring_hdl,
			enum hal_reo_cmd_type cmd,
			void *params)
{
	struct hal_reo_cmd_params *cmd_params =
			(struct hal_reo_cmd_params *)params;
	int num = 0;

	switch (cmd) {
	case CMD_GET_QUEUE_STATS:
		num = hal_reo_cmd_queue_stats_be(hal_ring_hdl,
						 hal_soc_hdl, cmd_params);
		break;
	case CMD_FLUSH_QUEUE:
		num = hal_reo_cmd_flush_queue_be(hal_ring_hdl,
						 hal_soc_hdl, cmd_params);
		break;
	case CMD_FLUSH_CACHE:
		num = hal_reo_cmd_flush_cache_be(hal_ring_hdl,
						 hal_soc_hdl, cmd_params);
		break;
	case CMD_UNBLOCK_CACHE:
		num = hal_reo_cmd_unblock_cache_be(hal_ring_hdl,
						   hal_soc_hdl, cmd_params);
		break;
	case CMD_FLUSH_TIMEOUT_LIST:
		num = hal_reo_cmd_flush_timeout_list_be(hal_ring_hdl,
							hal_soc_hdl,
							cmd_params);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		num = hal_reo_cmd_update_rx_queue_be(hal_ring_hdl,
						     hal_soc_hdl, cmd_params);
		break;
	default:
		hal_err("Invalid REO command type: %d", cmd);
		return -EINVAL;
	}

	return num;
}
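
/*
 * Dispatch sketch: hal_reo_send_cmd_be() is the single entry point for all
 * REO commands. On success it returns the REO_CMD_NUMBER that hardware will
 * echo in the matching status TLV; otherwise a negative errno (-EBUSY when
 * the command ring is full, -EINVAL for an unknown command). Hypothetical
 * example for fetching (and clearing) queue statistics:
 *
 *	struct hal_reo_cmd_params cmd = {0};
 *	int cmd_num;
 *
 *	cmd.std.need_status = 1;
 *	cmd.std.addr_lo = qdesc_paddr & 0xffffffff;
 *	cmd.std.addr_hi = (qdesc_paddr >> 32) & 0xff;
 *	cmd.u.stats_params.clear = 1;
 *	cmd_num = hal_reo_send_cmd_be(hal_soc_hdl, reo_cmd_ring_hdl,
 *				      CMD_GET_QUEUE_STATS, &cmd);
 *	if (cmd_num < 0)
 *		;	// ring full: retry after the ring drains
 */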

void
hal_reo_queue_stats_status_be(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_queue_status *st =
		(struct hal_reo_queue_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;
	uint64_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
				  &(st->header), hal_soc);

	/* SSN */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      CURRENT_INDEX, val);

	/* PN bits */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_31_0, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_63_32, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_95_64, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      HOLE_COUNT, val);
}
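
/*
 * Consumer sketch (the header field and enum names below are assumptions
 * based on the common HAL status definitions): a status-ring handler would
 * decode the TLV into a local struct and then read the parsed counters:
 *
 *	struct hal_reo_queue_status st;
 *
 *	hal_reo_queue_stats_status_be(ring_desc, &st, hal_soc_hdl);
 *	if (st.header.status == HAL_REO_CMD_SUCCESS)	// assumed names
 *		report(st.ssn, st.curr_mpdu_cnt, st.dup_cnt);
 */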

void
hal_reo_flush_queue_status_be(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_flush_queue_status *st =
		(struct hal_reo_flush_queue_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;
	uint64_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
				  &(st->header), hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_QUEUE_STATUS,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS, ERROR_DETECTED,
				  val);
}

void
hal_reo_flush_cache_status_be(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_flush_cache_status *st =
		(struct hal_reo_flush_cache_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;
	uint64_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
				  &(st->header), hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS, ERROR_DETECTED,
				  val);

	/* block error */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
				     BLOCK_ERROR_DETAILS)];
	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
					BLOCK_ERROR_DETAILS,
					val);
	if (!st->block_error)
		qdf_set_bit(hal_soc->index,
			    (unsigned long *)&hal_soc->reo_res_bitmap);

	/* cache flush status */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
				     CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
	st->cache_flush_status =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
			      CACHE_CONTROLLER_FLUSH_STATUS_HIT,
			      val);

	/* cache flush descriptor type */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
				     CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
	st->cache_flush_status_desc_type =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
			      val);

	/* cache flush count */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
				     CACHE_CONTROLLER_FLUSH_COUNT)];
	st->cache_flush_cnt =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
			      CACHE_CONTROLLER_FLUSH_COUNT,
			      val);
}

void
hal_reo_unblock_cache_status_be(hal_ring_desc_t ring_desc,
				hal_soc_handle_t hal_soc_hdl,
				void *st_handle)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_unblk_cache_status *st =
		(struct hal_reo_unblk_cache_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;
	uint64_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
				  &st->header, hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_QW(REO_UNBLOCK_CACHE_STATUS,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS,
				  ERROR_DETECTED,
				  val);

	/* unblock type */
	val = reo_desc[HAL_OFFSET_QW(REO_UNBLOCK_CACHE_STATUS,
				     UNBLOCK_TYPE)];
	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS,
					 UNBLOCK_TYPE,
					 val);

	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
		qdf_clear_bit(hal_soc->index,
			      (unsigned long *)&hal_soc->reo_res_bitmap);
}
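
/*
 * Pairing sketch (illustrative): an explicit unblock undoes the claim made
 * by a blocking flush. With UNBLOCK_RES_INDEX, the status handler above
 * clears the reo_res_bitmap bit so the block resource can be reused:
 *
 *	struct hal_reo_cmd_params cmd = {0};
 *
 *	cmd.std.need_status = 1;
 *	cmd.u.unblk_cache_params.type = UNBLOCK_RES_INDEX;
 *	cmd.u.unblk_cache_params.index = blocked_index;	// hypothetical var
 *	hal_reo_send_cmd_be(hal_soc_hdl, reo_cmd_ring_hdl,
 *			    CMD_UNBLOCK_CACHE, &cmd);
 */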

void hal_reo_flush_timeout_list_status_be(hal_ring_desc_t ring_desc,
					  void *st_handle,
					  hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_flush_timeout_list_status *st =
		(struct hal_reo_flush_timeout_list_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;
	uint64_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
				  &(st->header), hal_soc);

	/* error bit */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
				  ERROR_DETECTED,
				  val);

	/* list empty */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
				     TIMOUT_LIST_EMPTY)];
	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
				       TIMOUT_LIST_EMPTY,
				       val);

	/* release descriptor count */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
				     RELEASE_DESC_COUNT)];
	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
					 RELEASE_DESC_COUNT,
					 val);

	/* forward buf count */
	val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
				     FORWARD_BUF_COUNT)];
	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
					FORWARD_BUF_COUNT,
					val);
}

void hal_reo_desc_thres_reached_status_be(hal_ring_desc_t ring_desc,
					  void *st_handle,
					  hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_desc_thres_reached_status *st =
		(struct hal_reo_desc_thres_reached_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;
	uint64_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc,
				  HAL_REO_DESC_THRES_STATUS_TLV,
				  &(st->header), hal_soc);

	/* threshold index */
	val = reo_desc[HAL_OFFSET_QW(
			REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
			THRESHOLD_INDEX)];
	st->thres_index = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				THRESHOLD_INDEX,
				val);

	/* link desc counters */
	val = reo_desc[HAL_OFFSET_QW(
			REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
			LINK_DESCRIPTOR_COUNTER0)];
	st->link_desc_counter0 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				LINK_DESCRIPTOR_COUNTER0,
				val);

	val = reo_desc[HAL_OFFSET_QW(
			REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
			LINK_DESCRIPTOR_COUNTER1)];
	st->link_desc_counter1 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				LINK_DESCRIPTOR_COUNTER1,
				val);

	val = reo_desc[HAL_OFFSET_QW(
			REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
			LINK_DESCRIPTOR_COUNTER2)];
	st->link_desc_counter2 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				LINK_DESCRIPTOR_COUNTER2,
				val);

	val = reo_desc[HAL_OFFSET_QW(
			REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
			LINK_DESCRIPTOR_COUNTER_SUM)];
	st->link_desc_counter_sum = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
				LINK_DESCRIPTOR_COUNTER_SUM,
				val);
}

void
hal_reo_rx_update_queue_status_be(hal_ring_desc_t ring_desc,
				  void *st_handle,
				  hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_update_rx_queue_status *st =
		(struct hal_reo_update_rx_queue_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc,
				  HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
				  &(st->header), hal_soc);
}

uint8_t hal_get_tlv_hdr_size_be(void)
{
	return sizeof(struct tlv_32_hdr);
}
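
/*
 * Usage sketch: a caller walking raw status TLVs can use the exported
 * header size to find the command body, mirroring what the status parsers
 * above do internally:
 *
 *	uint64_t *body = (uint64_t *)ring_desc +
 *			 HAL_GET_NUM_QWORDS(hal_get_tlv_hdr_size_be());
 */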