hal_be_reo.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240
  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #include "qdf_module.h"
  20. #include "hal_hw_headers.h"
  21. #include "hal_be_hw_headers.h"
  22. #include "hal_reo.h"
  23. #include "hal_be_reo.h"
  24. #include "hal_be_api.h"
  25. uint32_t hal_get_reo_reg_base_offset_be(void)
  26. {
  27. return REO_REG_REG_BASE;
  28. }
/**
 * hal_reo_qdesc_setup_be() - Initialize a REO RX queue descriptor in memory
 * @hal_soc_hdl: opaque HAL SOC handle (unused directly in this routine)
 * @tid: TID this queue serves; for HAL_NON_QOS_TID no extension
 *       descriptors are initialized
 * @ba_window_size: block-ack window size; values < 1 are clamped to 1
 * @start_seq: starting sequence number; programmed as SSN only when <= 0xfff
 * @hw_qdesc_vaddr: CPU virtual address of the HW queue descriptor memory
 *                  (must cover rx_reo_queue plus, for QoS TIDs, three
 *                  rx_reo_queue_ext descriptors)
 * @hw_qdesc_paddr: DMA address of the descriptor memory
 *                  (NOTE(review): not referenced in this function body -
 *                  presumably kept for API symmetry; confirm with callers)
 * @pn_type: PN check type (HAL_PN_WPA, HAL_PN_WAPI_EVEN, HAL_PN_WAPI_UNEVEN,
 *           anything else disables PN checking)
 * @vdev_stats_id: statistics counter index programmed into the descriptor
 */
void hal_reo_qdesc_setup_be(hal_soc_handle_t hal_soc_hdl, int tid,
			    uint32_t ba_window_size,
			    uint32_t start_seq, void *hw_qdesc_vaddr,
			    qdf_dma_addr_t hw_qdesc_paddr,
			    int pn_type, uint8_t vdev_stats_id)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	/* Start from a clean descriptor before programming fields */
	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));
	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER,
			   RESERVED_0A, 0xDDBEEF);
	/* This a just a SW meta data and will be copied to REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   RECEIVE_QUEUE_NUMBER, tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   HAL_RX_LINK_DESC_CNTR);
	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */
	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, AC, reg_val);
	if (ba_window_size < 1)
		ba_window_size = 1;
	/* WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size);
	/* Set RTY bit for non-BA case. Duplicate detection is currently not
	 * done by HW in non-BA case if RTY bit is not set.
	 * TODO: This is a temporary War and should be removed once HW fix is
	 * made to check and discard duplicates even if RTY bit is not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, RTY, 1);
	/* HW field is (window size - 1) */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, BA_WINDOW_SIZE,
			   ba_window_size - 1);
	/* Map the cipher's PN type onto PN enable + PN width */
	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, PN_CHECK_NEEDED,
			   pn_enable);
	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
				   PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
				   PN_SHALL_BE_UNEVEN, 1);
	/*
	 * TODO: Need to check if PN handling in SW needs to be enabled
	 * So far this is not a requirement
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, PN_SIZE,
			   pn_size);
	/* TODO: Check if RX_REO_QUEUE_IGNORE_AMPDU_FLAG need to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   IGNORE_AMPDU_FLAG, 1);
	/* SSN is a 12-bit field; skip programming out-of-range values */
	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, SSN,
				   start_seq);
	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, SVLD, 0);
	hal_update_stats_counter_index(reo_queue_desc, vdev_stats_id);
	/* TODO: Check if we should set start PN for WAPI */
	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is received. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	if (tid == HAL_NON_QOS_TID)
		return;
	/* Extension descriptors follow the base queue descriptor in memory */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue *)reo_queue_desc) + 1);
	qdf_mem_zero(reo_queue_ext_desc, 3 *
		     sizeof(struct rx_reo_queue_ext));
	/* Initialize first reo queue extension descriptor */
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xADBEEF);
	/* Initialize second reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xBDBEEF);
	/* Initialize third reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xCDBEEF);
}
/* Export so other modules can set up REO queue descriptors */
qdf_export_symbol(hal_reo_qdesc_setup_be);
/**
 * hal_reo_cmd_set_descr_addr_be() - Program the 40-bit descriptor physical
 *                                   address into a REO command descriptor
 * @reo_desc: pointer to the command descriptor body (past the TLV header)
 * @type: REO command type, selects which command's field layout to use
 * @paddr_lo: lower 32 bits of the descriptor physical address
 * @paddr_hi: bits 39..32 of the descriptor physical address
 */
static void
hal_reo_cmd_set_descr_addr_be(uint32_t *reo_desc,
			      enum hal_reo_cmd_type type,
			      uint32_t paddr_lo,
			      uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS,
				      RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS,
				      RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
				      FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
				      FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
				      FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
				      FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
				      RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
				      RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		/* Remaining command types carry no descriptor address */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid REO command type", __func__);
		break;
	}
}
/**
 * hal_reo_cmd_queue_stats_be() - Post a GET_QUEUE_STATS command on the
 *                                REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: HAL SOC handle
 * @cmd: command parameters; std.addr_lo/addr_hi carry the REO queue
 *       descriptor address, u.stats_params.clear requests clear-on-read
 *
 * Return: REO command number assigned by HW, or -EBUSY when the command
 *         ring has no free entry
 */
static int
hal_reo_cmd_queue_stats_be(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		/* Ring full: release access without committing an entry */
		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
		hal_warn_rl("Out of cmd ring entries");
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			sizeof(struct reo_get_queue_stats));
	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body that follows the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);
	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_GET_QUEUE_STATS,
				      cmd->std.addr_lo,
				      cmd->std.addr_hi);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS, CLEAR_STATS,
			      cmd->u.stats_params.clear);
	/* Commit the entry to HW (runtime-PM aware access end) */
	hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl,
			       HIF_RTPM_ID_HAL_REO_CMD);
	/* HW filled in the command number as part of the header */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}
/**
 * hal_reo_cmd_flush_queue_be() - Post a FLUSH_QUEUE command on the REO
 *                                command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: HAL SOC handle
 * @cmd: command parameters; std.addr_lo/addr_hi carry the descriptor
 *       address to flush, u.fl_queue_params controls post-flush blocking
 *
 * Return: REO command number assigned by HW, or -EBUSY when the command
 *         ring has no free entry
 */
static int
hal_reo_cmd_flush_queue_be(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		/* Ring full: release access without committing an entry */
		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
		hal_warn_rl("Out of cmd ring entries");
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			sizeof(struct reo_flush_queue));
	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body that follows the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);
	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_FLUSH_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
			      BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
			      cmd->u.fl_queue_params.block_use_after_flush);
	/* Blocking resource index only applies when blocking is requested */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
				      BLOCK_RESOURCE_INDEX,
				      cmd->u.fl_queue_params.index);
	}
	hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl,
			       HIF_RTPM_ID_HAL_REO_CMD);
	/* HW filled in the command number as part of the header */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}
/**
 * hal_reo_cmd_flush_cache_be() - Post a FLUSH_CACHE command on the REO
 *                                command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: HAL SOC handle
 * @cmd: command parameters; u.fl_cache_params selects flush scope,
 *       invalidate behavior and post-flush cache blocking
 *
 * When blocking after flush is requested, one of the 4 REO HW blocking
 * resources is claimed from hal_soc->reo_res_bitmap before the command
 * is queued.
 *
 * Return: REO command number assigned by HW, or -EBUSY when no blocking
 *         resource or no command ring entry is available
 */
static int
hal_reo_cmd_flush_cache_be(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;
	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
			hal_warn_rl("No blocking resource available!");
			return -EBUSY;
		}
		hal_soc->index = index;
	}
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		/* Ring full: back out and dump ring state for debugging */
		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			sizeof(struct reo_flush_cache));
	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body that follows the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);
	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_FLUSH_CACHE,
				      cmd->std.addr_lo, cmd->std.addr_hi);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      FORWARD_ALL_MPDUS_IN_QUEUE,
			      cp->fwd_mpdus_in_queue);
	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);
	if (cp->block_use_after_flush) {
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
				      CACHE_BLOCK_RESOURCE_INDEX, index);
	}
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      FLUSH_QUEUE_1K_DESC, cp->flush_q_1k_desc);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      BLOCK_CACHE_USAGE_AFTER_FLUSH,
			      cp->block_use_after_flush);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE, FLUSH_ENTIRE_CACHE,
			      cp->flush_entire_cache);
	hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl,
			       HIF_RTPM_ID_HAL_REO_CMD);
	/* HW filled in the command number as part of the header */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}
/**
 * hal_reo_cmd_unblock_cache_be() - Post an UNBLOCK_CACHE command on the
 *                                  REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: HAL SOC handle
 * @cmd: command parameters; u.unblk_cache_params.type selects unblock by
 *       resource index vs. entire cache, .index is the resource to release
 *
 * Return: REO command number assigned by HW, or -EBUSY when no blocked
 *         resource is found or the command ring has no free entry
 */
static int
hal_reo_cmd_unblock_cache_be(hal_ring_handle_t hal_ring_hdl,
			     hal_soc_handle_t hal_soc_hdl,
			     struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		/* Verify at least one blocking resource is currently in use */
		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			/* NOTE(review): this error path uses
			 * hal_srng_access_end (commits) rather than
			 * hal_srng_access_end_reap as sibling paths do -
			 * confirm intended.
			 */
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			qdf_print("No blocking resource to unblock!");
			return -EBUSY;
		}
	}
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		/* Ring full: release access without committing an entry */
		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
		hal_warn_rl("Out of cmd ring entries");
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			sizeof(struct reo_unblock_cache));
	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body that follows the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE,
			      UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);
	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_64_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE,
				      CACHE_BLOCK_RESOURCE_INDEX,
				      cmd->u.unblk_cache_params.index);
	}
	hal_srng_access_end(hal_soc, hal_ring_hdl);
	/* HW filled in the command number as part of the header */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}
/**
 * hal_reo_cmd_flush_timeout_list_be() - Post a FLUSH_TIMEOUT_LIST command
 *                                       on the REO command ring
 * @hal_ring_hdl: REO command ring handle
 * @hal_soc_hdl: HAL SOC handle
 * @cmd: command parameters; u.fl_tim_list_params carries the AC timeout
 *       list to flush and the minimum release/forward counts
 *
 * Return: REO command number assigned by HW, or -EBUSY when the command
 *         ring has no free entry
 */
static int
hal_reo_cmd_flush_timeout_list_be(hal_ring_handle_t hal_ring_hdl,
				  hal_soc_handle_t hal_soc_hdl,
				  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		/* Ring full: release access without committing an entry */
		hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
		hal_warn_rl("Out of cmd ring entries");
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			sizeof(struct reo_flush_timeout_list));
	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear the command body that follows the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);
	/* AC_TIMOUT_LIST spelling matches the HW-generated header */
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST, AC_TIMOUT_LIST,
			      cmd->u.fl_tim_list_params.ac_list);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST,
			      MINIMUM_RELEASE_DESC_COUNT,
			      cmd->u.fl_tim_list_params.min_rel_desc);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST,
			      MINIMUM_FORWARD_BUF_COUNT,
			      cmd->u.fl_tim_list_params.min_fwd_buf);
	hal_srng_access_end(hal_soc, hal_ring_hdl);
	/* HW filled in the command number as part of the header */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}
  437. static int
  438. hal_reo_cmd_update_rx_queue_be(hal_ring_handle_t hal_ring_hdl,
  439. hal_soc_handle_t hal_soc_hdl,
  440. struct hal_reo_cmd_params *cmd)
  441. {
  442. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  443. uint32_t *reo_desc, val;
  444. struct hal_reo_cmd_update_queue_params *p;
  445. p = &cmd->u.upd_queue_params;
  446. hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
  447. reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
  448. if (!reo_desc) {
  449. hal_srng_access_end_reap(hal_soc, hal_ring_hdl);
  450. hal_warn_rl("Out of cmd ring entries");
  451. return -EBUSY;
  452. }
  453. HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
  454. sizeof(struct reo_update_rx_reo_queue));
  455. /*
  456. * Offsets of descriptor fields defined in HW headers start from
  457. * the field after TLV header
  458. */
  459. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  460. qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
  461. sizeof(struct reo_update_rx_reo_queue) -
  462. (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
  463. HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
  464. REO_STATUS_REQUIRED, cmd->std.need_status);
  465. hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
  466. cmd->std.addr_lo, cmd->std.addr_hi);
  467. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  468. UPDATE_RECEIVE_QUEUE_NUMBER,
  469. p->update_rx_queue_num);
  470. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, UPDATE_VLD,
  471. p->update_vld);
  472. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  473. UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
  474. p->update_assoc_link_desc);
  475. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  476. UPDATE_DISABLE_DUPLICATE_DETECTION,
  477. p->update_disable_dup_detect);
  478. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  479. UPDATE_DISABLE_DUPLICATE_DETECTION,
  480. p->update_disable_dup_detect);
  481. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  482. UPDATE_SOFT_REORDER_ENABLE,
  483. p->update_soft_reorder_enab);
  484. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  485. UPDATE_AC, p->update_ac);
  486. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  487. UPDATE_BAR, p->update_bar);
  488. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  489. UPDATE_BAR, p->update_bar);
  490. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  491. UPDATE_RTY, p->update_rty);
  492. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  493. UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
  494. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  495. UPDATE_OOR_MODE, p->update_oor_mode);
  496. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  497. UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
  498. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  499. UPDATE_PN_CHECK_NEEDED,
  500. p->update_pn_check_needed);
  501. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  502. UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
  503. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  504. UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
  505. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  506. UPDATE_PN_HANDLING_ENABLE,
  507. p->update_pn_hand_enab);
  508. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  509. UPDATE_PN_SIZE, p->update_pn_size);
  510. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  511. UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
  512. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  513. UPDATE_SVLD, p->update_svld);
  514. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  515. UPDATE_SSN, p->update_ssn);
  516. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  517. UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
  518. p->update_seq_2k_err_detect);
  519. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  520. UPDATE_PN_VALID, p->update_pn_valid);
  521. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  522. UPDATE_PN, p->update_pn);
  523. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  524. RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
  525. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  526. VLD, p->vld);
  527. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  528. ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
  529. p->assoc_link_desc);
  530. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  531. DISABLE_DUPLICATE_DETECTION,
  532. p->disable_dup_detect);
  533. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  534. SOFT_REORDER_ENABLE, p->soft_reorder_enab);
  535. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, AC, p->ac);
  536. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  537. BAR, p->bar);
  538. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  539. CHK_2K_MODE, p->chk_2k_mode);
  540. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  541. RTY, p->rty);
  542. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  543. OOR_MODE, p->oor_mode);
  544. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  545. PN_CHECK_NEEDED, p->pn_check_needed);
  546. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  547. PN_SHALL_BE_EVEN, p->pn_even);
  548. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  549. PN_SHALL_BE_UNEVEN, p->pn_uneven);
  550. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  551. PN_HANDLING_ENABLE, p->pn_hand_enab);
  552. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  553. IGNORE_AMPDU_FLAG, p->ignore_ampdu);
  554. if (p->ba_window_size < 1)
  555. p->ba_window_size = 1;
  556. /*
  557. * WAR to get 2k exception in Non BA case.
  558. * Setting window size to 2 to get 2k jump exception
  559. * when we receive aggregates in Non BA case
  560. */
  561. if (p->ba_window_size == 1)
  562. p->ba_window_size++;
  563. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  564. BA_WINDOW_SIZE, p->ba_window_size - 1);
  565. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  566. PN_SIZE, p->pn_size);
  567. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  568. SVLD, p->svld);
  569. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  570. SSN, p->ssn);
  571. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  572. SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
  573. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  574. PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
  575. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  576. PN_31_0, p->pn_31_0);
  577. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  578. PN_63_32, p->pn_63_32);
  579. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  580. PN_95_64, p->pn_95_64);
  581. HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
  582. PN_127_96, p->pn_127_96);
  583. hal_srng_access_end_v1(hal_soc_hdl, hal_ring_hdl,
  584. HIF_RTPM_ID_HAL_REO_CMD);
  585. val = reo_desc[CMD_HEADER_DW_OFFSET];
  586. return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
  587. val);
  588. }
  589. int hal_reo_send_cmd_be(hal_soc_handle_t hal_soc_hdl,
  590. hal_ring_handle_t hal_ring_hdl,
  591. enum hal_reo_cmd_type cmd,
  592. void *params)
  593. {
  594. struct hal_reo_cmd_params *cmd_params =
  595. (struct hal_reo_cmd_params *)params;
  596. int num = 0;
  597. switch (cmd) {
  598. case CMD_GET_QUEUE_STATS:
  599. num = hal_reo_cmd_queue_stats_be(hal_ring_hdl,
  600. hal_soc_hdl, cmd_params);
  601. break;
  602. case CMD_FLUSH_QUEUE:
  603. num = hal_reo_cmd_flush_queue_be(hal_ring_hdl,
  604. hal_soc_hdl, cmd_params);
  605. break;
  606. case CMD_FLUSH_CACHE:
  607. num = hal_reo_cmd_flush_cache_be(hal_ring_hdl,
  608. hal_soc_hdl, cmd_params);
  609. break;
  610. case CMD_UNBLOCK_CACHE:
  611. num = hal_reo_cmd_unblock_cache_be(hal_ring_hdl,
  612. hal_soc_hdl, cmd_params);
  613. break;
  614. case CMD_FLUSH_TIMEOUT_LIST:
  615. num = hal_reo_cmd_flush_timeout_list_be(hal_ring_hdl,
  616. hal_soc_hdl,
  617. cmd_params);
  618. break;
  619. case CMD_UPDATE_RX_REO_QUEUE:
  620. num = hal_reo_cmd_update_rx_queue_be(hal_ring_hdl,
  621. hal_soc_hdl, cmd_params);
  622. break;
  623. default:
  624. hal_err("Invalid REO command type: %d", cmd);
  625. return -EINVAL;
  626. };
  627. return num;
  628. }
  629. void
  630. hal_reo_queue_stats_status_be(hal_ring_desc_t ring_desc,
  631. void *st_handle,
  632. hal_soc_handle_t hal_soc_hdl)
  633. {
  634. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  635. struct hal_reo_queue_status *st =
  636. (struct hal_reo_queue_status *)st_handle;
  637. uint64_t *reo_desc = (uint64_t *)ring_desc;
  638. uint64_t val;
  639. /*
  640. * Offsets of descriptor fields defined in HW headers start
  641. * from the field after TLV header
  642. */
  643. reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
  644. /* header */
  645. hal_reo_status_get_header(ring_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
  646. &(st->header), hal_soc);
  647. /* SSN */
  648. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, SSN)];
  649. st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, SSN, val);
  650. /* current index */
  651. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  652. CURRENT_INDEX)];
  653. st->curr_idx =
  654. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  655. CURRENT_INDEX, val);
  656. /* PN bits */
  657. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  658. PN_31_0)];
  659. st->pn_31_0 =
  660. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  661. PN_31_0, val);
  662. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  663. PN_63_32)];
  664. st->pn_63_32 =
  665. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  666. PN_63_32, val);
  667. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  668. PN_95_64)];
  669. st->pn_95_64 =
  670. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  671. PN_95_64, val);
  672. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  673. PN_127_96)];
  674. st->pn_127_96 =
  675. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  676. PN_127_96, val);
  677. /* timestamps */
  678. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  679. LAST_RX_ENQUEUE_TIMESTAMP)];
  680. st->last_rx_enq_tstamp =
  681. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  682. LAST_RX_ENQUEUE_TIMESTAMP, val);
  683. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  684. LAST_RX_DEQUEUE_TIMESTAMP)];
  685. st->last_rx_deq_tstamp =
  686. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  687. LAST_RX_DEQUEUE_TIMESTAMP, val);
  688. /* rx bitmap */
  689. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  690. RX_BITMAP_31_0)];
  691. st->rx_bitmap_31_0 =
  692. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  693. RX_BITMAP_31_0, val);
  694. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  695. RX_BITMAP_63_32)];
  696. st->rx_bitmap_63_32 =
  697. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  698. RX_BITMAP_63_32, val);
  699. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  700. RX_BITMAP_95_64)];
  701. st->rx_bitmap_95_64 =
  702. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  703. RX_BITMAP_95_64, val);
  704. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  705. RX_BITMAP_127_96)];
  706. st->rx_bitmap_127_96 =
  707. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  708. RX_BITMAP_127_96, val);
  709. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  710. RX_BITMAP_159_128)];
  711. st->rx_bitmap_159_128 =
  712. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  713. RX_BITMAP_159_128, val);
  714. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  715. RX_BITMAP_191_160)];
  716. st->rx_bitmap_191_160 =
  717. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  718. RX_BITMAP_191_160, val);
  719. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  720. RX_BITMAP_223_192)];
  721. st->rx_bitmap_223_192 =
  722. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  723. RX_BITMAP_223_192, val);
  724. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  725. RX_BITMAP_255_224)];
  726. st->rx_bitmap_255_224 =
  727. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  728. RX_BITMAP_255_224, val);
  729. /* various counts */
  730. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  731. CURRENT_MPDU_COUNT)];
  732. st->curr_mpdu_cnt =
  733. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  734. CURRENT_MPDU_COUNT, val);
  735. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  736. CURRENT_MSDU_COUNT)];
  737. st->curr_msdu_cnt =
  738. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  739. CURRENT_MSDU_COUNT, val);
  740. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  741. TIMEOUT_COUNT)];
  742. st->fwd_timeout_cnt =
  743. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  744. TIMEOUT_COUNT, val);
  745. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  746. FORWARD_DUE_TO_BAR_COUNT)];
  747. st->fwd_bar_cnt =
  748. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  749. FORWARD_DUE_TO_BAR_COUNT, val);
  750. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  751. DUPLICATE_COUNT)];
  752. st->dup_cnt =
  753. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  754. DUPLICATE_COUNT, val);
  755. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  756. FRAMES_IN_ORDER_COUNT)];
  757. st->frms_in_order_cnt =
  758. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  759. FRAMES_IN_ORDER_COUNT, val);
  760. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  761. BAR_RECEIVED_COUNT)];
  762. st->bar_rcvd_cnt =
  763. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  764. BAR_RECEIVED_COUNT, val);
  765. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  766. MPDU_FRAMES_PROCESSED_COUNT)];
  767. st->mpdu_frms_cnt =
  768. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  769. MPDU_FRAMES_PROCESSED_COUNT, val);
  770. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  771. MSDU_FRAMES_PROCESSED_COUNT)];
  772. st->msdu_frms_cnt =
  773. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  774. MSDU_FRAMES_PROCESSED_COUNT, val);
  775. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  776. TOTAL_PROCESSED_BYTE_COUNT)];
  777. st->total_cnt =
  778. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  779. TOTAL_PROCESSED_BYTE_COUNT, val);
  780. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  781. LATE_RECEIVE_MPDU_COUNT)];
  782. st->late_recv_mpdu_cnt =
  783. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  784. LATE_RECEIVE_MPDU_COUNT, val);
  785. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  786. WINDOW_JUMP_2K)];
  787. st->win_jump_2k =
  788. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  789. WINDOW_JUMP_2K, val);
  790. val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
  791. HOLE_COUNT)];
  792. st->hole_cnt =
  793. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
  794. HOLE_COUNT, val);
  795. }
  796. void
  797. hal_reo_flush_queue_status_be(hal_ring_desc_t ring_desc,
  798. void *st_handle,
  799. hal_soc_handle_t hal_soc_hdl)
  800. {
  801. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  802. struct hal_reo_flush_queue_status *st =
  803. (struct hal_reo_flush_queue_status *)st_handle;
  804. uint64_t *reo_desc = (uint64_t *)ring_desc;
  805. uint64_t val;
  806. /*
  807. * Offsets of descriptor fields defined in HW headers start
  808. * from the field after TLV header
  809. */
  810. reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
  811. /* header */
  812. hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
  813. &(st->header), hal_soc);
  814. /* error bit */
  815. val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS,
  816. ERROR_DETECTED)];
  817. st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS, ERROR_DETECTED,
  818. val);
  819. }
  820. void
  821. hal_reo_flush_cache_status_be(hal_ring_desc_t ring_desc,
  822. void *st_handle,
  823. hal_soc_handle_t hal_soc_hdl)
  824. {
  825. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  826. struct hal_reo_flush_cache_status *st =
  827. (struct hal_reo_flush_cache_status *)st_handle;
  828. uint64_t *reo_desc = (uint64_t *)ring_desc;
  829. uint64_t val;
  830. /*
  831. * Offsets of descriptor fields defined in HW headers start
  832. * from the field after TLV header
  833. */
  834. reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
  835. /* header */
  836. hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
  837. &(st->header), hal_soc);
  838. /* error bit */
  839. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
  840. ERROR_DETECTED)];
  841. st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS, ERROR_DETECTED,
  842. val);
  843. /* block error */
  844. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
  845. BLOCK_ERROR_DETAILS)];
  846. st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
  847. BLOCK_ERROR_DETAILS,
  848. val);
  849. if (!st->block_error)
  850. qdf_set_bit(hal_soc->index,
  851. (unsigned long *)&hal_soc->reo_res_bitmap);
  852. /* cache flush status */
  853. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
  854. CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
  855. st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
  856. CACHE_CONTROLLER_FLUSH_STATUS_HIT,
  857. val);
  858. /* cache flush descriptor type */
  859. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
  860. CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
  861. st->cache_flush_status_desc_type =
  862. HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
  863. CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
  864. val);
  865. /* cache flush count */
  866. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
  867. CACHE_CONTROLLER_FLUSH_COUNT)];
  868. st->cache_flush_cnt =
  869. HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
  870. CACHE_CONTROLLER_FLUSH_COUNT,
  871. val);
  872. }
  873. void
  874. hal_reo_unblock_cache_status_be(hal_ring_desc_t ring_desc,
  875. hal_soc_handle_t hal_soc_hdl,
  876. void *st_handle)
  877. {
  878. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  879. struct hal_reo_unblk_cache_status *st =
  880. (struct hal_reo_unblk_cache_status *)st_handle;
  881. uint64_t *reo_desc = (uint64_t *)ring_desc;
  882. uint64_t val;
  883. /*
  884. * Offsets of descriptor fields defined in HW headers start
  885. * from the field after TLV header
  886. */
  887. reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
  888. /* header */
  889. hal_reo_status_get_header(ring_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
  890. &st->header, hal_soc);
  891. /* error bit */
  892. val = reo_desc[HAL_OFFSET_QW(REO_UNBLOCK_CACHE_STATUS,
  893. ERROR_DETECTED)];
  894. st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS,
  895. ERROR_DETECTED,
  896. val);
  897. /* unblock type */
  898. val = reo_desc[HAL_OFFSET_QW(REO_UNBLOCK_CACHE_STATUS,
  899. UNBLOCK_TYPE)];
  900. st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS,
  901. UNBLOCK_TYPE,
  902. val);
  903. if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
  904. qdf_clear_bit(hal_soc->index,
  905. (unsigned long *)&hal_soc->reo_res_bitmap);
  906. }
  907. void hal_reo_flush_timeout_list_status_be(hal_ring_desc_t ring_desc,
  908. void *st_handle,
  909. hal_soc_handle_t hal_soc_hdl)
  910. {
  911. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  912. struct hal_reo_flush_timeout_list_status *st =
  913. (struct hal_reo_flush_timeout_list_status *)st_handle;
  914. uint64_t *reo_desc = (uint64_t *)ring_desc;
  915. uint64_t val;
  916. /*
  917. * Offsets of descriptor fields defined in HW headers start
  918. * from the field after TLV header
  919. */
  920. reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
  921. /* header */
  922. hal_reo_status_get_header(ring_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
  923. &(st->header), hal_soc);
  924. /* error bit */
  925. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
  926. ERROR_DETECTED)];
  927. st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
  928. ERROR_DETECTED,
  929. val);
  930. /* list empty */
  931. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
  932. TIMOUT_LIST_EMPTY)];
  933. st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
  934. TIMOUT_LIST_EMPTY,
  935. val);
  936. /* release descriptor count */
  937. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
  938. RELEASE_DESC_COUNT)];
  939. st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
  940. RELEASE_DESC_COUNT,
  941. val);
  942. /* forward buf count */
  943. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
  944. FORWARD_BUF_COUNT)];
  945. st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
  946. FORWARD_BUF_COUNT,
  947. val);
  948. }
  949. void hal_reo_desc_thres_reached_status_be(hal_ring_desc_t ring_desc,
  950. void *st_handle,
  951. hal_soc_handle_t hal_soc_hdl)
  952. {
  953. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  954. struct hal_reo_desc_thres_reached_status *st =
  955. (struct hal_reo_desc_thres_reached_status *)st_handle;
  956. uint64_t *reo_desc = (uint64_t *)ring_desc;
  957. uint64_t val;
  958. /*
  959. * Offsets of descriptor fields defined in HW headers start
  960. * from the field after TLV header
  961. */
  962. reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
  963. /* header */
  964. hal_reo_status_get_header(ring_desc,
  965. HAL_REO_DESC_THRES_STATUS_TLV,
  966. &(st->header), hal_soc);
  967. /* threshold index */
  968. val = reo_desc[HAL_OFFSET_QW(
  969. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  970. THRESHOLD_INDEX)];
  971. st->thres_index = HAL_GET_FIELD(
  972. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  973. THRESHOLD_INDEX,
  974. val);
  975. /* link desc counters */
  976. val = reo_desc[HAL_OFFSET_QW(
  977. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  978. LINK_DESCRIPTOR_COUNTER0)];
  979. st->link_desc_counter0 = HAL_GET_FIELD(
  980. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  981. LINK_DESCRIPTOR_COUNTER0,
  982. val);
  983. val = reo_desc[HAL_OFFSET_QW(
  984. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  985. LINK_DESCRIPTOR_COUNTER1)];
  986. st->link_desc_counter1 = HAL_GET_FIELD(
  987. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  988. LINK_DESCRIPTOR_COUNTER1,
  989. val);
  990. val = reo_desc[HAL_OFFSET_QW(
  991. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  992. LINK_DESCRIPTOR_COUNTER2)];
  993. st->link_desc_counter2 = HAL_GET_FIELD(
  994. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  995. LINK_DESCRIPTOR_COUNTER2,
  996. val);
  997. val = reo_desc[HAL_OFFSET_QW(
  998. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  999. LINK_DESCRIPTOR_COUNTER_SUM)];
  1000. st->link_desc_counter_sum = HAL_GET_FIELD(
  1001. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  1002. LINK_DESCRIPTOR_COUNTER_SUM,
  1003. val);
  1004. }
  1005. void
  1006. hal_reo_rx_update_queue_status_be(hal_ring_desc_t ring_desc,
  1007. void *st_handle,
  1008. hal_soc_handle_t hal_soc_hdl)
  1009. {
  1010. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  1011. struct hal_reo_update_rx_queue_status *st =
  1012. (struct hal_reo_update_rx_queue_status *)st_handle;
  1013. uint64_t *reo_desc = (uint64_t *)ring_desc;
  1014. /*
  1015. * Offsets of descriptor fields defined in HW headers start
  1016. * from the field after TLV header
  1017. */
  1018. reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
  1019. /* header */
  1020. hal_reo_status_get_header(ring_desc,
  1021. HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
  1022. &(st->header), hal_soc);
  1023. }
  1024. uint8_t hal_get_tlv_hdr_size_be(void)
  1025. {
  1026. return sizeof(struct tlv_32_hdr);
  1027. }