hal_reo.c

/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_reo.h"
#include "hal_tx.h"
#include "qdf_module.h"

#define BLOCK_RES_MASK 0xF
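
/**
 * hal_find_one_bit() - Find the least significant set bit
 * @x: REO blocking resource bitmap (only BLOCK_RES_MASK bits are used)
 *
 * Return: zero-based position of the lowest set bit in @x, or a value
 * greater than 3 if none of the masked bits is set
 */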
static inline uint8_t hal_find_one_bit(uint8_t x)
{
        uint8_t y = (x & (~x + 1)) & BLOCK_RES_MASK;
        uint8_t pos;

        for (pos = 0; y; y >>= 1)
                pos++;

        return pos - 1;
}
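
/**
 * hal_find_zero_bit() - Find the least significant clear bit
 * @x: REO blocking resource bitmap (only BLOCK_RES_MASK bits are used)
 *
 * Return: zero-based position of the lowest clear bit in @x, or a value
 * greater than 3 if none of the masked bits is clear
 */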
static inline uint8_t hal_find_zero_bit(uint8_t x)
{
        uint8_t y = (~x & (x + 1)) & BLOCK_RES_MASK;
        uint8_t pos;

        for (pos = 0; y; y >>= 1)
                pos++;

        return pos - 1;
}
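
/**
 * hal_reo_cmd_set_descr_addr() - Set the queue/flush descriptor address in a REO command
 * @reo_desc: REO command descriptor (past the TLV header)
 * @type: REO command type
 * @paddr_lo: lower 32 bits of the descriptor physical address
 * @paddr_hi: bits 39:32 of the descriptor physical address
 *
 * Return: none
 */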
inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
        enum hal_reo_cmd_type type,
        uint32_t paddr_lo,
        uint8_t paddr_hi)
{
        switch (type) {
        case CMD_GET_QUEUE_STATS:
                HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
                        RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
                HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
                        RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
                break;
        case CMD_FLUSH_QUEUE:
                HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
                        FLUSH_DESC_ADDR_31_0, paddr_lo);
                HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
                        FLUSH_DESC_ADDR_39_32, paddr_hi);
                break;
        case CMD_FLUSH_CACHE:
                HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
                        FLUSH_ADDR_31_0, paddr_lo);
                HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
                        FLUSH_ADDR_39_32, paddr_hi);
                break;
        case CMD_UPDATE_RX_REO_QUEUE:
                HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
                        RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
                HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                        RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
                break;
        default:
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                        "%s: Invalid REO command type\n", __func__);
                break;
        }
}
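
/**
 * hal_reo_cmd_queue_stats() - Post a GET_QUEUE_STATS command to the REO command ring
 * @reo_ring: REO command ring (SRNG) handle
 * @soc: Handle to HAL SoC structure
 * @cmd: REO command parameters
 *
 * Return: command number of the posted command, or -EBUSY if no command
 * ring entry is available
 */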
inline int hal_reo_cmd_queue_stats(void *reo_ring, struct hal_soc *soc,
        struct hal_reo_cmd_params *cmd)
{
        uint32_t *reo_desc, val;

        hal_srng_access_start(soc, reo_ring);
        reo_desc = hal_srng_src_get_next(soc, reo_ring);
        if (!reo_desc) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
                        "%s: Out of cmd ring entries\n", __func__);
                hal_srng_access_end(soc, reo_ring);
                return -EBUSY;
        }

        HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
                sizeof(struct reo_get_queue_stats));

        /* Offsets of descriptor fields defined in HW headers start from
         * the field after TLV header */
        reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
        qdf_mem_zero((void *)reo_desc, sizeof(struct reo_get_queue_stats));

        HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
                REO_STATUS_REQUIRED, cmd->std.need_status);

        hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
                cmd->std.addr_lo,
                cmd->std.addr_hi);

        HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
                cmd->u.stats_params.clear);

        hal_srng_access_end(soc, reo_ring);

        val = reo_desc[CMD_HEADER_DW_OFFSET];
        return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
                val);
}
qdf_export_symbol(hal_reo_cmd_queue_stats);
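
/**
 * hal_reo_cmd_flush_queue() - Post a FLUSH_QUEUE command to the REO command ring
 * @reo_ring: REO command ring (SRNG) handle
 * @soc: Handle to HAL SoC structure
 * @cmd: REO command parameters
 *
 * Return: command number of the posted command, or -EBUSY if no command
 * ring entry is available
 */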
inline int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc,
        struct hal_reo_cmd_params *cmd)
{
        uint32_t *reo_desc, val;

        hal_srng_access_start(soc, reo_ring);
        reo_desc = hal_srng_src_get_next(soc, reo_ring);
        if (!reo_desc) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
                        "%s: Out of cmd ring entries\n", __func__);
                hal_srng_access_end(soc, reo_ring);
                return -EBUSY;
        }

        HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
                sizeof(struct reo_flush_queue));

        /* Offsets of descriptor fields defined in HW headers start from
         * the field after TLV header */
        reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
        qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_queue));

        HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
                REO_STATUS_REQUIRED, cmd->std.need_status);

        hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
                cmd->std.addr_hi);

        HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
                BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
                cmd->u.fl_queue_params.block_use_after_flush);

        if (cmd->u.fl_queue_params.block_use_after_flush) {
                HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
                        BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
        }

        hal_srng_access_end(soc, reo_ring);

        val = reo_desc[CMD_HEADER_DW_OFFSET];
        return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
                val);
}
qdf_export_symbol(hal_reo_cmd_flush_queue);
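
/**
 * hal_reo_cmd_flush_cache() - Post a FLUSH_CACHE command to the REO command ring
 * @reo_ring: REO command ring (SRNG) handle
 * @soc: Handle to HAL SoC structure
 * @cmd: REO command parameters
 *
 * Return: command number of the posted command, or -EBUSY if no command
 * ring entry or no cache blocking resource is available
 */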
inline int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc,
        struct hal_reo_cmd_params *cmd)
{
        uint32_t *reo_desc, val;
        struct hal_reo_cmd_flush_cache_params *cp;
        uint8_t index = 0;

        cp = &cmd->u.fl_cache_params;

        hal_srng_access_start(soc, reo_ring);

        /* We need a cache block resource for this operation, and REO HW has
         * only 4 such blocking resources. These resources are managed using
         * reo_res_bitmap, and we return failure if none is available.
         */
        if (cp->block_use_after_flush) {
                index = hal_find_zero_bit(soc->reo_res_bitmap);
                if (index > 3) {
                        qdf_print("%s, No blocking resource available!\n",
                                __func__);
                        hal_srng_access_end(soc, reo_ring);
                        return -EBUSY;
                }
                soc->index = index;
        }

        reo_desc = hal_srng_src_get_next(soc, reo_ring);
        if (!reo_desc) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
                        "%s: Out of cmd ring entries\n", __func__);
                hal_srng_access_end(soc, reo_ring);
                hal_srng_dump(reo_ring);
                return -EBUSY;
        }

        HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
                sizeof(struct reo_flush_cache));

        /* Offsets of descriptor fields defined in HW headers start from
         * the field after TLV header */
        reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
        qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_cache));

        HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
                REO_STATUS_REQUIRED, cmd->std.need_status);

        hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
                cmd->std.addr_hi);

        HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
                FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

        /* set it to 0 for now */
        cp->rel_block_index = 0;
        HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
                RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

        if (cp->block_use_after_flush) {
                HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
                        CACHE_BLOCK_RESOURCE_INDEX, index);
        }

        HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
                FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

        HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
                BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush);

        HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
                cp->flush_all);

        hal_srng_access_end(soc, reo_ring);

        val = reo_desc[CMD_HEADER_DW_OFFSET];
        return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
                val);
}
qdf_export_symbol(hal_reo_cmd_flush_cache);
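
/**
 * hal_reo_cmd_unblock_cache() - Post an UNBLOCK_CACHE command to the REO command ring
 * @reo_ring: REO command ring (SRNG) handle
 * @soc: Handle to HAL SoC structure
 * @cmd: REO command parameters
 *
 * Return: command number of the posted command, or -EBUSY if no command
 * ring entry is available or no blocking resource is currently in use
 */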
inline int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
        struct hal_reo_cmd_params *cmd)
{
        uint32_t *reo_desc, val;
        uint8_t index = 0;

        hal_srng_access_start(soc, reo_ring);

        if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
                index = hal_find_one_bit(soc->reo_res_bitmap);
                if (index > 3) {
                        hal_srng_access_end(soc, reo_ring);
                        qdf_print("%s: No blocking resource to unblock!\n",
                                __func__);
                        return -EBUSY;
                }
        }

        reo_desc = hal_srng_src_get_next(soc, reo_ring);
        if (!reo_desc) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
                        "%s: Out of cmd ring entries\n", __func__);
                hal_srng_access_end(soc, reo_ring);
                return -EBUSY;
        }

        HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
                sizeof(struct reo_unblock_cache));

        /* Offsets of descriptor fields defined in HW headers start from
         * the field after TLV header */
        reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
        qdf_mem_zero((void *)reo_desc, sizeof(struct reo_unblock_cache));

        HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
                REO_STATUS_REQUIRED, cmd->std.need_status);

        HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
                UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

        if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
                HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
                        CACHE_BLOCK_RESOURCE_INDEX,
                        cmd->u.unblk_cache_params.index);
        }

        hal_srng_access_end(soc, reo_ring);

        val = reo_desc[CMD_HEADER_DW_OFFSET];
        return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
                val);
}
qdf_export_symbol(hal_reo_cmd_unblock_cache);
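
/**
 * hal_reo_cmd_flush_timeout_list() - Post a FLUSH_TIMEOUT_LIST command to the REO command ring
 * @reo_ring: REO command ring (SRNG) handle
 * @soc: Handle to HAL SoC structure
 * @cmd: REO command parameters
 *
 * Return: command number of the posted command, or -EBUSY if no command
 * ring entry is available
 */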
inline int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc,
        struct hal_reo_cmd_params *cmd)
{
        uint32_t *reo_desc, val;

        hal_srng_access_start(soc, reo_ring);
        reo_desc = hal_srng_src_get_next(soc, reo_ring);
        if (!reo_desc) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
                        "%s: Out of cmd ring entries\n", __func__);
                hal_srng_access_end(soc, reo_ring);
                return -EBUSY;
        }

        HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
                sizeof(struct reo_flush_timeout_list));

        /* Offsets of descriptor fields defined in HW headers start from
         * the field after TLV header */
        reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
        qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_timeout_list));

        HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
                REO_STATUS_REQUIRED, cmd->std.need_status);

        HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
                cmd->u.fl_tim_list_params.ac_list);

        HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
                MINIMUM_RELEASE_DESC_COUNT,
                cmd->u.fl_tim_list_params.min_rel_desc);

        HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
                MINIMUM_FORWARD_BUF_COUNT,
                cmd->u.fl_tim_list_params.min_fwd_buf);

        hal_srng_access_end(soc, reo_ring);

        val = reo_desc[CMD_HEADER_DW_OFFSET];
        return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
                val);
}
qdf_export_symbol(hal_reo_cmd_flush_timeout_list);
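
/**
 * hal_reo_cmd_update_rx_queue() - Post an UPDATE_RX_REO_QUEUE command to the REO command ring
 * @reo_ring: REO command ring (SRNG) handle
 * @soc: Handle to HAL SoC structure
 * @cmd: REO command parameters
 *
 * Return: command number of the posted command, or -EBUSY if no command
 * ring entry is available
 */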
inline int hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc,
        struct hal_reo_cmd_params *cmd)
{
        uint32_t *reo_desc, val;
        struct hal_reo_cmd_update_queue_params *p;

        p = &cmd->u.upd_queue_params;

        hal_srng_access_start(soc, reo_ring);
        reo_desc = hal_srng_src_get_next(soc, reo_ring);
        if (!reo_desc) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
                        "%s: Out of cmd ring entries\n", __func__);
                hal_srng_access_end(soc, reo_ring);
                return -EBUSY;
        }

        HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
                sizeof(struct reo_update_rx_reo_queue));

        /* Offsets of descriptor fields defined in HW headers start from
         * the field after TLV header */
        reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
        qdf_mem_zero((void *)reo_desc, sizeof(struct reo_update_rx_reo_queue));

        HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
                REO_STATUS_REQUIRED, cmd->std.need_status);
        hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
                cmd->std.addr_lo, cmd->std.addr_hi);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
                p->update_vld);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
                p->update_assoc_link_desc);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_DISABLE_DUPLICATE_DETECTION,
                p->update_disable_dup_detect);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_DISABLE_DUPLICATE_DETECTION,
                p->update_disable_dup_detect);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_SOFT_REORDER_ENABLE,
                p->update_soft_reorder_enab);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_AC, p->update_ac);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_BAR, p->update_bar);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_BAR, p->update_bar);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_RTY, p->update_rty);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_OOR_MODE, p->update_oor_mode);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_PN_SIZE, p->update_pn_size);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_SVLD, p->update_svld);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_SSN, p->update_ssn);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
                p->update_seq_2k_err_detect);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_PN_VALID, p->update_pn_valid);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
                UPDATE_PN, p->update_pn);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
                RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
                VLD, p->vld);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
                ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
                p->assoc_link_desc);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
                DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
                SOFT_REORDER_ENABLE, p->soft_reorder_enab);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
                BAR, p->bar);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
                CHK_2K_MODE, p->chk_2k_mode);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
                RTY, p->rty);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
                OOR_MODE, p->oor_mode);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
                PN_CHECK_NEEDED, p->pn_check_needed);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
                PN_SHALL_BE_EVEN, p->pn_even);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
                PN_SHALL_BE_UNEVEN, p->pn_uneven);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
                PN_HANDLING_ENABLE, p->pn_hand_enab);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
                IGNORE_AMPDU_FLAG, p->ignore_ampdu);

        if (p->ba_window_size < 1)
                p->ba_window_size = 1;
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
                BA_WINDOW_SIZE, p->ba_window_size - 1);

        if (p->pn_size == 24)
                p->pn_size = PN_SIZE_24;
        else if (p->pn_size == 48)
                p->pn_size = PN_SIZE_48;
        else if (p->pn_size == 128)
                p->pn_size = PN_SIZE_128;
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
                PN_SIZE, p->pn_size);

        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
                SVLD, p->svld);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
                SSN, p->ssn);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
                SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
                PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
                PN_31_0, p->pn_31_0);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
                PN_63_32, p->pn_63_32);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
                PN_95_64, p->pn_95_64);
        HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
                PN_127_96, p->pn_127_96);

        hal_srng_access_end(soc, reo_ring);

        val = reo_desc[CMD_HEADER_DW_OFFSET];
        return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
                val);
}
qdf_export_symbol(hal_reo_cmd_update_rx_queue);
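
/**
 * hal_reo_queue_stats_status() - Parse a GET_QUEUE_STATS status descriptor
 * @reo_desc: REO status ring descriptor
 * @st: Buffer to hold the parsed queue statistics
 *
 * Return: none
 */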
inline void hal_reo_queue_stats_status(uint32_t *reo_desc,
        struct hal_reo_queue_status *st)
{
        uint32_t val;

        /* Offsets of descriptor fields defined in HW headers start
         * from the field after TLV header */
        reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

        /* header */
        HAL_REO_STATUS_GET_HEADER(reo_desc, REO_GET_QUEUE_STATS, st->header);

        /* SSN */
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
        st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

        /* current index */
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
                CURRENT_INDEX)];
        st->curr_idx =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
                        CURRENT_INDEX, val);

        /* PN bits */
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
                PN_31_0)];
        st->pn_31_0 =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
                        PN_31_0, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
                PN_63_32)];
        st->pn_63_32 =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
                        PN_63_32, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
                PN_95_64)];
        st->pn_95_64 =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
                        PN_95_64, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
                PN_127_96)];
        st->pn_127_96 =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
                        PN_127_96, val);

        /* timestamps */
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
                LAST_RX_ENQUEUE_TIMESTAMP)];
        st->last_rx_enq_tstamp =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
                        LAST_RX_ENQUEUE_TIMESTAMP, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
                LAST_RX_DEQUEUE_TIMESTAMP)];
        st->last_rx_deq_tstamp =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
                        LAST_RX_DEQUEUE_TIMESTAMP, val);

        /* rx bitmap */
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
                RX_BITMAP_31_0)];
        st->rx_bitmap_31_0 =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
                        RX_BITMAP_31_0, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
                RX_BITMAP_63_32)];
        st->rx_bitmap_63_32 =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
                        RX_BITMAP_63_32, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
                RX_BITMAP_95_64)];
        st->rx_bitmap_95_64 =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
                        RX_BITMAP_95_64, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
                RX_BITMAP_127_96)];
        st->rx_bitmap_127_96 =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
                        RX_BITMAP_127_96, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
                RX_BITMAP_159_128)];
        st->rx_bitmap_159_128 =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
                        RX_BITMAP_159_128, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
                RX_BITMAP_191_160)];
        st->rx_bitmap_191_160 =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
                        RX_BITMAP_191_160, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
                RX_BITMAP_223_192)];
        st->rx_bitmap_223_192 =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
                        RX_BITMAP_223_192, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
                RX_BITMAP_255_224)];
        st->rx_bitmap_255_224 =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
                        RX_BITMAP_255_224, val);

        /* various counts */
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
                CURRENT_MPDU_COUNT)];
        st->curr_mpdu_cnt =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
                        CURRENT_MPDU_COUNT, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
                CURRENT_MSDU_COUNT)];
        st->curr_msdu_cnt =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
                        CURRENT_MSDU_COUNT, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
                TIMEOUT_COUNT)];
        st->fwd_timeout_cnt =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
                        TIMEOUT_COUNT, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
                FORWARD_DUE_TO_BAR_COUNT)];
        st->fwd_bar_cnt =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
                        FORWARD_DUE_TO_BAR_COUNT, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
                DUPLICATE_COUNT)];
        st->dup_cnt =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
                        DUPLICATE_COUNT, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
                FRAMES_IN_ORDER_COUNT)];
        st->frms_in_order_cnt =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
                        FRAMES_IN_ORDER_COUNT, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
                BAR_RECEIVED_COUNT)];
        st->bar_rcvd_cnt =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
                        BAR_RECEIVED_COUNT, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
                MPDU_FRAMES_PROCESSED_COUNT)];
        st->mpdu_frms_cnt =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
                        MPDU_FRAMES_PROCESSED_COUNT, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
                MSDU_FRAMES_PROCESSED_COUNT)];
        st->msdu_frms_cnt =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
                        MSDU_FRAMES_PROCESSED_COUNT, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
                TOTAL_PROCESSED_BYTE_COUNT)];
        st->total_cnt =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
                        TOTAL_PROCESSED_BYTE_COUNT, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
                LATE_RECEIVE_MPDU_COUNT)];
        st->late_recv_mpdu_cnt =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
                        LATE_RECEIVE_MPDU_COUNT, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
                WINDOW_JUMP_2K)];
        st->win_jump_2k =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
                        WINDOW_JUMP_2K, val);
        val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
                HOLE_COUNT)];
        st->hole_cnt =
                HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
                        HOLE_COUNT, val);
}
qdf_export_symbol(hal_reo_queue_stats_status);
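
/**
 * hal_reo_flush_queue_status() - Parse a FLUSH_QUEUE status descriptor
 * @reo_desc: REO status ring descriptor
 * @st: Buffer to hold the parsed status
 *
 * Return: none
 */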
inline void hal_reo_flush_queue_status(uint32_t *reo_desc,
        struct hal_reo_flush_queue_status *st)
{
        uint32_t val;

        /* Offsets of descriptor fields defined in HW headers start
         * from the field after TLV header */
        reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

        /* header */
        HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_QUEUE, st->header);

        /* error bit */
        val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2,
                ERROR_DETECTED)];
        st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
                val);
}
qdf_export_symbol(hal_reo_flush_queue_status);
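
/**
 * hal_reo_flush_cache_status() - Parse a FLUSH_CACHE status descriptor and
 * mark the used blocking resource in reo_res_bitmap
 * @reo_desc: REO status ring descriptor
 * @soc: Handle to HAL SoC structure
 * @st: Buffer to hold the parsed status
 *
 * Return: none
 */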
inline void hal_reo_flush_cache_status(uint32_t *reo_desc, struct hal_soc *soc,
        struct hal_reo_flush_cache_status *st)
{
        uint32_t val;

        /* Offsets of descriptor fields defined in HW headers start
         * from the field after TLV header */
        reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

        /* header */
        HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_CACHE, st->header);

        /* error bit */
        val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
                ERROR_DETECTED)];
        st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
                val);

        /* block error */
        val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
                BLOCK_ERROR_DETAILS)];
        st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
                BLOCK_ERROR_DETAILS,
                val);
        if (!st->block_error)
                qdf_set_bit(soc->index, (unsigned long *)&soc->reo_res_bitmap);

        /* cache flush status */
        val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
                CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
        st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
                CACHE_CONTROLLER_FLUSH_STATUS_HIT,
                val);

        /* cache flush descriptor type */
        val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
                CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
        st->cache_flush_status_desc_type =
                HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
                        CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
                        val);

        /* cache flush count */
        val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
                CACHE_CONTROLLER_FLUSH_COUNT)];
        st->cache_flush_cnt =
                HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
                        CACHE_CONTROLLER_FLUSH_COUNT,
                        val);
}
qdf_export_symbol(hal_reo_flush_cache_status);
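
/**
 * hal_reo_unblock_cache_status() - Parse an UNBLOCK_CACHE status descriptor
 * and release the corresponding blocking resource in reo_res_bitmap
 * @reo_desc: REO status ring descriptor
 * @soc: Handle to HAL SoC structure
 * @st: Buffer to hold the parsed status
 *
 * Return: none
 */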
inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
        struct hal_soc *soc,
        struct hal_reo_unblk_cache_status *st)
{
        uint32_t val;

        /* Offsets of descriptor fields defined in HW headers start
         * from the field after TLV header */
        reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

        /* header */
        HAL_REO_STATUS_GET_HEADER(reo_desc, REO_UNBLOCK_CACHE, st->header);

        /* error bit */
        val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
                ERROR_DETECTED)];
        st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
                ERROR_DETECTED,
                val);

        /* unblock type */
        val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
                UNBLOCK_TYPE)];
        st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
                UNBLOCK_TYPE,
                val);

        if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
                qdf_clear_bit(soc->index,
                        (unsigned long *)&soc->reo_res_bitmap);
}
qdf_export_symbol(hal_reo_unblock_cache_status);
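
/**
 * hal_reo_flush_timeout_list_status() - Parse a FLUSH_TIMEOUT_LIST status descriptor
 * @reo_desc: REO status ring descriptor
 * @st: Buffer to hold the parsed status
 *
 * Return: none
 */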
inline void hal_reo_flush_timeout_list_status(
        uint32_t *reo_desc,
        struct hal_reo_flush_timeout_list_status *st)
{
        uint32_t val;

        /* Offsets of descriptor fields defined in HW headers start
         * from the field after TLV header */
        reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

        /* header */
        HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_TIMEOUT_LIST, st->header);

        /* error bit */
        val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
                ERROR_DETECTED)];
        st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
                ERROR_DETECTED,
                val);

        /* list empty */
        val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
                TIMOUT_LIST_EMPTY)];
        st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
                TIMOUT_LIST_EMPTY,
                val);

        /* release descriptor count */
        val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
                RELEASE_DESC_COUNT)];
        st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
                RELEASE_DESC_COUNT,
                val);

        /* forward buf count */
        val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
                FORWARD_BUF_COUNT)];
        st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
                FORWARD_BUF_COUNT,
                val);
}
qdf_export_symbol(hal_reo_flush_timeout_list_status);
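
/**
 * hal_reo_desc_thres_reached_status() - Parse a DESCRIPTOR_THRESHOLD_REACHED status descriptor
 * @reo_desc: REO status ring descriptor
 * @st: Buffer to hold the parsed status
 *
 * Return: none
 */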
inline void hal_reo_desc_thres_reached_status(
        uint32_t *reo_desc,
        struct hal_reo_desc_thres_reached_status *st)
{
        uint32_t val;

        /* Offsets of descriptor fields defined in HW headers start
         * from the field after TLV header */
        reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

        /* header */
        HAL_REO_STATUS_GET_HEADER(reo_desc,
                REO_DESCRIPTOR_THRESHOLD_REACHED, st->header);

        /* threshold index */
        val = reo_desc[HAL_OFFSET_DW(
                REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
                THRESHOLD_INDEX)];
        st->thres_index = HAL_GET_FIELD(
                REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
                THRESHOLD_INDEX,
                val);

        /* link desc counters */
        val = reo_desc[HAL_OFFSET_DW(
                REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
                LINK_DESCRIPTOR_COUNTER0)];
        st->link_desc_counter0 = HAL_GET_FIELD(
                REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
                LINK_DESCRIPTOR_COUNTER0,
                val);
        val = reo_desc[HAL_OFFSET_DW(
                REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
                LINK_DESCRIPTOR_COUNTER1)];
        st->link_desc_counter1 = HAL_GET_FIELD(
                REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
                LINK_DESCRIPTOR_COUNTER1,
                val);
        val = reo_desc[HAL_OFFSET_DW(
                REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
                LINK_DESCRIPTOR_COUNTER2)];
        st->link_desc_counter2 = HAL_GET_FIELD(
                REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
                LINK_DESCRIPTOR_COUNTER2,
                val);
        val = reo_desc[HAL_OFFSET_DW(
                REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
                LINK_DESCRIPTOR_COUNTER_SUM)];
        st->link_desc_counter_sum = HAL_GET_FIELD(
                REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
                LINK_DESCRIPTOR_COUNTER_SUM,
                val);
}
qdf_export_symbol(hal_reo_desc_thres_reached_status);
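
/**
 * hal_reo_rx_update_queue_status() - Parse an UPDATE_RX_REO_QUEUE status descriptor
 * @reo_desc: REO status ring descriptor
 * @st: Buffer to hold the parsed status
 *
 * Return: none
 */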
inline void hal_reo_rx_update_queue_status(uint32_t *reo_desc,
        struct hal_reo_update_rx_queue_status *st)
{
        /* Offsets of descriptor fields defined in HW headers start
         * from the field after TLV header */
        reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

        /* header */
        HAL_REO_STATUS_GET_HEADER(reo_desc,
                REO_UPDATE_RX_REO_QUEUE, st->header);
}
qdf_export_symbol(hal_reo_rx_update_queue_status);

/**
 * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
 * with command number
 * @soc: Handle to HAL SoC structure
 * @hal_srng: Handle to HAL SRNG structure
 *
 * Return: none
 */
inline void hal_reo_init_cmd_ring(struct hal_soc *soc, void *hal_srng)
{
        int cmd_num;
        uint32_t *desc_addr;
        struct hal_srng_params srng_params;
        uint32_t desc_size;
        uint32_t num_desc;

        hal_get_srng_params(soc, hal_srng, &srng_params);

        desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
        desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
        desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
        num_desc = srng_params.num_entries;
        cmd_num = 1;
        while (num_desc) {
                /* Offsets of descriptor fields defined in HW headers start
                 * from the field after TLV header */
                HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
                        REO_CMD_NUMBER, cmd_num);
                desc_addr += desc_size;
                num_desc--;
                cmd_num++;
        }
        soc->reo_res_bitmap = 0;
}
qdf_export_symbol(hal_reo_init_cmd_ring);