hal_reo.c

/*
 * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hal_reo.h"
#include "hal_tx.h"

#define BLOCK_RES_MASK 0xF
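/**
 * hal_find_one_bit() - Find the position of the lowest set bit
 * @x: bitmap to scan (only the bits covered by BLOCK_RES_MASK are considered)
 *
 * Return: zero-based position of the least significant bit that is set in @x;
 *	   callers treat any value above 3 as "no set bit found"
 */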
static inline uint8_t hal_find_one_bit(uint8_t x)
{
	uint8_t y = (x & (~x + 1)) & BLOCK_RES_MASK;
	uint8_t pos;

	for (pos = 0; y; y >>= 1)
		pos++;

	return pos - 1;
}

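/**
 * hal_find_zero_bit() - Find the position of the lowest clear bit
 * @x: bitmap to scan (only the bits covered by BLOCK_RES_MASK are considered)
 *
 * Return: zero-based position of the least significant bit that is clear
 *	   in @x; callers treat any value above 3 as "no free bit found"
 */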
static inline uint8_t hal_find_zero_bit(uint8_t x)
{
	uint8_t y = (~x & (x + 1)) & BLOCK_RES_MASK;
	uint8_t pos;

	for (pos = 0; y; y >>= 1)
		pos++;

	return pos - 1;
}

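/**
 * hal_reo_cmd_set_descr_addr() - Set the REO queue/flush descriptor address
 *	fields of a REO command descriptor
 * @reo_desc: REO command descriptor (pointing past the TLV header)
 * @type: REO command type
 * @paddr_lo: lower 32 bits of the descriptor physical address
 * @paddr_hi: upper 8 bits of the descriptor physical address
 *
 * Return: none
 */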
inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
				       enum hal_reo_cmd_type type,
				       uint32_t paddr_lo,
				       uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
				   FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
				   FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid REO command type\n", __func__);
		break;
	}
}

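/**
 * hal_reo_cmd_queue_stats() - Post a GET_QUEUE_STATS command to the REO
 *	command ring
 * @reo_ring: REO command SRNG
 * @soc: HAL SoC handle
 * @cmd: REO command parameters
 *
 * Return: command number assigned to the posted descriptor, or -EBUSY if
 *	   no command ring entry is available
 */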
inline int hal_reo_cmd_queue_stats(void *reo_ring, struct hal_soc *soc,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			sizeof(struct reo_get_queue_stats));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_get_queue_stats));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
				   cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			   cmd->u.stats_params.clear);

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, val);
}

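/*
 * Illustrative caller-side sketch (not part of this file): a caller would
 * typically fill a hal_reo_cmd_params structure and hand it to one of the
 * command functions above. The names queue_paddr, reo_cmd_ring and hal_soc
 * below are hypothetical, used only for illustration:
 *
 *	struct hal_reo_cmd_params cmd = {0};
 *
 *	cmd.std.need_status = 1;
 *	cmd.std.addr_lo = queue_paddr & 0xffffffff;
 *	cmd.std.addr_hi = (queue_paddr >> 32) & 0xff;
 *	cmd.u.stats_params.clear = 0;
 *	cmd_num = hal_reo_cmd_queue_stats(reo_cmd_ring, hal_soc, &cmd);
 */

/**
 * hal_reo_cmd_flush_queue() - Post a FLUSH_QUEUE command to the REO
 *	command ring
 * @reo_ring: REO command SRNG
 * @soc: HAL SoC handle
 * @cmd: REO command parameters
 *
 * Return: command number assigned to the posted descriptor, or -EBUSY if
 *	   no command ring entry is available
 */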
inline int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			sizeof(struct reo_flush_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_queue));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			   BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
			   cmd->u.fl_queue_params.block_use_after_flush);

	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   BLOCK_RESOURCE_INDEX,
				   cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, val);
}

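/**
 * hal_reo_cmd_flush_cache() - Post a FLUSH_CACHE command to the REO
 *	command ring
 * @reo_ring: REO command SRNG
 * @soc: HAL SoC handle
 * @cmd: REO command parameters
 *
 * Return: command number assigned to the posted descriptor, or -EBUSY if
 *	   no command ring entry or no cache blocking resource is available
 */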
inline int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(soc, reo_ring);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("%s, No blocking resource available!\n",
				  __func__);
			hal_srng_access_end(soc, reo_ring);
			return -EBUSY;
		}
		soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			sizeof(struct reo_flush_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_cache));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
				   cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   BLOCK_CACHE_USAGE_AFTER_FLUSH,
			   cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
			   cp->flush_all);

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, val);
}

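/**
 * hal_reo_cmd_unblock_cache() - Post an UNBLOCK_CACHE command to the REO
 *	command ring
 * @reo_ring: REO command SRNG
 * @soc: HAL SoC handle
 * @cmd: REO command parameters
 *
 * Return: command number assigned to the posted descriptor, or -EBUSY if
 *	   no command ring entry or no blocked resource is available
 */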
inline int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
				     struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(soc, reo_ring);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(soc, reo_ring);
			qdf_print("%s: No blocking resource to unblock!\n",
				  __func__);
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			sizeof(struct reo_unblock_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_unblock_cache));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			   UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
				   CACHE_BLOCK_RESOURCE_INDEX,
				   cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, val);
}

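/**
 * hal_reo_cmd_flush_timeout_list() - Post a FLUSH_TIMEOUT_LIST command to
 *	the REO command ring
 * @reo_ring: REO command SRNG
 * @soc: HAL SoC handle
 * @cmd: REO command parameters
 *
 * Return: command number assigned to the posted descriptor, or -EBUSY if
 *	   no command ring entry is available
 */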
inline int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc,
					  struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			sizeof(struct reo_flush_timeout_list));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_timeout_list));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
			   cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_RELEASE_DESC_COUNT,
			   cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_FORWARD_BUF_COUNT,
			   cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, val);
}

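/**
 * hal_reo_cmd_update_rx_queue() - Post an UPDATE_RX_REO_QUEUE command to
 *	the REO command ring
 * @reo_ring: REO command SRNG
 * @soc: HAL SoC handle
 * @cmd: REO command parameters
 *
 * Return: command number assigned to the posted descriptor, or -EBUSY if
 *	   no command ring entry is available
 */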
inline int hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc,
				       struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_update_queue_params *p;

	p = &cmd->u.upd_queue_params;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
			sizeof(struct reo_update_rx_reo_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_update_rx_reo_queue));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
				   cmd->std.addr_lo, cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
			   p->update_vld);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   p->update_assoc_link_desc);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_DISABLE_DUPLICATE_DETECTION,
			   p->update_disable_dup_detect);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_SOFT_REORDER_ENABLE,
			   p->update_soft_reorder_enab);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_AC, p->update_ac);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_BAR, p->update_bar);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_RTY, p->update_rty);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_OOR_MODE, p->update_oor_mode);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_SIZE, p->update_pn_size);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_SVLD, p->update_svld);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_SSN, p->update_ssn);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
			   p->update_seq_2k_err_detect);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN_VALID, p->update_pn_valid);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			   UPDATE_PN, p->update_pn);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   VLD, p->vld);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   p->assoc_link_desc);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   SOFT_REORDER_ENABLE, p->soft_reorder_enab);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   BAR, p->bar);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   CHK_2K_MODE, p->chk_2k_mode);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   RTY, p->rty);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   OOR_MODE, p->oor_mode);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   PN_CHECK_NEEDED, p->pn_check_needed);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   PN_SHALL_BE_EVEN, p->pn_even);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   PN_SHALL_BE_UNEVEN, p->pn_uneven);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   PN_HANDLING_ENABLE, p->pn_hand_enab);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
			   IGNORE_AMPDU_FLAG, p->ignore_ampdu);

	if (p->ba_window_size < 1)
		p->ba_window_size = 1;
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   BA_WINDOW_SIZE, p->ba_window_size - 1);

	if (p->pn_size == 24)
		p->pn_size = PN_SIZE_24;
	else if (p->pn_size == 48)
		p->pn_size = PN_SIZE_48;
	else if (p->pn_size == 128)
		p->pn_size = PN_SIZE_128;
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   PN_SIZE, p->pn_size);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   SVLD, p->svld);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   SSN, p->ssn);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
			   PN_ERROR_DETECTED_FLAG, p->pn_err_detect);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
			   PN_31_0, p->pn_31_0);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
			   PN_63_32, p->pn_63_32);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
			   PN_95_64, p->pn_95_64);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
			   PN_127_96, p->pn_127_96);

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, val);
}

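/**
 * hal_reo_queue_stats_status() - Parse a GET_QUEUE_STATS status descriptor
 *	from the REO status ring
 * @reo_desc: status descriptor (starting at the TLV header)
 * @st: parsed queue statistics are returned through this structure
 *
 * Return: none
 */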
inline void hal_reo_queue_stats_status(uint32_t *reo_desc,
				       struct hal_reo_queue_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_GET_QUEUE_STATS, st->header);

	/* SSN */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
				     CURRENT_INDEX)];
	st->curr_idx = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
				     CURRENT_INDEX, val);

	/* PN bits */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
				     PN_31_0)];
	st->pn_31_0 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
				    PN_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
				     PN_63_32)];
	st->pn_63_32 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
				     PN_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
				     PN_95_64)];
	st->pn_95_64 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
				     PN_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
				     PN_127_96)];
	st->pn_127_96 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
				      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
				     LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
					       LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
				     LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
					       LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
				     RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
					   RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
				     RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
					    RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
				     RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
					    RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
				     RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
					     RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
				     RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
					      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
				     RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
					      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
				     RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
					      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
				     RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
					      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
				     CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
					  CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
				     CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
					  CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     TIMEOUT_COUNT)];
	st->fwd_timeout_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
					    TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
					FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     DUPLICATE_COUNT)];
	st->dup_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
				    DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
				     FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
					      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
				     BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
					 BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
				     MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
					  MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
				     MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
					  MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
				     TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
				      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
					       LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     WINDOW_JUMP_2K)];
	st->win_jump_2k = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
					WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     HOLE_COUNT)];
	st->hole_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
				     HOLE_COUNT, val);
}

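/**
 * hal_reo_flush_queue_status() - Parse a FLUSH_QUEUE status descriptor from
 *	the REO status ring
 * @reo_desc: status descriptor (starting at the TLV header)
 * @st: parsed status is returned through this structure
 *
 * Return: none
 */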
inline void hal_reo_flush_queue_status(uint32_t *reo_desc,
				       struct hal_reo_flush_queue_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_QUEUE, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_2,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
				  val);
}

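/**
 * hal_reo_flush_cache_status() - Parse a FLUSH_CACHE status descriptor from
 *	the REO status ring and release the blocking resource on success
 * @reo_desc: status descriptor (starting at the TLV header)
 * @soc: HAL SoC handle (reo_res_bitmap is updated when no block error is set)
 * @st: parsed status is returned through this structure
 *
 * Return: none
 */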
inline void hal_reo_flush_cache_status(uint32_t *reo_desc, struct hal_soc *soc,
				       struct hal_reo_flush_cache_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_CACHE, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, ERROR_DETECTED,
				  val);

	/* block error */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				     BLOCK_ERROR_DETAILS)];
	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
					BLOCK_ERROR_DETAILS, val);
	if (!st->block_error)
		qdf_set_bit(soc->index, (unsigned long *)&soc->reo_res_bitmap);

	/* cache flush status */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				     CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
					CACHE_CONTROLLER_FLUSH_STATUS_HIT,
					val);

	/* cache flush descriptor type */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				     CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
	st->cache_flush_status_desc_type =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
			      CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE, val);

	/* cache flush count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
				     CACHE_CONTROLLER_FLUSH_COUNT)];
	st->cache_flush_cnt = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
					    CACHE_CONTROLLER_FLUSH_COUNT, val);
}

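/**
 * hal_reo_unblock_cache_status() - Parse an UNBLOCK_CACHE status descriptor
 *	from the REO status ring and clear the blocking resource on success
 * @reo_desc: status descriptor (starting at the TLV header)
 * @soc: HAL SoC handle (reo_res_bitmap is updated on successful unblock)
 * @st: parsed status is returned through this structure
 *
 * Return: none
 */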
inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
					 struct hal_soc *soc,
					 struct hal_reo_unblk_cache_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_UNBLOCK_CACHE, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
				  ERROR_DETECTED, val);

	/* unblock type */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
				     UNBLOCK_TYPE)];
	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
					 UNBLOCK_TYPE, val);

	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
		qdf_clear_bit(soc->index,
			      (unsigned long *)&soc->reo_res_bitmap);
}

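/**
 * hal_reo_flush_timeout_list_status() - Parse a FLUSH_TIMEOUT_LIST status
 *	descriptor from the REO status ring
 * @reo_desc: status descriptor (starting at the TLV header)
 * @st: parsed status is returned through this structure
 *
 * Return: none
 */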
inline void hal_reo_flush_timeout_list_status(
			uint32_t *reo_desc,
			struct hal_reo_flush_timeout_list_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_TIMEOUT_LIST, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				  ERROR_DETECTED, val);

	/* list empty */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				     TIMOUT_LIST_EMPTY)];
	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				       TIMOUT_LIST_EMPTY, val);

	/* release descriptor count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				     RELEASE_DESC_COUNT)];
	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
					 RELEASE_DESC_COUNT, val);

	/* forward buf count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				     FORWARD_BUF_COUNT)];
	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
					FORWARD_BUF_COUNT, val);
}

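/**
 * hal_reo_desc_thres_reached_status() - Parse a DESCRIPTOR_THRESHOLD_REACHED
 *	status descriptor from the REO status ring
 * @reo_desc: status descriptor (starting at the TLV header)
 * @st: parsed status is returned through this structure
 *
 * Return: none
 */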
inline void hal_reo_desc_thres_reached_status(
			uint32_t *reo_desc,
			struct hal_reo_desc_thres_reached_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc,
				  REO_DESCRIPTOR_THRESHOLD_REACHED, st->header);

	/* threshold index */
	val = reo_desc[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
				     THRESHOLD_INDEX)];
	st->thres_index = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
				THRESHOLD_INDEX, val);

	/* link desc counters */
	val = reo_desc[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
				     LINK_DESCRIPTOR_COUNTER0)];
	st->link_desc_counter0 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
				LINK_DESCRIPTOR_COUNTER0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
				     LINK_DESCRIPTOR_COUNTER1)];
	st->link_desc_counter1 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
				LINK_DESCRIPTOR_COUNTER1, val);

	val = reo_desc[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
				     LINK_DESCRIPTOR_COUNTER2)];
	st->link_desc_counter2 = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
				LINK_DESCRIPTOR_COUNTER2, val);

	val = reo_desc[HAL_OFFSET_DW(REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
				     LINK_DESCRIPTOR_COUNTER_SUM)];
	st->link_desc_counter_sum = HAL_GET_FIELD(
				REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
				LINK_DESCRIPTOR_COUNTER_SUM, val);
}

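/**
 * hal_reo_rx_update_queue_status() - Parse an UPDATE_RX_REO_QUEUE status
 *	descriptor from the REO status ring (only the header is reported)
 * @reo_desc: status descriptor (starting at the TLV header)
 * @st: parsed status is returned through this structure
 *
 * Return: none
 */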
inline void hal_reo_rx_update_queue_status(uint32_t *reo_desc,
				struct hal_reo_update_rx_queue_status *st)
{
	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc,
				  REO_UPDATE_RX_REO_QUEUE, st->header);
}

/**
 * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
 *	with command number
 * @soc: Handle to HAL SoC structure
 * @hal_srng: Handle to HAL SRNG structure
 *
 * Return: none
 */
inline void hal_reo_init_cmd_ring(struct hal_soc *soc, void *hal_srng)
{
	int cmd_num;
	uint32_t *desc_addr;
	struct hal_srng_params srng_params;
	uint32_t desc_size;
	uint32_t num_desc;

	hal_get_srng_params(soc, hal_srng, &srng_params);

	desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
	desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
	desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
	num_desc = srng_params.num_entries;
	cmd_num = 1;
	while (num_desc) {
		/* Offsets of descriptor fields defined in HW headers start
		 * from the field after TLV header */
		HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
				   REO_CMD_NUMBER, cmd_num);
		desc_addr += desc_size;
		num_desc--; cmd_num++;
	}

	soc->reo_res_bitmap = 0;
}