hal_reo.c

/*
 * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_reo.h"
#include "hal_tx.h"

#define BLOCK_RES_MASK	0xF

static inline uint8_t hal_find_one_bit(uint8_t x)
{
	uint8_t y = (x & (~x + 1)) & BLOCK_RES_MASK;
	uint8_t pos;

	for (pos = 0; y; y >>= 1)
		pos++;

	return pos - 1;
}

static inline uint8_t hal_find_zero_bit(uint8_t x)
{
	uint8_t y = (~x & (x + 1)) & BLOCK_RES_MASK;
	uint8_t pos;

	for (pos = 0; y; y >>= 1)
		pos++;

	return pos - 1;
}
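
/*
 * Editorial note (illustrative, not from the original source): the two
 * helpers above isolate one bit of the 4-bit blocking-resource mask and
 * convert it to an index. For reo_res_bitmap = 0x5 (0b0101):
 *
 *	hal_find_one_bit(0x5):  (0x05 & 0xFB) & 0xF = 0x01 -> index 0
 *	hal_find_zero_bit(0x5): (0xFA & 0x06) & 0xF = 0x02 -> index 1
 *
 * i.e. the lowest set (in-use) bit and the lowest clear (free) bit,
 * respectively. When no bit qualifies, y stays 0 and the helpers return
 * (uint8_t)-1, which the callers below reject via their "index > 3"
 * checks.
 */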

inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
	enum hal_reo_cmd_type type,
	uint32_t paddr_lo,
	uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
			RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
			FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
			FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Invalid REO command type\n", __func__);
		break;
	}
}

inline int hal_reo_cmd_queue_stats(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
		sizeof(struct reo_get_queue_stats));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_get_queue_stats));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);
	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
		cmd->std.addr_lo, cmd->std.addr_hi);
	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
		cmd->u.stats_params.clear);

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, val);
}
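
/*
 * Usage sketch (editorial, hypothetical caller; reo_cmd_ring, hal_soc
 * and rx_tid_qdesc_paddr are placeholders owned by the attach/peer
 * setup code, not by this file):
 *
 *	struct hal_reo_cmd_params cmd = {0};
 *	int cmd_num;
 *
 *	cmd.std.need_status = 1;
 *	cmd.std.addr_lo = rx_tid_qdesc_paddr & 0xffffffff;
 *	cmd.std.addr_hi = (uint64_t)rx_tid_qdesc_paddr >> 32;
 *	cmd.u.stats_params.clear = 0;
 *	cmd_num = hal_reo_cmd_queue_stats(reo_cmd_ring, hal_soc, &cmd);
 *	if (cmd_num < 0)
 *		... command ring full, retry later ...
 *
 * The returned command number can later be matched against the
 * REO_CMD_NUMBER carried in the corresponding status TLV on the REO
 * status ring.
 */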

inline int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
		sizeof(struct reo_flush_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_queue));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);
	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
		cmd->std.addr_hi);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
		BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
		cmd->u.fl_queue_params.block_use_after_flush);

	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, val);
}

inline int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(soc, reo_ring);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("%s, No blocking resource available!\n",
				__func__);
			hal_srng_access_end(soc, reo_ring);
			return -EBUSY;
		}
		soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		hal_srng_dump(reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
		sizeof(struct reo_flush_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_cache));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);
	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
		cmd->std.addr_hi);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
		cp->flush_all);

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, val);
}
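
/*
 * Editorial note on the blocking-resource flow (not from the original
 * source): when block_use_after_flush is set, the index picked above by
 * hal_find_zero_bit() is only remembered in soc->index here; the bit in
 * soc->reo_res_bitmap is set later by hal_reo_flush_cache_status() once
 * the status TLV reports no block error, and cleared again by
 * hal_reo_unblock_cache_status() after a successful unblock command.
 * A hypothetical caller sequence:
 *
 *	cmd.u.fl_cache_params.block_use_after_flush = 1;
 *	hal_reo_cmd_flush_cache(reo_cmd_ring, hal_soc, &cmd);
 *	... handle REO_FLUSH_CACHE status, reuse the queue descriptor ...
 *	unblk.u.unblk_cache_params.type = UNBLOCK_RES_INDEX;
 *	unblk.u.unblk_cache_params.index = blocked_index;
 *	hal_reo_cmd_unblock_cache(reo_cmd_ring, hal_soc, &unblk);
 */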

inline int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(soc, reo_ring);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(soc, reo_ring);
			qdf_print("%s: No blocking resource to unblock!\n",
				__func__);
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
		sizeof(struct reo_unblock_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_unblock_cache));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);
	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
		UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			CACHE_BLOCK_RESOURCE_INDEX,
			cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, val);
}

inline int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
		sizeof(struct reo_flush_timeout_list));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_timeout_list));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
		cmd->u.fl_tim_list_params.ac_list);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_RELEASE_DESC_COUNT,
		cmd->u.fl_tim_list_params.min_rel_desc);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_FORWARD_BUF_COUNT,
		cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, val);
}
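
/*
 * Usage sketch (editorial, hypothetical caller): ask REO to flush its
 * timeout list for one AC, releasing at least a minimum number of link
 * descriptors and buffers before status is reported. The field values
 * are illustrative only:
 *
 *	struct hal_reo_cmd_params cmd = {0};
 *
 *	cmd.std.need_status = 1;
 *	cmd.u.fl_tim_list_params.ac_list = ac;
 *	cmd.u.fl_tim_list_params.min_rel_desc = 8;
 *	cmd.u.fl_tim_list_params.min_fwd_buf = 8;
 *	hal_reo_cmd_flush_timeout_list(reo_cmd_ring, hal_soc, &cmd);
 */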

inline int hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_update_queue_params *p;

	p = &cmd->u.upd_queue_params;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries\n", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
		sizeof(struct reo_update_rx_reo_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_update_rx_reo_queue));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);
	hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
		cmd->std.addr_lo, cmd->std.addr_hi);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
		p->update_vld);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
		p->update_assoc_link_desc);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_DISABLE_DUPLICATE_DETECTION,
		p->update_disable_dup_detect);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_SOFT_REORDER_ENABLE,
		p->update_soft_reorder_enab);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_AC, p->update_ac);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_BAR, p->update_bar);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_RTY, p->update_rty);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_OOR_MODE, p->update_oor_mode);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_SIZE, p->update_pn_size);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_SVLD, p->update_svld);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_SSN, p->update_ssn);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
		p->update_seq_2k_err_detect);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN_VALID, p->update_pn_valid);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
		UPDATE_PN, p->update_pn);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		VLD, p->vld);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
		p->assoc_link_desc);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		SOFT_REORDER_ENABLE, p->soft_reorder_enab);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		BAR, p->bar);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		CHK_2K_MODE, p->chk_2k_mode);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		RTY, p->rty);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		OOR_MODE, p->oor_mode);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		PN_CHECK_NEEDED, p->pn_check_needed);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		PN_SHALL_BE_EVEN, p->pn_even);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		PN_SHALL_BE_UNEVEN, p->pn_uneven);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		PN_HANDLING_ENABLE, p->pn_hand_enab);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
		IGNORE_AMPDU_FLAG, p->ignore_ampdu);

	if (p->ba_window_size < 1)
		p->ba_window_size = 1;
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		BA_WINDOW_SIZE, p->ba_window_size - 1);

	if (p->pn_size == 24)
		p->pn_size = PN_SIZE_24;
	else if (p->pn_size == 48)
		p->pn_size = PN_SIZE_48;
	else if (p->pn_size == 128)
		p->pn_size = PN_SIZE_128;
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		PN_SIZE, p->pn_size);

	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		SVLD, p->svld);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		SSN, p->ssn);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
		PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
		PN_31_0, p->pn_31_0);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
		PN_63_32, p->pn_63_32);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
		PN_95_64, p->pn_95_64);
	HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
		PN_127_96, p->pn_127_96);

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER, val);
}
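
/*
 * Usage sketch (editorial, hypothetical caller): adjust the BA window
 * and starting sequence number of an existing RX REO queue. The
 * update_* flags select which queue-descriptor fields the command
 * actually modifies; everything left at 0 is untouched:
 *
 *	struct hal_reo_cmd_params cmd = {0};
 *
 *	cmd.std.need_status = 1;
 *	cmd.std.addr_lo = rx_tid_qdesc_paddr & 0xffffffff;
 *	cmd.std.addr_hi = (uint64_t)rx_tid_qdesc_paddr >> 32;
 *	cmd.u.upd_queue_params.update_ba_window_size = 1;
 *	cmd.u.upd_queue_params.ba_window_size = 64;
 *	cmd.u.upd_queue_params.update_ssn = 1;
 *	cmd.u.upd_queue_params.ssn = start_seq;
 *	hal_reo_cmd_update_rx_queue(reo_cmd_ring, hal_soc, &cmd);
 */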

inline void hal_reo_queue_stats_status(uint32_t *reo_desc,
	struct hal_reo_queue_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_GET_QUEUE_STATS, st->header);

	/* SSN */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
		CURRENT_INDEX)];
	st->curr_idx = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
		CURRENT_INDEX, val);

	/* PN bits */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
		PN_31_0)];
	st->pn_31_0 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
		PN_31_0, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
		PN_63_32)];
	st->pn_63_32 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
		PN_63_32, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
		PN_95_64)];
	st->pn_95_64 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
		PN_95_64, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
		PN_127_96)];
	st->pn_127_96 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
		PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
		LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
		LAST_RX_ENQUEUE_TIMESTAMP, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
		LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
		LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
		RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
		RX_BITMAP_31_0, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
		RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
		RX_BITMAP_63_32, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
		RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
		RX_BITMAP_95_64, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
		RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
		RX_BITMAP_127_96, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
		RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
		RX_BITMAP_159_128, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
		RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
		RX_BITMAP_191_160, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
		RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
		RX_BITMAP_223_192, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
		RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
		RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
		CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
		CURRENT_MPDU_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
		CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
		CURRENT_MSDU_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
		TIMEOUT_COUNT)];
	st->fwd_timeout_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
		TIMEOUT_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
		FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
		FORWARD_DUE_TO_BAR_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
		DUPLICATE_COUNT)];
	st->dup_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
		DUPLICATE_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
		FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
		FRAMES_IN_ORDER_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
		BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
		BAR_RECEIVED_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
		MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
		MPDU_FRAMES_PROCESSED_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
		MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
		MSDU_FRAMES_PROCESSED_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
		TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
		TOTAL_PROCESSED_BYTE_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
		LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
		LATE_RECEIVE_MPDU_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
		WINDOW_JUMP_2K)];
	st->win_jump_2k = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
		WINDOW_JUMP_2K, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
		HOLE_COUNT)];
	st->hole_cnt = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
		HOLE_COUNT, val);
}
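
/*
 * Usage sketch (editorial, hypothetical status-ring handler): once the
 * REO_GET_QUEUE_STATS status TLV shows up on the REO status ring, this
 * helper converts it to host format; the command number carried in
 * st.header lets the caller match it to the value returned earlier by
 * hal_reo_cmd_queue_stats():
 *
 *	struct hal_reo_queue_status st;
 *
 *	hal_reo_queue_stats_status(status_desc, &st);
 *	... st.ssn, st.curr_mpdu_cnt, st.hole_cnt, etc. are populated ...
 */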

inline void hal_reo_flush_queue_status(uint32_t *reo_desc,
	struct hal_reo_flush_queue_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_QUEUE, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_QUEUE_STATUS_2,
		ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
		val);
}

inline void hal_reo_flush_cache_status(uint32_t *reo_desc, struct hal_soc *soc,
	struct hal_reo_flush_cache_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_CACHE, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
		ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2, ERROR_DETECTED,
		val);

	/* block error */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
		BLOCK_ERROR_DETAILS)];
	st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
		BLOCK_ERROR_DETAILS, val);
	if (!st->block_error)
		qdf_set_bit(soc->index, (unsigned long *)&soc->reo_res_bitmap);

	/* cache flush status */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
		CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
	st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
		CACHE_CONTROLLER_FLUSH_STATUS_HIT, val);

	/* cache flush descriptor type */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
		CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
	st->cache_flush_status_desc_type =
		HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
			CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE, val);

	/* cache flush count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
		CACHE_CONTROLLER_FLUSH_COUNT)];
	st->cache_flush_cnt = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
		CACHE_CONTROLLER_FLUSH_COUNT, val);
}
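
/*
 * Editorial observation (not from the original source): soc->index is a
 * single slot, so the qdf_set_bit() above and the matching
 * qdf_clear_bit() in hal_reo_unblock_cache_status() can only track one
 * blocking CMD_FLUSH_CACHE at a time; issuing a second blocking flush
 * before the first status arrives would overwrite soc->index.
 */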

inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
	struct hal_soc *soc,
	struct hal_reo_unblk_cache_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_UNBLOCK_CACHE, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
		ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
		ERROR_DETECTED, val);

	/* unblock type */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
		UNBLOCK_TYPE)];
	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
		UNBLOCK_TYPE, val);

	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
		qdf_clear_bit(soc->index,
			(unsigned long *)&soc->reo_res_bitmap);
}

inline void hal_reo_flush_timeout_list_status(uint32_t *reo_desc,
	struct hal_reo_flush_timeout_list_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_TIMEOUT_LIST, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
		ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
		ERROR_DETECTED, val);

	/* list empty */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
		TIMOUT_LIST_EMPTY)];
	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
		TIMOUT_LIST_EMPTY, val);

	/* release descriptor count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
		RELEASE_DESC_COUNT)];
	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
		RELEASE_DESC_COUNT, val);

	/* forward buf count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
		FORWARD_BUF_COUNT)];
	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
		FORWARD_BUF_COUNT, val);
}

inline void hal_reo_desc_thres_reached_status(uint32_t *reo_desc,
	struct hal_reo_desc_thres_reached_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc,
		REO_DESCRIPTOR_THRESHOLD_REACHED, st->header);

	/* threshold index */
	val = reo_desc[HAL_OFFSET_DW(
		REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
		THRESHOLD_INDEX)];
	st->thres_index = HAL_GET_FIELD(
		REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
		THRESHOLD_INDEX, val);

	/* link desc counters */
	val = reo_desc[HAL_OFFSET_DW(
		REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
		LINK_DESCRIPTOR_COUNTER0)];
	st->link_desc_counter0 = HAL_GET_FIELD(
		REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
		LINK_DESCRIPTOR_COUNTER0, val);
	val = reo_desc[HAL_OFFSET_DW(
		REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
		LINK_DESCRIPTOR_COUNTER1)];
	st->link_desc_counter1 = HAL_GET_FIELD(
		REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
		LINK_DESCRIPTOR_COUNTER1, val);
	val = reo_desc[HAL_OFFSET_DW(
		REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
		LINK_DESCRIPTOR_COUNTER2)];
	st->link_desc_counter2 = HAL_GET_FIELD(
		REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
		LINK_DESCRIPTOR_COUNTER2, val);
	val = reo_desc[HAL_OFFSET_DW(
		REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
		LINK_DESCRIPTOR_COUNTER_SUM)];
	st->link_desc_counter_sum = HAL_GET_FIELD(
		REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
		LINK_DESCRIPTOR_COUNTER_SUM, val);
}

inline void hal_reo_rx_update_queue_status(uint32_t *reo_desc,
	struct hal_reo_update_rx_queue_status *st)
{
	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc,
		REO_UPDATE_RX_REO_QUEUE, st->header);
}

/**
 * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
 *	with command number
 * @soc: Handle to HAL SoC structure
 * @hal_srng: Handle to HAL SRNG structure
 *
 * Return: none
 */
inline void hal_reo_init_cmd_ring(struct hal_soc *soc, void *hal_srng)
{
	int cmd_num;
	uint32_t *desc_addr;
	struct hal_srng_params srng_params;
	uint32_t desc_size;
	uint32_t num_desc;

	hal_get_srng_params(soc, hal_srng, &srng_params);

	desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
	desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
	desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
	num_desc = srng_params.num_entries;
	cmd_num = 1;
	while (num_desc) {
		/* Offsets of descriptor fields defined in HW headers start
		 * from the field after TLV header */
		HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
			REO_CMD_NUMBER, cmd_num);
		desc_addr += desc_size;
		num_desc--;
		cmd_num++;
	}

	soc->reo_res_bitmap = 0;
}
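
/*
 * Usage sketch (editorial, hypothetical caller): this is expected to be
 * run once over a freshly set-up REO command SRNG, before any command
 * is posted, so that every ring entry carries a non-zero REO_CMD_NUMBER
 * for the hal_reo_cmd_*() helpers above to read back and return:
 *
 *	hal_reo_init_cmd_ring(hal_soc, reo_cmd_ring);
 */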