hal_reo.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996
  1. /*
  2. * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "hal_hw_headers.h"
  19. #include "hal_reo.h"
  20. #include "hal_tx.h"
  21. #include "qdf_module.h"
  22. #define BLOCK_RES_MASK 0xF
  23. static inline uint8_t hal_find_one_bit(uint8_t x)
  24. {
  25. uint8_t y = (x & (~x + 1)) & BLOCK_RES_MASK;
  26. uint8_t pos;
  27. for (pos = 0; y; y >>= 1)
  28. pos++;
  29. return pos-1;
  30. }
  31. static inline uint8_t hal_find_zero_bit(uint8_t x)
  32. {
  33. uint8_t y = (~x & (x+1)) & BLOCK_RES_MASK;
  34. uint8_t pos;
  35. for (pos = 0; y; y >>= 1)
  36. pos++;
  37. return pos-1;
  38. }
/**
 * hal_reo_cmd_set_descr_addr() - Program the 40-bit queue/flush descriptor
 * physical address into a REO command descriptor.
 * @reo_desc: command descriptor body (already advanced past the TLV header)
 * @type: REO command type; selects which HW field layout is written
 * @paddr_lo: bits 31:0 of the descriptor physical address
 * @paddr_hi: bits 39:32 of the descriptor physical address
 *
 * Only the four command types below carry a descriptor address; any other
 * type is logged as an error and the descriptor is left untouched.
 */
inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
				       enum hal_reo_cmd_type type,
				       uint32_t paddr_lo,
				       uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
				   FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
				   FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
				   RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
				   RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid REO command type", __func__);
		break;
	}
}
/**
 * hal_reo_cmd_queue_stats() - Post a GET_QUEUE_STATS command on the REO
 * command ring.
 * @reo_ring: REO command SRNG handle
 * @soc: HAL SoC context
 * @cmd: command parameters; std.addr_lo/addr_hi address the REO queue
 *       descriptor, u.stats_params.clear requests clear-after-read
 *
 * Return: HW-assigned command number of the posted command, or -EBUSY if
 * the command ring is full.
 */
inline int hal_reo_cmd_queue_stats(void *reo_ring, struct hal_soc *soc,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			sizeof(struct reo_get_queue_stats));
	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_get_queue_stats));
	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);
	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
				   cmd->std.addr_lo,
				   cmd->std.addr_hi);
	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
			   cmd->u.stats_params.clear);
	hal_srng_access_end(soc, reo_ring);
	/* NOTE(review): the descriptor is read back after
	 * hal_srng_access_end() to recover the HW-assigned command number;
	 * this assumes the ring entry remains readable at this point —
	 * confirm against SRNG ownership semantics. */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
qdf_export_symbol(hal_reo_cmd_queue_stats);
/**
 * hal_reo_cmd_flush_queue() - Post a FLUSH_QUEUE command on the REO
 * command ring.
 * @reo_ring: REO command SRNG handle
 * @soc: HAL SoC context
 * @cmd: command parameters; std.addr_lo/addr_hi address the queue
 *       descriptor to flush, u.fl_queue_params selects optional blocking
 *       of the descriptor after the flush
 *
 * Return: HW-assigned command number of the posted command, or -EBUSY if
 * the command ring is full.
 */
inline int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			sizeof(struct reo_flush_queue));
	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_queue));
	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);
	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
				   cmd->std.addr_hi);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			   BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
			   cmd->u.fl_queue_params.block_use_after_flush);
	/* Blocking resource index is only meaningful when the caller asked
	 * for the descriptor to be blocked after the flush */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
				   BLOCK_RESOURCE_INDEX,
				   cmd->u.fl_queue_params.index);
	}
	hal_srng_access_end(soc, reo_ring);
	/* NOTE(review): descriptor read-back after hal_srng_access_end();
	 * confirm the entry is still readable here. */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
qdf_export_symbol(hal_reo_cmd_flush_queue);
/**
 * hal_reo_cmd_flush_cache() - Post a FLUSH_CACHE command on the REO
 * command ring.
 * @reo_ring: REO command SRNG handle
 * @soc: HAL SoC context; holds the blocking-resource bitmap and the index
 *       of the resource selected for this flush
 * @cmd: command parameters (u.fl_cache_params)
 *
 * Return: HW-assigned command number of the posted command, or -EBUSY if
 * the command ring is full or no blocking resource is available.
 */
inline int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(soc, reo_ring);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("%s, No blocking resource available!",
				  __func__);
			hal_srng_access_end(soc, reo_ring);
			return -EBUSY;
		}
		/* Remember which resource this flush selected; the status
		 * handler (hal_reo_flush_cache_status) marks it busy in
		 * reo_res_bitmap on success */
		soc->index = index;
	}
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		hal_srng_dump(reo_ring);
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			sizeof(struct reo_flush_cache));
	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_cache));
	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);
	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
				   cmd->std.addr_hi);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);
	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);
	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
				   CACHE_BLOCK_RESOURCE_INDEX, index);
	}
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			   BLOCK_CACHE_USAGE_AFTER_FLUSH,
			   cp->block_use_after_flush);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
			   cp->flush_all);
	hal_srng_access_end(soc, reo_ring);
	/* NOTE(review): descriptor read-back after hal_srng_access_end();
	 * confirm the entry is still readable here. */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
qdf_export_symbol(hal_reo_cmd_flush_cache);
/**
 * hal_reo_cmd_unblock_cache() - Post an UNBLOCK_CACHE command on the REO
 * command ring.
 * @reo_ring: REO command SRNG handle
 * @soc: HAL SoC context (holds the blocking-resource bitmap)
 * @cmd: command parameters (u.unblk_cache_params: type, resource index)
 *
 * Return: HW-assigned command number of the posted command, or -EBUSY if
 * the command ring is full or no resource is currently blocked.
 */
inline int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
				     struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(soc, reo_ring);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		/* NOTE(review): the computed 'index' is only used to verify
		 * that at least one resource is currently blocked; the
		 * descriptor is programmed from the caller-supplied
		 * cmd->u.unblk_cache_params.index below — confirm this is
		 * intentional. */
		index = hal_find_one_bit(soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(soc, reo_ring);
			qdf_print("%s: No blocking resource to unblock!",
				  __func__);
			return -EBUSY;
		}
	}
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			sizeof(struct reo_unblock_cache));
	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_unblock_cache));
	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);
	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			   UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);
	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
				   CACHE_BLOCK_RESOURCE_INDEX,
				   cmd->u.unblk_cache_params.index);
	}
	hal_srng_access_end(soc, reo_ring);
	/* NOTE(review): descriptor read-back after hal_srng_access_end();
	 * confirm the entry is still readable here. */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
qdf_export_symbol(hal_reo_cmd_unblock_cache);
/**
 * hal_reo_cmd_flush_timeout_list() - Post a FLUSH_TIMEOUT_LIST command on
 * the REO command ring.
 * @reo_ring: REO command SRNG handle
 * @soc: HAL SoC context
 * @cmd: command parameters (u.fl_tim_list_params: AC timeout list to
 *       flush, minimum release-descriptor and forward-buffer counts)
 *
 * Return: HW-assigned command number of the posted command, or -EBUSY if
 * the command ring is full.
 */
inline int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc,
					  struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			sizeof(struct reo_flush_timeout_list));
	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	qdf_mem_zero((void *)reo_desc, sizeof(struct reo_flush_timeout_list));
	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
			   REO_STATUS_REQUIRED, cmd->std.need_status);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
			   cmd->u.fl_tim_list_params.ac_list);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_RELEASE_DESC_COUNT,
			   cmd->u.fl_tim_list_params.min_rel_desc);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
			   MINIMUM_FORWARD_BUF_COUNT,
			   cmd->u.fl_tim_list_params.min_fwd_buf);
	hal_srng_access_end(soc, reo_ring);
	/* NOTE(review): descriptor read-back after hal_srng_access_end();
	 * confirm the entry is still readable here. */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
			     val);
}
qdf_export_symbol(hal_reo_cmd_flush_timeout_list);
  280. inline int hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc,
  281. struct hal_reo_cmd_params *cmd)
  282. {
  283. uint32_t *reo_desc, val;
  284. struct hal_reo_cmd_update_queue_params *p;
  285. p = &cmd->u.upd_queue_params;
  286. hal_srng_access_start(soc, reo_ring);
  287. reo_desc = hal_srng_src_get_next(soc, reo_ring);
  288. if (!reo_desc) {
  289. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  290. "%s: Out of cmd ring entries", __func__);
  291. hal_srng_access_end(soc, reo_ring);
  292. return -EBUSY;
  293. }
  294. HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
  295. sizeof(struct reo_update_rx_reo_queue));
  296. /* Offsets of descriptor fields defined in HW headers start from
  297. * the field after TLV header */
  298. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  299. qdf_mem_zero((void *)reo_desc, sizeof(struct reo_update_rx_reo_queue));
  300. HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
  301. REO_STATUS_REQUIRED, cmd->std.need_status);
  302. hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
  303. cmd->std.addr_lo, cmd->std.addr_hi);
  304. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  305. UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
  306. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
  307. p->update_vld);
  308. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  309. UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
  310. p->update_assoc_link_desc);
  311. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  312. UPDATE_DISABLE_DUPLICATE_DETECTION,
  313. p->update_disable_dup_detect);
  314. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  315. UPDATE_DISABLE_DUPLICATE_DETECTION,
  316. p->update_disable_dup_detect);
  317. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  318. UPDATE_SOFT_REORDER_ENABLE,
  319. p->update_soft_reorder_enab);
  320. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  321. UPDATE_AC, p->update_ac);
  322. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  323. UPDATE_BAR, p->update_bar);
  324. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  325. UPDATE_BAR, p->update_bar);
  326. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  327. UPDATE_RTY, p->update_rty);
  328. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  329. UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
  330. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  331. UPDATE_OOR_MODE, p->update_oor_mode);
  332. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  333. UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
  334. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  335. UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
  336. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  337. UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
  338. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  339. UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
  340. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  341. UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
  342. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  343. UPDATE_PN_SIZE, p->update_pn_size);
  344. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  345. UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
  346. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  347. UPDATE_SVLD, p->update_svld);
  348. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  349. UPDATE_SSN, p->update_ssn);
  350. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  351. UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
  352. p->update_seq_2k_err_detect);
  353. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  354. UPDATE_PN_VALID, p->update_pn_valid);
  355. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  356. UPDATE_PN, p->update_pn);
  357. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  358. RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
  359. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  360. VLD, p->vld);
  361. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  362. ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
  363. p->assoc_link_desc);
  364. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  365. DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
  366. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  367. SOFT_REORDER_ENABLE, p->soft_reorder_enab);
  368. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
  369. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  370. BAR, p->bar);
  371. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  372. CHK_2K_MODE, p->chk_2k_mode);
  373. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  374. RTY, p->rty);
  375. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  376. OOR_MODE, p->oor_mode);
  377. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  378. PN_CHECK_NEEDED, p->pn_check_needed);
  379. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  380. PN_SHALL_BE_EVEN, p->pn_even);
  381. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  382. PN_SHALL_BE_UNEVEN, p->pn_uneven);
  383. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  384. PN_HANDLING_ENABLE, p->pn_hand_enab);
  385. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  386. IGNORE_AMPDU_FLAG, p->ignore_ampdu);
  387. if (p->ba_window_size < 1)
  388. p->ba_window_size = 1;
  389. /*
  390. * WAR to get 2k exception in Non BA case.
  391. * Setting window size to 2 to get 2k jump exception
  392. * when we receive aggregates in Non BA case
  393. */
  394. if (p->ba_window_size == 1)
  395. p->ba_window_size++;
  396. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  397. BA_WINDOW_SIZE, p->ba_window_size - 1);
  398. if (p->pn_size == 24)
  399. p->pn_size = PN_SIZE_24;
  400. else if (p->pn_size == 48)
  401. p->pn_size = PN_SIZE_48;
  402. else if (p->pn_size == 128)
  403. p->pn_size = PN_SIZE_128;
  404. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  405. PN_SIZE, p->pn_size);
  406. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  407. SVLD, p->svld);
  408. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  409. SSN, p->ssn);
  410. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  411. SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
  412. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  413. PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
  414. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
  415. PN_31_0, p->pn_31_0);
  416. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
  417. PN_63_32, p->pn_63_32);
  418. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
  419. PN_95_64, p->pn_95_64);
  420. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
  421. PN_127_96, p->pn_127_96);
  422. hal_srng_access_end(soc, reo_ring);
  423. val = reo_desc[CMD_HEADER_DW_OFFSET];
  424. return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
  425. val);
  426. }
  427. qdf_export_symbol(hal_reo_cmd_update_rx_queue);
/**
 * hal_reo_queue_stats_status() - Parse a REO GET_QUEUE_STATS status TLV
 * into a hal_reo_queue_status structure.
 * @reo_desc: status descriptor, starting at the TLV header
 * @st: output; receives header, SSN/index, PN, timestamps, the 256-bit
 *      RX reorder bitmap and the various MPDU/MSDU/error counters
 *
 * Each field is read from its HW dword (HAL_OFFSET_DW) and extracted
 * with the matching HAL_GET_FIELD mask/shift.
 */
inline void hal_reo_queue_stats_status(uint32_t *reo_desc,
				       struct hal_reo_queue_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_GET_QUEUE_STATS, st->header);

	/* SSN */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
				     CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
			      CURRENT_INDEX, val);

	/* PN bits */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
				     PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
			      PN_31_0, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
				     PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
			      PN_63_32, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
				     PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
			      PN_95_64, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
				     PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
				     LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
				     LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
				     RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
			      RX_BITMAP_31_0, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
				     RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
			      RX_BITMAP_63_32, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
				     RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
			      RX_BITMAP_95_64, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
				     RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
			      RX_BITMAP_127_96, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
				     RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
			      RX_BITMAP_159_128, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
				     RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
			      RX_BITMAP_191_160, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
				     RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
			      RX_BITMAP_223_192, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
				     RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
				     CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MPDU_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
				     CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MSDU_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      TIMEOUT_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      FORWARD_DUE_TO_BAR_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      DUPLICATE_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
				     FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      FRAMES_IN_ORDER_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
				     BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      BAR_RECEIVED_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
				     MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
			      MPDU_FRAMES_PROCESSED_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
				     MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
			      MSDU_FRAMES_PROCESSED_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
				     TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
			      TOTAL_PROCESSED_BYTE_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      LATE_RECEIVE_MPDU_COUNT, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      WINDOW_JUMP_2K, val);
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      HOLE_COUNT, val);
}
qdf_export_symbol(hal_reo_queue_stats_status);
  587. inline void hal_reo_flush_queue_status(uint32_t *reo_desc,
  588. struct hal_reo_flush_queue_status *st)
  589. {
  590. uint32_t val;
  591. /* Offsets of descriptor fields defined in HW headers start
  592. * from the field after TLV header */
  593. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  594. /* header */
  595. HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_QUEUE, st->header);
  596. /* error bit */
  597. val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2,
  598. ERROR_DETECTED)];
  599. st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
  600. val);
  601. }
  602. qdf_export_symbol(hal_reo_flush_queue_status);
  603. inline void hal_reo_flush_cache_status(uint32_t *reo_desc, struct hal_soc *soc,
  604. struct hal_reo_flush_cache_status *st)
  605. {
  606. uint32_t val;
  607. /* Offsets of descriptor fields defined in HW headers start
  608. * from the field after TLV header */
  609. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  610. /* header */
  611. HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_CACHE, st->header);
  612. /* error bit */
  613. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  614. ERROR_DETECTED)];
  615. st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
  616. val);
  617. /* block error */
  618. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  619. BLOCK_ERROR_DETAILS)];
  620. st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
  621. BLOCK_ERROR_DETAILS,
  622. val);
  623. if (!st->block_error)
  624. qdf_set_bit(soc->index, (unsigned long *)&soc->reo_res_bitmap);
  625. /* cache flush status */
  626. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  627. CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
  628. st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
  629. CACHE_CONTROLLER_FLUSH_STATUS_HIT,
  630. val);
  631. /* cache flush descriptor type */
  632. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  633. CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
  634. st->cache_flush_status_desc_type =
  635. HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
  636. CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
  637. val);
  638. /* cache flush count */
  639. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  640. CACHE_CONTROLLER_FLUSH_COUNT)];
  641. st->cache_flush_cnt =
  642. HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
  643. CACHE_CONTROLLER_FLUSH_COUNT,
  644. val);
  645. }
  646. qdf_export_symbol(hal_reo_flush_cache_status);
/**
 * hal_reo_unblock_cache_status() - Parse a REO UNBLOCK_CACHE status TLV.
 * @reo_desc: status descriptor, starting at the TLV header
 * @soc: HAL SoC context; on a successful resource-index unblock the bit
 *       soc->index is cleared in soc->reo_res_bitmap
 * @st: output; receives header, error flag and unblock type
 */
inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
					 struct hal_soc *soc,
					 struct hal_reo_unblk_cache_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_UNBLOCK_CACHE, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
				  ERROR_DETECTED,
				  val);

	/* unblock type */
	val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
				     UNBLOCK_TYPE)];
	st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
					 UNBLOCK_TYPE,
					 val);
	/* Successful per-resource unblock: release the blocking resource
	 * tracked in soc->index back to the bitmap */
	if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
		qdf_clear_bit(soc->index,
			      (unsigned long *)&soc->reo_res_bitmap);
}
qdf_export_symbol(hal_reo_unblock_cache_status);
/**
 * hal_reo_flush_timeout_list_status() - Parse a REO FLUSH_TIMEOUT_LIST
 * status TLV.
 * @reo_desc: status descriptor, starting at the TLV header
 * @st: output; receives header, error flag, list-empty flag, and the
 *      release-descriptor / forward-buffer counts
 */
inline void hal_reo_flush_timeout_list_status(
			uint32_t *reo_desc,
			struct hal_reo_flush_timeout_list_status *st)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	HAL_REO_STATUS_GET_HEADER(reo_desc, REO_FLUSH_TIMEOUT_LIST, st->header);

	/* error bit */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				     ERROR_DETECTED)];
	st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				  ERROR_DETECTED,
				  val);

	/* list empty ("TIMOUT" spelling comes from the HW headers) */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				     TIMOUT_LIST_EMPTY)];
	st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
				       TIMOUT_LIST_EMPTY,
				       val);

	/* release descriptor count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				     RELEASE_DESC_COUNT)];
	st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
					 RELEASE_DESC_COUNT,
					 val);

	/* forward buf count */
	val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
				     FORWARD_BUF_COUNT)];
	st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
					FORWARD_BUF_COUNT,
					val);
}
qdf_export_symbol(hal_reo_flush_timeout_list_status);
  710. inline void hal_reo_desc_thres_reached_status(
  711. uint32_t *reo_desc,
  712. struct hal_reo_desc_thres_reached_status *st)
  713. {
  714. uint32_t val;
  715. /* Offsets of descriptor fields defined in HW headers start
  716. * from the field after TLV header */
  717. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  718. /* header */
  719. HAL_REO_STATUS_GET_HEADER(reo_desc,
  720. REO_DESCRIPTOR_THRESHOLD_REACHED, st->header);
  721. /* threshold index */
  722. val = reo_desc[HAL_OFFSET_DW(
  723. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
  724. THRESHOLD_INDEX)];
  725. st->thres_index = HAL_GET_FIELD(
  726. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
  727. THRESHOLD_INDEX,
  728. val);
  729. /* link desc counters */
  730. val = reo_desc[HAL_OFFSET_DW(
  731. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
  732. LINK_DESCRIPTOR_COUNTER0)];
  733. st->link_desc_counter0 = HAL_GET_FIELD(
  734. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
  735. LINK_DESCRIPTOR_COUNTER0,
  736. val);
  737. val = reo_desc[HAL_OFFSET_DW(
  738. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
  739. LINK_DESCRIPTOR_COUNTER1)];
  740. st->link_desc_counter1 = HAL_GET_FIELD(
  741. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
  742. LINK_DESCRIPTOR_COUNTER1,
  743. val);
  744. val = reo_desc[HAL_OFFSET_DW(
  745. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
  746. LINK_DESCRIPTOR_COUNTER2)];
  747. st->link_desc_counter2 = HAL_GET_FIELD(
  748. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
  749. LINK_DESCRIPTOR_COUNTER2,
  750. val);
  751. val = reo_desc[HAL_OFFSET_DW(
  752. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
  753. LINK_DESCRIPTOR_COUNTER_SUM)];
  754. st->link_desc_counter_sum = HAL_GET_FIELD(
  755. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
  756. LINK_DESCRIPTOR_COUNTER_SUM,
  757. val);
  758. }
  759. qdf_export_symbol(hal_reo_desc_thres_reached_status);
  760. inline void hal_reo_rx_update_queue_status(uint32_t *reo_desc,
  761. struct hal_reo_update_rx_queue_status *st)
  762. {
  763. /* Offsets of descriptor fields defined in HW headers start
  764. * from the field after TLV header */
  765. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  766. /* header */
  767. HAL_REO_STATUS_GET_HEADER(reo_desc,
  768. REO_UPDATE_RX_REO_QUEUE, st->header);
  769. }
  770. qdf_export_symbol(hal_reo_rx_update_queue_status);
  771. /**
  772. * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
  773. * with command number
  774. * @hal_soc: Handle to HAL SoC structure
  775. * @hal_ring: Handle to HAL SRNG structure
  776. *
  777. * Return: none
  778. */
  779. inline void hal_reo_init_cmd_ring(struct hal_soc *soc, void *hal_srng)
  780. {
  781. int cmd_num;
  782. uint32_t *desc_addr;
  783. struct hal_srng_params srng_params;
  784. uint32_t desc_size;
  785. uint32_t num_desc;
  786. hal_get_srng_params(soc, hal_srng, &srng_params);
  787. desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
  788. desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
  789. desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
  790. num_desc = srng_params.num_entries;
  791. cmd_num = 1;
  792. while (num_desc) {
  793. /* Offsets of descriptor fields defined in HW headers start
  794. * from the field after TLV header */
  795. HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
  796. REO_CMD_NUMBER, cmd_num);
  797. desc_addr += desc_size;
  798. num_desc--; cmd_num++;
  799. }
  800. soc->reo_res_bitmap = 0;
  801. }
  802. qdf_export_symbol(hal_reo_init_cmd_ring);