/* hal_reo.c */
  1. /*
  2. * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "hal_api.h"
  19. #include "hal_hw_headers.h"
  20. #include "hal_reo.h"
  21. #include "hal_tx.h"
  22. #include "hal_rx.h"
  23. #include "qdf_module.h"
  24. /* TODO: See if the following definition is available in HW headers */
  25. #define HAL_REO_OWNED 4
  26. #define HAL_REO_QUEUE_DESC 8
  27. #define HAL_REO_QUEUE_EXT_DESC 9
  28. /* TODO: Using associated link desc counter 1 for Rx. Check with FW on
  29. * how these counters are assigned
  30. */
  31. #define HAL_RX_LINK_DESC_CNTR 1
  32. /* TODO: Following definition should be from HW headers */
  33. #define HAL_DESC_REO_OWNED 4
/**
 * hal_uniform_desc_hdr_setup - Setup the uniform descriptor header
 * @desc: pointer to the descriptor memory (first dword holds the
 *        uniform descriptor header)
 * @owner: owner info programmed into the OWNER field
 * @buffer_type: buffer type programmed into the BUFFER_TYPE field
 */
static inline void hal_uniform_desc_hdr_setup(uint32_t *desc, uint32_t owner,
	uint32_t buffer_type)
{
	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, OWNER,
		owner);
	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, BUFFER_TYPE,
		buffer_type);
}
  47. #ifndef TID_TO_WME_AC
  48. #define WME_AC_BE 0 /* best effort */
  49. #define WME_AC_BK 1 /* background */
  50. #define WME_AC_VI 2 /* video */
  51. #define WME_AC_VO 3 /* voice */
  52. #define TID_TO_WME_AC(_tid) ( \
  53. (((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
  54. (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
  55. (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
  56. WME_AC_VO)
  57. #endif
  58. #define HAL_NON_QOS_TID 16
/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle (not referenced in this function)
 * @tid: TID; also written into the SW-only RECEIVE_QUEUE_NUMBER meta field
 * @ba_window_size: BlockAck window size (clamped to a minimum of 1)
 * @start_seq: Starting sequence number; programmed as SSN only when it
 *             fits in 12 bits (<= 0xfff)
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 *                  (currently unused here — TODO confirm whether it should
 *                  be programmed into the descriptor)
 * @pn_type: PN check type; HAL_PN_WPA / HAL_PN_WAPI_EVEN / HAL_PN_WAPI_UNEVEN
 *           enable PN checking, any other value disables it
 */
void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
	int pn_type)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));

	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
		HAL_REO_QUEUE_DESC);

	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0,
		RESERVED_0A, 0xDDBEEF);

	/* This a just a SW meta data and will be copied to REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
		RECEIVE_QUEUE_NUMBER, tid);

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		ASSOCIATED_LINK_DESCRIPTOR_COUNTER, HAL_RX_LINK_DESC_CNTR);

	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */

	/* Map the TID to its WME access category for the AC field */
	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);

	if (ba_window_size < 1)
		ba_window_size = 1;

	/* Set RTY bit for non-BA case. Duplicate detection is currently not
	 * done by HW in non-BA case if RTY bit is not set.
	 * TODO: This is a temporary War and should be removed once HW fix is
	 * made to check and discard duplicates even if RTY bit is not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1);

	/* HW field holds (window size - 1) */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
		ba_window_size - 1);

	/* Derive PN enable/size from the requested PN type */
	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
		pn_enable);

	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			PN_SHALL_BE_UNEVEN, 1);

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_HANDLING_ENABLE,
		pn_enable);

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
		pn_size);

	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		IGNORE_AMPDU_FLAG, 1);

	/* SSN is a 12-bit field; skip programming out-of-range values */
	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
			start_seq);

	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0);

	/* TODO: Check if we should set start PN for WAPI */

#ifdef notyet
	/* Setup first queue extension if BA window size is more than 1 */
	if (ba_window_size > 1) {
		reo_queue_ext_desc =
			(uint32_t *)(((struct rx_reo_queue *)reo_queue_desc) +
			1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
	/* Setup second queue extension if BA window size is more than 105 */
	if (ba_window_size > 105) {
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
	/* Setup third queue extension if BA window size is more than 210 */
	if (ba_window_size > 210) {
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
#else
	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is recevied. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	if (tid != HAL_NON_QOS_TID) {
		/* Three extension descriptors follow the base descriptor
		 * contiguously in memory; zero and initialize all of them.
		 */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue *)reo_queue_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc, 3 *
			sizeof(struct rx_reo_queue_ext));
		/* Initialize first reo queue extension descriptor */
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xADBEEF);
		/* Initialize second reo queue extension descriptor */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xBDBEEF);
		/* Initialize third reo queue extension descriptor */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xCDBEEF);
	}
#endif
}
qdf_export_symbol(hal_reo_qdesc_setup);
  220. /**
  221. * hal_get_ba_aging_timeout - Get BA Aging timeout
  222. *
  223. * @hal_soc: Opaque HAL SOC handle
  224. * @ac: Access category
  225. * @value: window size to get
  226. */
  227. void hal_get_ba_aging_timeout(void *hal_soc, uint8_t ac,
  228. uint32_t *value)
  229. {
  230. struct hal_soc *soc = (struct hal_soc *)hal_soc;
  231. switch (ac) {
  232. case WME_AC_BE:
  233. *value = HAL_REG_READ(soc,
  234. HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
  235. SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
  236. break;
  237. case WME_AC_BK:
  238. *value = HAL_REG_READ(soc,
  239. HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
  240. SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
  241. break;
  242. case WME_AC_VI:
  243. *value = HAL_REG_READ(soc,
  244. HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
  245. SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
  246. break;
  247. case WME_AC_VO:
  248. *value = HAL_REG_READ(soc,
  249. HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
  250. SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
  251. break;
  252. default:
  253. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  254. "Invalid AC: %d\n", ac);
  255. }
  256. }
  257. qdf_export_symbol(hal_get_ba_aging_timeout);
/**
 * hal_set_ba_aging_timeout - Set BA Aging timeout
 *
 * @hal_soc: Opaque HAL SOC handle
 * @ac: WME Access category: WME_AC_BE (0), WME_AC_BK (1), WME_AC_VI (2),
 *      WME_AC_VO (3) — the original comment's "0 - Background, 1 - Best
 *      Effort" contradicted the WME_AC_* defines above and is corrected here
 * @value: Input value to set; written to the register scaled up by 1000
 *         (see hal_get_ba_aging_timeout, which divides by 1000)
 */
void hal_set_ba_aging_timeout(void *hal_soc, uint8_t ac,
	uint32_t value)
{
	struct hal_soc *soc = (struct hal_soc *)hal_soc;

	switch (ac) {
	case WME_AC_BE:
		HAL_REG_WRITE(soc,
			HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
			SEQ_WCSS_UMAC_REO_REG_OFFSET),
			value * 1000);
		break;
	case WME_AC_BK:
		HAL_REG_WRITE(soc,
			HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
			SEQ_WCSS_UMAC_REO_REG_OFFSET),
			value * 1000);
		break;
	case WME_AC_VI:
		HAL_REG_WRITE(soc,
			HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
			SEQ_WCSS_UMAC_REO_REG_OFFSET),
			value * 1000);
		break;
	case WME_AC_VO:
		HAL_REG_WRITE(soc,
			HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
			SEQ_WCSS_UMAC_REO_REG_OFFSET),
			value * 1000);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"Invalid AC: %d\n", ac);
	}
}
qdf_export_symbol(hal_set_ba_aging_timeout);
  301. #define BLOCK_RES_MASK 0xF
  302. static inline uint8_t hal_find_one_bit(uint8_t x)
  303. {
  304. uint8_t y = (x & (~x + 1)) & BLOCK_RES_MASK;
  305. uint8_t pos;
  306. for (pos = 0; y; y >>= 1)
  307. pos++;
  308. return pos-1;
  309. }
  310. static inline uint8_t hal_find_zero_bit(uint8_t x)
  311. {
  312. uint8_t y = (~x & (x+1)) & BLOCK_RES_MASK;
  313. uint8_t pos;
  314. for (pos = 0; y; y >>= 1)
  315. pos++;
  316. return pos-1;
  317. }
/**
 * hal_reo_cmd_set_descr_addr - Program the 40-bit descriptor physical
 * address into a REO command descriptor
 * @reo_desc: REO command descriptor (pointing past the TLV header)
 * @type: REO command type; selects which HW field pair receives the address
 * @paddr_lo: lower 32 bits of the descriptor physical address
 * @paddr_hi: upper 8 bits (39:32) of the descriptor physical address
 *
 * Unknown command types are traced as errors and leave the descriptor
 * unmodified.
 */
inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
				       enum hal_reo_cmd_type type,
				       uint32_t paddr_lo,
				       uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
			RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
			FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
			FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Invalid REO command type", __func__);
		break;
	}
}
/**
 * hal_reo_cmd_queue_stats - Post a GET_QUEUE_STATS command to the REO
 * command ring
 * @reo_ring: REO command SRNG ring handle
 * @soc: HAL SOC handle
 * @cmd: command parameters; std.addr_lo/addr_hi give the REO queue
 *       descriptor address, u.stats_params.clear requests clearing stats
 *
 * Return: REO command number read back from the descriptor header on
 * success, -EBUSY if the command ring is full.
 */
inline int hal_reo_cmd_queue_stats(void *reo_ring, struct hal_soc *soc,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
		sizeof(struct reo_get_queue_stats));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* Zero everything past the uniform command header */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_get_queue_stats) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
		cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
		cmd->u.stats_params.clear);

	hal_srng_access_end(soc, reo_ring);

	/* Read back the command number HW will report status against */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_queue_stats);
/**
 * hal_reo_cmd_flush_queue - Post a FLUSH_QUEUE command to the REO
 * command ring
 * @reo_ring: REO command SRNG ring handle
 * @soc: HAL SOC handle
 * @cmd: command parameters; std.addr_lo/addr_hi give the descriptor to
 *       flush, u.fl_queue_params controls post-flush blocking
 *
 * Return: REO command number read back from the descriptor header on
 * success, -EBUSY if the command ring is full.
 */
inline int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
		sizeof(struct reo_flush_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* Zero everything past the uniform command header */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_flush_queue) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
		BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
		cmd->u.fl_queue_params.block_use_after_flush);

	/* Blocking resource index only applies when blocking is requested */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(soc, reo_ring);

	/* Read back the command number HW will report status against */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_flush_queue);
/**
 * hal_reo_cmd_flush_cache - Post a FLUSH_CACHE command to the REO
 * command ring
 * @reo_ring: REO command SRNG ring handle
 * @soc: HAL SOC handle
 * @cmd: command parameters (u.fl_cache_params)
 *
 * Return: REO command number read back from the descriptor header on
 * success, -EBUSY if the command ring is full or (when blocking was
 * requested) no cache blocking resource is free.
 */
inline int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc,
				   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(soc, reo_ring);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("%s, No blocking resource available!",
				__func__);
			hal_srng_access_end(soc, reo_ring);
			return -EBUSY;
		}
		/* NOTE(review): only soc->index is recorded here; the
		 * reo_res_bitmap bit itself is presumably set elsewhere
		 * (e.g. on command status) — confirm against callers.
		 */
		soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		/* Dump ring state to aid debugging of ring-full conditions */
		hal_srng_dump(reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
		sizeof(struct reo_flush_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* Zero everything past the uniform command header */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_flush_cache) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
		cp->flush_all);

	hal_srng_access_end(soc, reo_ring);

	/* Read back the command number HW will report status against */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_flush_cache);
/**
 * hal_reo_cmd_unblock_cache - Post an UNBLOCK_CACHE command to the REO
 * command ring
 * @reo_ring: REO command SRNG ring handle
 * @soc: HAL SOC handle
 * @cmd: command parameters (u.unblk_cache_params: type and, for
 *       UNBLOCK_RES_INDEX, the blocking resource index to release)
 *
 * Return: REO command number read back from the descriptor header on
 * success, -EBUSY if the command ring is full or no blocking resource
 * is currently in use.
 */
inline int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
				     struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(soc, reo_ring);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		/* NOTE(review): `index` is only used to verify that some
		 * blocking resource is in use; the descriptor below is
		 * programmed with the caller-supplied index, not this one —
		 * confirm this is intentional.
		 */
		index = hal_find_one_bit(soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(soc, reo_ring);
			qdf_print("%s: No blocking resource to unblock!",
				__func__);
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
		sizeof(struct reo_unblock_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* Zero everything past the uniform command header */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_unblock_cache) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
		UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			CACHE_BLOCK_RESOURCE_INDEX,
			cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(soc, reo_ring);

	/* Read back the command number HW will report status against */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_unblock_cache);
/**
 * hal_reo_cmd_flush_timeout_list - Post a FLUSH_TIMEOUT_LIST command to
 * the REO command ring
 * @reo_ring: REO command SRNG ring handle
 * @soc: HAL SOC handle
 * @cmd: command parameters (u.fl_tim_list_params: AC timeout list,
 *       minimum release descriptor count, minimum forward buffer count)
 *
 * Return: REO command number read back from the descriptor header on
 * success, -EBUSY if the command ring is full.
 */
inline int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc,
					  struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
		sizeof(struct reo_flush_timeout_list));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* Zero everything past the uniform command header */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_flush_timeout_list) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	/* "AC_TIMOUT_LIST" spelling comes from the HW header definition */
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
		cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_RELEASE_DESC_COUNT,
		cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_FORWARD_BUF_COUNT,
		cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(soc, reo_ring);

	/* Read back the command number HW will report status against */
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_flush_timeout_list);
  569. inline int hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc,
  570. struct hal_reo_cmd_params *cmd)
  571. {
  572. uint32_t *reo_desc, val;
  573. struct hal_reo_cmd_update_queue_params *p;
  574. p = &cmd->u.upd_queue_params;
  575. hal_srng_access_start(soc, reo_ring);
  576. reo_desc = hal_srng_src_get_next(soc, reo_ring);
  577. if (!reo_desc) {
  578. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  579. "%s: Out of cmd ring entries", __func__);
  580. hal_srng_access_end(soc, reo_ring);
  581. return -EBUSY;
  582. }
  583. HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
  584. sizeof(struct reo_update_rx_reo_queue));
  585. /* Offsets of descriptor fields defined in HW headers start from
  586. * the field after TLV header */
  587. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  588. qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
  589. sizeof(struct reo_update_rx_reo_queue) -
  590. (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
  591. HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
  592. REO_STATUS_REQUIRED, cmd->std.need_status);
  593. hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
  594. cmd->std.addr_lo, cmd->std.addr_hi);
  595. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  596. UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
  597. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
  598. p->update_vld);
  599. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  600. UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
  601. p->update_assoc_link_desc);
  602. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  603. UPDATE_DISABLE_DUPLICATE_DETECTION,
  604. p->update_disable_dup_detect);
  605. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  606. UPDATE_DISABLE_DUPLICATE_DETECTION,
  607. p->update_disable_dup_detect);
  608. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  609. UPDATE_SOFT_REORDER_ENABLE,
  610. p->update_soft_reorder_enab);
  611. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  612. UPDATE_AC, p->update_ac);
  613. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  614. UPDATE_BAR, p->update_bar);
  615. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  616. UPDATE_BAR, p->update_bar);
  617. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  618. UPDATE_RTY, p->update_rty);
  619. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  620. UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
  621. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  622. UPDATE_OOR_MODE, p->update_oor_mode);
  623. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  624. UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
  625. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  626. UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
  627. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  628. UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
  629. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  630. UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
  631. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  632. UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
  633. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  634. UPDATE_PN_SIZE, p->update_pn_size);
  635. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  636. UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
  637. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  638. UPDATE_SVLD, p->update_svld);
  639. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  640. UPDATE_SSN, p->update_ssn);
  641. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  642. UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
  643. p->update_seq_2k_err_detect);
  644. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  645. UPDATE_PN_VALID, p->update_pn_valid);
  646. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  647. UPDATE_PN, p->update_pn);
  648. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  649. RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
  650. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  651. VLD, p->vld);
  652. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  653. ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
  654. p->assoc_link_desc);
  655. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  656. DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
  657. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  658. SOFT_REORDER_ENABLE, p->soft_reorder_enab);
  659. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
  660. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  661. BAR, p->bar);
  662. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  663. CHK_2K_MODE, p->chk_2k_mode);
  664. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  665. RTY, p->rty);
  666. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  667. OOR_MODE, p->oor_mode);
  668. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  669. PN_CHECK_NEEDED, p->pn_check_needed);
  670. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  671. PN_SHALL_BE_EVEN, p->pn_even);
  672. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  673. PN_SHALL_BE_UNEVEN, p->pn_uneven);
  674. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  675. PN_HANDLING_ENABLE, p->pn_hand_enab);
  676. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  677. IGNORE_AMPDU_FLAG, p->ignore_ampdu);
  678. if (p->ba_window_size < 1)
  679. p->ba_window_size = 1;
  680. /*
  681. * WAR to get 2k exception in Non BA case.
  682. * Setting window size to 2 to get 2k jump exception
  683. * when we receive aggregates in Non BA case
  684. */
  685. if (p->ba_window_size == 1)
  686. p->ba_window_size++;
  687. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  688. BA_WINDOW_SIZE, p->ba_window_size - 1);
  689. if (p->pn_size == 24)
  690. p->pn_size = PN_SIZE_24;
  691. else if (p->pn_size == 48)
  692. p->pn_size = PN_SIZE_48;
  693. else if (p->pn_size == 128)
  694. p->pn_size = PN_SIZE_128;
  695. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  696. PN_SIZE, p->pn_size);
  697. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  698. SVLD, p->svld);
  699. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  700. SSN, p->ssn);
  701. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  702. SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
  703. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  704. PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
  705. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
  706. PN_31_0, p->pn_31_0);
  707. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
  708. PN_63_32, p->pn_63_32);
  709. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
  710. PN_95_64, p->pn_95_64);
  711. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
  712. PN_127_96, p->pn_127_96);
  713. hal_srng_access_end(soc, reo_ring);
  714. val = reo_desc[CMD_HEADER_DW_OFFSET];
  715. return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
  716. val);
  717. }
  718. qdf_export_symbol(hal_reo_cmd_update_rx_queue);
/**
 * hal_reo_queue_stats_status() - Parse a REO_GET_QUEUE_STATS status TLV
 * @reo_desc: pointer to the status ring descriptor (TLV header included)
 * @st: output - parsed REO queue statistics
 * @hal_soc: HAL SoC handle, passed through to the common header parser
 *
 * Extracts the SSN, current index, PN, enqueue/dequeue timestamps, the
 * 256-bit RX reorder bitmap and the various frame/byte counters from the
 * hardware GET_QUEUE_STATS status descriptor into @st.
 */
inline void hal_reo_queue_stats_status(uint32_t *reo_desc,
			struct hal_reo_queue_status *st,
			struct hal_soc *hal_soc)
{
	uint32_t val;

	/* Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* header */
	hal_reo_status_get_header(reo_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
			&(st->header), hal_soc);

	/* SSN */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
					 CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
			      CURRENT_INDEX, val);

	/* PN bits (128-bit packet number, 32 bits per status word) */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
					PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
			      PN_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
				     PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
			      PN_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
				     PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
			      PN_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
				     PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
				     LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
				     LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap (256-bit reorder window, 32 bits per status word) */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
				     RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
			      RX_BITMAP_31_0, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
				     RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
			      RX_BITMAP_63_32, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
				     RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
			      RX_BITMAP_95_64, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
				     RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
			      RX_BITMAP_127_96, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
				     RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
			      RX_BITMAP_159_128, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
				     RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
			      RX_BITMAP_191_160, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
				     RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
			      RX_BITMAP_223_192, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
				     RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
				     CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
				     CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
			      CURRENT_MSDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      TIMEOUT_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      FORWARD_DUE_TO_BAR_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
				     DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
			      DUPLICATE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
				     FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      FRAMES_IN_ORDER_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
				     BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
			      BAR_RECEIVED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
				     MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
			      MPDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
				     MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
			      MSDU_FRAMES_PROCESSED_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
				     TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
			      TOTAL_PROCESSED_BYTE_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      LATE_RECEIVE_MPDU_COUNT, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      WINDOW_JUMP_2K, val);

	val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
				     HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
			      HOLE_COUNT, val);
}
qdf_export_symbol(hal_reo_queue_stats_status);
  880. inline void hal_reo_flush_queue_status(uint32_t *reo_desc,
  881. struct hal_reo_flush_queue_status *st,
  882. struct hal_soc *hal_soc)
  883. {
  884. uint32_t val;
  885. /* Offsets of descriptor fields defined in HW headers start
  886. * from the field after TLV header */
  887. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  888. /* header */
  889. hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
  890. &(st->header), hal_soc);
  891. /* error bit */
  892. val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2,
  893. ERROR_DETECTED)];
  894. st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
  895. val);
  896. }
  897. qdf_export_symbol(hal_reo_flush_queue_status);
  898. inline void hal_reo_flush_cache_status(uint32_t *reo_desc, struct hal_soc *soc,
  899. struct hal_reo_flush_cache_status *st,
  900. struct hal_soc *hal_soc)
  901. {
  902. uint32_t val;
  903. /* Offsets of descriptor fields defined in HW headers start
  904. * from the field after TLV header */
  905. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  906. /* header */
  907. hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
  908. &(st->header), hal_soc);
  909. /* error bit */
  910. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  911. ERROR_DETECTED)];
  912. st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
  913. val);
  914. /* block error */
  915. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  916. BLOCK_ERROR_DETAILS)];
  917. st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
  918. BLOCK_ERROR_DETAILS,
  919. val);
  920. if (!st->block_error)
  921. qdf_set_bit(soc->index, (unsigned long *)&soc->reo_res_bitmap);
  922. /* cache flush status */
  923. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  924. CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
  925. st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
  926. CACHE_CONTROLLER_FLUSH_STATUS_HIT,
  927. val);
  928. /* cache flush descriptor type */
  929. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  930. CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
  931. st->cache_flush_status_desc_type =
  932. HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
  933. CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
  934. val);
  935. /* cache flush count */
  936. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  937. CACHE_CONTROLLER_FLUSH_COUNT)];
  938. st->cache_flush_cnt =
  939. HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
  940. CACHE_CONTROLLER_FLUSH_COUNT,
  941. val);
  942. }
  943. qdf_export_symbol(hal_reo_flush_cache_status);
  944. inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
  945. struct hal_soc *soc,
  946. struct hal_reo_unblk_cache_status *st)
  947. {
  948. uint32_t val;
  949. /* Offsets of descriptor fields defined in HW headers start
  950. * from the field after TLV header */
  951. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  952. /* header */
  953. hal_reo_status_get_header(reo_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
  954. &(st->header), soc);
  955. /* error bit */
  956. val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
  957. ERROR_DETECTED)];
  958. st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
  959. ERROR_DETECTED,
  960. val);
  961. /* unblock type */
  962. val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
  963. UNBLOCK_TYPE)];
  964. st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
  965. UNBLOCK_TYPE,
  966. val);
  967. if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
  968. qdf_clear_bit(soc->index,
  969. (unsigned long *)&soc->reo_res_bitmap);
  970. }
  971. qdf_export_symbol(hal_reo_unblock_cache_status);
  972. inline void hal_reo_flush_timeout_list_status(
  973. uint32_t *reo_desc,
  974. struct hal_reo_flush_timeout_list_status *st,
  975. struct hal_soc *hal_soc)
  976. {
  977. uint32_t val;
  978. /* Offsets of descriptor fields defined in HW headers start
  979. * from the field after TLV header */
  980. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  981. /* header */
  982. hal_reo_status_get_header(reo_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
  983. &(st->header), hal_soc);
  984. /* error bit */
  985. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
  986. ERROR_DETECTED)];
  987. st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
  988. ERROR_DETECTED,
  989. val);
  990. /* list empty */
  991. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
  992. TIMOUT_LIST_EMPTY)];
  993. st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
  994. TIMOUT_LIST_EMPTY,
  995. val);
  996. /* release descriptor count */
  997. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
  998. RELEASE_DESC_COUNT)];
  999. st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
  1000. RELEASE_DESC_COUNT,
  1001. val);
  1002. /* forward buf count */
  1003. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
  1004. FORWARD_BUF_COUNT)];
  1005. st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
  1006. FORWARD_BUF_COUNT,
  1007. val);
  1008. }
  1009. qdf_export_symbol(hal_reo_flush_timeout_list_status);
  1010. inline void hal_reo_desc_thres_reached_status(
  1011. uint32_t *reo_desc,
  1012. struct hal_reo_desc_thres_reached_status *st,
  1013. struct hal_soc *hal_soc)
  1014. {
  1015. uint32_t val;
  1016. /* Offsets of descriptor fields defined in HW headers start
  1017. * from the field after TLV header */
  1018. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  1019. /* header */
  1020. hal_reo_status_get_header(reo_desc,
  1021. HAL_REO_DESC_THRES_STATUS_TLV,
  1022. &(st->header), hal_soc);
  1023. /* threshold index */
  1024. val = reo_desc[HAL_OFFSET_DW(
  1025. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
  1026. THRESHOLD_INDEX)];
  1027. st->thres_index = HAL_GET_FIELD(
  1028. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
  1029. THRESHOLD_INDEX,
  1030. val);
  1031. /* link desc counters */
  1032. val = reo_desc[HAL_OFFSET_DW(
  1033. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
  1034. LINK_DESCRIPTOR_COUNTER0)];
  1035. st->link_desc_counter0 = HAL_GET_FIELD(
  1036. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
  1037. LINK_DESCRIPTOR_COUNTER0,
  1038. val);
  1039. val = reo_desc[HAL_OFFSET_DW(
  1040. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
  1041. LINK_DESCRIPTOR_COUNTER1)];
  1042. st->link_desc_counter1 = HAL_GET_FIELD(
  1043. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
  1044. LINK_DESCRIPTOR_COUNTER1,
  1045. val);
  1046. val = reo_desc[HAL_OFFSET_DW(
  1047. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
  1048. LINK_DESCRIPTOR_COUNTER2)];
  1049. st->link_desc_counter2 = HAL_GET_FIELD(
  1050. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
  1051. LINK_DESCRIPTOR_COUNTER2,
  1052. val);
  1053. val = reo_desc[HAL_OFFSET_DW(
  1054. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
  1055. LINK_DESCRIPTOR_COUNTER_SUM)];
  1056. st->link_desc_counter_sum = HAL_GET_FIELD(
  1057. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
  1058. LINK_DESCRIPTOR_COUNTER_SUM,
  1059. val);
  1060. }
  1061. qdf_export_symbol(hal_reo_desc_thres_reached_status);
  1062. inline void hal_reo_rx_update_queue_status(uint32_t *reo_desc,
  1063. struct hal_reo_update_rx_queue_status *st,
  1064. struct hal_soc *hal_soc)
  1065. {
  1066. /* Offsets of descriptor fields defined in HW headers start
  1067. * from the field after TLV header */
  1068. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  1069. /* header */
  1070. hal_reo_status_get_header(reo_desc,
  1071. HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
  1072. &(st->header), hal_soc);
  1073. }
  1074. qdf_export_symbol(hal_reo_rx_update_queue_status);
  1075. /**
  1076. * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
  1077. * with command number
  1078. * @hal_soc: Handle to HAL SoC structure
  1079. * @hal_ring: Handle to HAL SRNG structure
  1080. *
  1081. * Return: none
  1082. */
  1083. inline void hal_reo_init_cmd_ring(struct hal_soc *soc, void *hal_srng)
  1084. {
  1085. int cmd_num;
  1086. uint32_t *desc_addr;
  1087. struct hal_srng_params srng_params;
  1088. uint32_t desc_size;
  1089. uint32_t num_desc;
  1090. hal_get_srng_params(soc, hal_srng, &srng_params);
  1091. desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
  1092. desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
  1093. desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
  1094. num_desc = srng_params.num_entries;
  1095. cmd_num = 1;
  1096. while (num_desc) {
  1097. /* Offsets of descriptor fields defined in HW headers start
  1098. * from the field after TLV header */
  1099. HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
  1100. REO_CMD_NUMBER, cmd_num);
  1101. desc_addr += desc_size;
  1102. num_desc--; cmd_num++;
  1103. }
  1104. soc->reo_res_bitmap = 0;
  1105. }
  1106. qdf_export_symbol(hal_reo_init_cmd_ring);