hal_reo.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335
  1. /*
  2. * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "hal_api.h"
  19. #include "hal_hw_headers.h"
  20. #include "hal_reo.h"
  21. #include "hal_tx.h"
  22. #include "hal_rx.h"
  23. #include "qdf_module.h"
  24. /* TODO: See if the following definition is available in HW headers */
  25. #define HAL_REO_OWNED 4
  26. #define HAL_REO_QUEUE_DESC 8
  27. #define HAL_REO_QUEUE_EXT_DESC 9
  28. /* TODO: Using associated link desc counter 1 for Rx. Check with FW on
  29. * how these counters are assigned
  30. */
  31. #define HAL_RX_LINK_DESC_CNTR 1
  32. /* TODO: Following definition should be from HW headers */
  33. #define HAL_DESC_REO_OWNED 4
/**
 * hal_uniform_desc_hdr_setup - setup a uniform descriptor header
 * @desc: pointer to the descriptor header words
 * @owner: owner field value (e.g. HAL_DESC_REO_OWNED)
 * @buffer_type: descriptor buffer type (e.g. HAL_REO_QUEUE_DESC or
 *		 HAL_REO_QUEUE_EXT_DESC)
 */
static inline void hal_uniform_desc_hdr_setup(uint32_t *desc, uint32_t owner,
	uint32_t buffer_type)
{
	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, OWNER,
		owner);
	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, BUFFER_TYPE,
		buffer_type);
}
#ifndef TID_TO_WME_AC
/* WME/EDCA access category values used by the TID mapping below */
#define WME_AC_BE 0 /* best effort */
#define WME_AC_BK 1 /* background */
#define WME_AC_VI 2 /* video */
#define WME_AC_VO 3 /* voice */
/* Map an 802.11 TID to its WME access category: TIDs 0/3 -> BE,
 * 1/2 -> BK, 4/5 -> VI, and every other TID value -> VO.
 */
#define TID_TO_WME_AC(_tid) ( \
	(((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
	(((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
	(((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
	WME_AC_VO)
#endif
/* TID value used for non-QoS traffic (outside the 0-15 QoS TID range) */
#define HAL_NON_QOS_TID 16
/**
 * hal_reo_qdesc_setup - Setup HW REO queue descriptor
 *
 * @hal_soc: Opaque HAL SOC handle
 * @tid: TID for this queue (HAL_NON_QOS_TID for non-QoS traffic)
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number (programmed as SSN when <= 0xfff)
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 *		    (NOTE(review): not referenced in this function body -
 *		    presumably kept for API symmetry; confirm with callers)
 * @pn_type: PN check type (HAL_PN_WPA, HAL_PN_WAPI_EVEN,
 *	     HAL_PN_WAPI_UNEVEN; any other value disables PN checking)
 */
void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
	int pn_type)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	/* Start from an all-zero descriptor before setting individual fields */
	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));

	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
		HAL_REO_QUEUE_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER_0,
		RESERVED_0A, 0xDDBEEF);

	/* This is just SW meta data and will be copied to REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
		RECEIVE_QUEUE_NUMBER, tid);

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		ASSOCIATED_LINK_DESCRIPTOR_COUNTER, HAL_RX_LINK_DESC_CNTR);

	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 * (descriptor memory was zeroed above)
	 */
	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);

	/* Clamp window size to a minimum of 1 before the WAR below */
	if (ba_window_size < 1)
		ba_window_size = 1;

	/* WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	if ((ba_window_size == 1) && (tid != HAL_NON_QOS_TID))
		ba_window_size++;

	/* Set RTY bit for non-BA case. Duplicate detection is currently not
	 * done by HW in non-BA case if RTY bit is not set.
	 * TODO: This is a temporary WAR and should be removed once HW fix is
	 * made to check and discard duplicates even if RTY bit is not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, RTY, 1);

	/* HW field is window size minus one */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
		ba_window_size - 1);

	/* Derive PN enable/size from the requested PN check type */
	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		pn_enable = 0;
		break;
	}

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
		pn_enable);

	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
			PN_SHALL_BE_UNEVEN, 1);

	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_HANDLING_ENABLE,
		pn_enable);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
		pn_size);

	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG need to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
		IGNORE_AMPDU_FLAG, 1);

	/* SSN field holds 12 bits; only program it for in-range values */
	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
			start_seq);

	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 0);

	/* TODO: Check if we should set start PN for WAPI */

#ifdef notyet
	/* Setup first queue extension if BA window size is more than 1 */
	if (ba_window_size > 1) {
		reo_queue_ext_desc =
			(uint32_t *)(((struct rx_reo_queue *)reo_queue_desc) +
			1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
	/* Setup second queue extension if BA window size is more than 105 */
	if (ba_window_size > 105) {
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
	/* Setup third queue extension if BA window size is more than 210 */
	if (ba_window_size > 210) {
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc,
			sizeof(struct rx_reo_queue_ext));
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
	}
#else
	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is received. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	if (tid != HAL_NON_QOS_TID) {
		/* Three extension descriptors follow the base queue
		 * descriptor contiguously in memory; zero them all at once.
		 */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue *)reo_queue_desc) + 1);
		qdf_mem_zero(reo_queue_ext_desc, 3 *
			sizeof(struct rx_reo_queue_ext));
		/* Initialize first reo queue extension descriptor */
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xADBEEF);
		/* Initialize second reo queue extension descriptor */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xBDBEEF);
		/* Initialize third reo queue extension descriptor */
		reo_queue_ext_desc = (uint32_t *)
			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
		/* Fixed pattern in reserved bits for debugging */
		HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			UNIFORM_DESCRIPTOR_HEADER_0, RESERVED_0A, 0xCDBEEF);
	}
#endif
}
qdf_export_symbol(hal_reo_qdesc_setup);
  226. /**
  227. * hal_get_ba_aging_timeout - Get BA Aging timeout
  228. *
  229. * @hal_soc: Opaque HAL SOC handle
  230. * @ac: Access category
  231. * @value: window size to get
  232. */
  233. void hal_get_ba_aging_timeout(void *hal_soc, uint8_t ac,
  234. uint32_t *value)
  235. {
  236. struct hal_soc *soc = (struct hal_soc *)hal_soc;
  237. switch (ac) {
  238. case WME_AC_BE:
  239. *value = HAL_REG_READ(soc,
  240. HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
  241. SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
  242. break;
  243. case WME_AC_BK:
  244. *value = HAL_REG_READ(soc,
  245. HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
  246. SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
  247. break;
  248. case WME_AC_VI:
  249. *value = HAL_REG_READ(soc,
  250. HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
  251. SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
  252. break;
  253. case WME_AC_VO:
  254. *value = HAL_REG_READ(soc,
  255. HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
  256. SEQ_WCSS_UMAC_REO_REG_OFFSET)) / 1000;
  257. break;
  258. default:
  259. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  260. "Invalid AC: %d\n", ac);
  261. }
  262. }
  263. qdf_export_symbol(hal_get_ba_aging_timeout);
  264. /**
  265. * hal_set_ba_aging_timeout - Set BA Aging timeout
  266. *
  267. * @hal_soc: Opaque HAL SOC handle
  268. * @ac: Access category
  269. * ac: 0 - Background, 1 - Best Effort, 2 - Video, 3 - Voice
  270. * @value: Input value to set
  271. */
  272. void hal_set_ba_aging_timeout(void *hal_soc, uint8_t ac,
  273. uint32_t value)
  274. {
  275. struct hal_soc *soc = (struct hal_soc *)hal_soc;
  276. switch (ac) {
  277. case WME_AC_BE:
  278. HAL_REG_WRITE(soc,
  279. HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
  280. SEQ_WCSS_UMAC_REO_REG_OFFSET),
  281. value * 1000);
  282. break;
  283. case WME_AC_BK:
  284. HAL_REG_WRITE(soc,
  285. HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
  286. SEQ_WCSS_UMAC_REO_REG_OFFSET),
  287. value * 1000);
  288. break;
  289. case WME_AC_VI:
  290. HAL_REG_WRITE(soc,
  291. HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
  292. SEQ_WCSS_UMAC_REO_REG_OFFSET),
  293. value * 1000);
  294. break;
  295. case WME_AC_VO:
  296. HAL_REG_WRITE(soc,
  297. HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
  298. SEQ_WCSS_UMAC_REO_REG_OFFSET),
  299. value * 1000);
  300. break;
  301. default:
  302. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
  303. "Invalid AC: %d\n", ac);
  304. }
  305. }
  306. qdf_export_symbol(hal_set_ba_aging_timeout);
  307. #define BLOCK_RES_MASK 0xF
  308. static inline uint8_t hal_find_one_bit(uint8_t x)
  309. {
  310. uint8_t y = (x & (~x + 1)) & BLOCK_RES_MASK;
  311. uint8_t pos;
  312. for (pos = 0; y; y >>= 1)
  313. pos++;
  314. return pos-1;
  315. }
  316. static inline uint8_t hal_find_zero_bit(uint8_t x)
  317. {
  318. uint8_t y = (~x & (x+1)) & BLOCK_RES_MASK;
  319. uint8_t pos;
  320. for (pos = 0; y; y >>= 1)
  321. pos++;
  322. return pos-1;
  323. }
/**
 * hal_reo_cmd_set_descr_addr - Program the 40-bit target descriptor
 * address into a REO command descriptor.
 * @reo_desc: REO command descriptor (pointing past the TLV header)
 * @type: REO command type; selects which command's address fields to set
 * @paddr_lo: lower 32 bits of the physical address
 * @paddr_hi: upper 8 bits (39:32) of the physical address
 */
inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
				       enum hal_reo_cmd_type type,
				       uint32_t paddr_lo,
				       uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2,
			RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_1,
			FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_1,
			FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_1,
			RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
			RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		/* Remaining command types carry no target address */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s: Invalid REO command type", __func__);
		break;
	}
}
/**
 * hal_reo_cmd_queue_stats - Post a GET_QUEUE_STATS command on the REO
 * command ring.
 * @reo_ring: REO command SRNG handle
 * @soc: HAL SOC handle
 * @cmd: command parameters (queue descriptor address, need_status,
 *	 clear-stats flag)
 *
 * Return: REO command number read back from the command header
 * (NOTE(review): header dword is not zeroed here, so the number is
 * presumably filled in by the SRNG/HW layer - confirm), or -EBUSY when
 * no command ring entry is available.
 */
inline int hal_reo_cmd_queue_stats(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
		sizeof(struct reo_get_queue_stats));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* Zero the command body while preserving the command header dwords */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_get_queue_stats) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_GET_QUEUE_STATS,
		cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS_2, CLEAR_STATS,
		cmd->u.stats_params.clear);

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_queue_stats);
/**
 * hal_reo_cmd_flush_queue - Post a FLUSH_QUEUE command on the REO
 * command ring.
 * @reo_ring: REO command SRNG handle
 * @soc: HAL SOC handle
 * @cmd: command parameters (descriptor address, need_status,
 *	 block-use-after-flush flag and blocking resource index)
 *
 * Return: REO command number read back from the command header, or
 * -EBUSY when no command ring entry is available.
 */
inline int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
		sizeof(struct reo_flush_queue));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* Zero the command body while preserving the command header dwords */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_flush_queue) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_QUEUE, cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
		BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
		cmd->u.fl_queue_params.block_use_after_flush);

	/* The blocking resource index is only meaningful when the queue is
	 * to remain blocked after the flush
	 */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_QUEUE_2,
			BLOCK_RESOURCE_INDEX, cmd->u.fl_queue_params.index);
	}

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_flush_queue);
/**
 * hal_reo_cmd_flush_cache - Post a FLUSH_CACHE command on the REO
 * command ring.
 * @reo_ring: REO command SRNG handle
 * @soc: HAL SOC handle
 * @cmd: command parameters (fl_cache_params: flush address, blocking,
 *	 forward/invalidate/flush-all options)
 *
 * When block_use_after_flush is requested, a free HW cache blocking
 * resource (0-3) is looked up from soc->reo_res_bitmap and its index is
 * remembered in soc->index.
 *
 * Return: REO command number read back from the command header, or
 * -EBUSY when no blocking resource or command ring entry is available.
 */
inline int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;

	cp = &cmd->u.fl_cache_params;

	hal_srng_access_start(soc, reo_ring);

	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("%s, No blocking resource available!",
				__func__);
			hal_srng_access_end(soc, reo_ring);
			return -EBUSY;
		}
		soc->index = index;
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		hal_srng_dump(reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
		sizeof(struct reo_flush_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* Zero the command body while preserving the command header dwords */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_flush_cache) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	hal_reo_cmd_set_descr_addr(reo_desc, CMD_FLUSH_CACHE, cmd->std.addr_lo,
		cmd->std.addr_hi);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FORWARD_ALL_MPDUS_IN_QUEUE, cp->fwd_mpdus_in_queue);

	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);

	if (cp->block_use_after_flush) {
		HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
			CACHE_BLOCK_RESOURCE_INDEX, index);
	}

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2,
		BLOCK_CACHE_USAGE_AFTER_FLUSH, cp->block_use_after_flush);
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_CACHE_2, FLUSH_ENTIRE_CACHE,
		cp->flush_all);

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_flush_cache);
/**
 * hal_reo_cmd_unblock_cache - Post an UNBLOCK_CACHE command on the REO
 * command ring.
 * @reo_ring: REO command SRNG handle
 * @soc: HAL SOC handle
 * @cmd: command parameters (unblk_cache_params: unblock type and
 *	 blocking resource index)
 *
 * For UNBLOCK_RES_INDEX, a currently-used blocking resource must exist
 * in soc->reo_res_bitmap; otherwise the command is not posted.
 *
 * Return: REO command number read back from the command header, or
 * -EBUSY when no blocking resource is in use or the ring is full.
 */
inline int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(soc, reo_ring);

	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(soc, reo_ring);
			qdf_print("%s: No blocking resource to unblock!",
				__func__);
			return -EBUSY;
		}
	}

	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
		sizeof(struct reo_unblock_cache));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* Zero the command body while preserving the command header dwords */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_unblock_cache) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
		UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);

	/* NOTE(review): the looked-up 'index' is not programmed here; the
	 * caller-supplied index is used - confirm this is intentional.
	 */
	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE_1,
			CACHE_BLOCK_RESOURCE_INDEX,
			cmd->u.unblk_cache_params.index);
	}

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_unblock_cache);
/**
 * hal_reo_cmd_flush_timeout_list - Post a FLUSH_TIMEOUT_LIST command on
 * the REO command ring.
 * @reo_ring: REO command SRNG handle
 * @soc: HAL SOC handle
 * @cmd: command parameters (fl_tim_list_params: AC timeout list,
 *	 minimum release descriptor count, minimum forward buffer count)
 *
 * Return: REO command number read back from the command header, or
 * -EBUSY when no command ring entry is available.
 */
inline int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc,
	struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;

	hal_srng_access_start(soc, reo_ring);
	reo_desc = hal_srng_src_get_next(soc, reo_ring);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			"%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(soc, reo_ring);
		return -EBUSY;
	}

	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
		sizeof(struct reo_flush_timeout_list));

	/* Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);

	/* Zero the command body while preserving the command header dwords */
	qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		sizeof(struct reo_flush_timeout_list) -
		(NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));

	HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
		REO_STATUS_REQUIRED, cmd->std.need_status);

	/* AC_TIMOUT_LIST is the HW header's field name (spelling is HW's) */
	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_1, AC_TIMOUT_LIST,
		cmd->u.fl_tim_list_params.ac_list);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_RELEASE_DESC_COUNT,
		cmd->u.fl_tim_list_params.min_rel_desc);

	HAL_DESC_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST_2,
		MINIMUM_FORWARD_BUF_COUNT,
		cmd->u.fl_tim_list_params.min_fwd_buf);

	hal_srng_access_end(soc, reo_ring);

	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
		val);
}
qdf_export_symbol(hal_reo_cmd_flush_timeout_list);
  575. inline int hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc,
  576. struct hal_reo_cmd_params *cmd)
  577. {
  578. uint32_t *reo_desc, val;
  579. struct hal_reo_cmd_update_queue_params *p;
  580. p = &cmd->u.upd_queue_params;
  581. hal_srng_access_start(soc, reo_ring);
  582. reo_desc = hal_srng_src_get_next(soc, reo_ring);
  583. if (!reo_desc) {
  584. QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
  585. "%s: Out of cmd ring entries", __func__);
  586. hal_srng_access_end(soc, reo_ring);
  587. return -EBUSY;
  588. }
  589. HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
  590. sizeof(struct reo_update_rx_reo_queue));
  591. /* Offsets of descriptor fields defined in HW headers start from
  592. * the field after TLV header */
  593. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  594. qdf_mem_zero((void *)(reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
  595. sizeof(struct reo_update_rx_reo_queue) -
  596. (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
  597. HAL_DESC_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER_0,
  598. REO_STATUS_REQUIRED, cmd->std.need_status);
  599. hal_reo_cmd_set_descr_addr(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
  600. cmd->std.addr_lo, cmd->std.addr_hi);
  601. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  602. UPDATE_RECEIVE_QUEUE_NUMBER, p->update_rx_queue_num);
  603. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2, UPDATE_VLD,
  604. p->update_vld);
  605. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  606. UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
  607. p->update_assoc_link_desc);
  608. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  609. UPDATE_DISABLE_DUPLICATE_DETECTION,
  610. p->update_disable_dup_detect);
  611. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  612. UPDATE_DISABLE_DUPLICATE_DETECTION,
  613. p->update_disable_dup_detect);
  614. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  615. UPDATE_SOFT_REORDER_ENABLE,
  616. p->update_soft_reorder_enab);
  617. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  618. UPDATE_AC, p->update_ac);
  619. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  620. UPDATE_BAR, p->update_bar);
  621. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  622. UPDATE_BAR, p->update_bar);
  623. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  624. UPDATE_RTY, p->update_rty);
  625. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  626. UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
  627. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  628. UPDATE_OOR_MODE, p->update_oor_mode);
  629. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  630. UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
  631. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  632. UPDATE_PN_CHECK_NEEDED, p->update_pn_check_needed);
  633. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  634. UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
  635. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  636. UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
  637. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  638. UPDATE_PN_HANDLING_ENABLE, p->update_pn_hand_enab);
  639. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  640. UPDATE_PN_SIZE, p->update_pn_size);
  641. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  642. UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
  643. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  644. UPDATE_SVLD, p->update_svld);
  645. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  646. UPDATE_SSN, p->update_ssn);
  647. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  648. UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
  649. p->update_seq_2k_err_detect);
  650. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  651. UPDATE_PN_VALID, p->update_pn_valid);
  652. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_2,
  653. UPDATE_PN, p->update_pn);
  654. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  655. RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
  656. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  657. VLD, p->vld);
  658. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  659. ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
  660. p->assoc_link_desc);
  661. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  662. DISABLE_DUPLICATE_DETECTION, p->disable_dup_detect);
  663. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  664. SOFT_REORDER_ENABLE, p->soft_reorder_enab);
  665. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3, AC, p->ac);
  666. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  667. BAR, p->bar);
  668. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  669. CHK_2K_MODE, p->chk_2k_mode);
  670. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  671. RTY, p->rty);
  672. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  673. OOR_MODE, p->oor_mode);
  674. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  675. PN_CHECK_NEEDED, p->pn_check_needed);
  676. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  677. PN_SHALL_BE_EVEN, p->pn_even);
  678. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  679. PN_SHALL_BE_UNEVEN, p->pn_uneven);
  680. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  681. PN_HANDLING_ENABLE, p->pn_hand_enab);
  682. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_3,
  683. IGNORE_AMPDU_FLAG, p->ignore_ampdu);
  684. if (p->ba_window_size < 1)
  685. p->ba_window_size = 1;
  686. /*
  687. * WAR to get 2k exception in Non BA case.
  688. * Setting window size to 2 to get 2k jump exception
  689. * when we receive aggregates in Non BA case
  690. */
  691. if (p->ba_window_size == 1)
  692. p->ba_window_size++;
  693. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  694. BA_WINDOW_SIZE, p->ba_window_size - 1);
  695. if (p->pn_size == 24)
  696. p->pn_size = PN_SIZE_24;
  697. else if (p->pn_size == 48)
  698. p->pn_size = PN_SIZE_48;
  699. else if (p->pn_size == 128)
  700. p->pn_size = PN_SIZE_128;
  701. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  702. PN_SIZE, p->pn_size);
  703. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  704. SVLD, p->svld);
  705. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  706. SSN, p->ssn);
  707. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  708. SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
  709. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_4,
  710. PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
  711. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_5,
  712. PN_31_0, p->pn_31_0);
  713. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_6,
  714. PN_63_32, p->pn_63_32);
  715. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_7,
  716. PN_95_64, p->pn_95_64);
  717. HAL_DESC_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE_8,
  718. PN_127_96, p->pn_127_96);
  719. hal_srng_access_end(soc, reo_ring);
  720. val = reo_desc[CMD_HEADER_DW_OFFSET];
  721. return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER_0, REO_CMD_NUMBER,
  722. val);
  723. }
  724. qdf_export_symbol(hal_reo_cmd_update_rx_queue);
  725. inline void hal_reo_queue_stats_status(uint32_t *reo_desc,
  726. struct hal_reo_queue_status *st,
  727. struct hal_soc *hal_soc)
  728. {
  729. uint32_t val;
  730. /* Offsets of descriptor fields defined in HW headers start
  731. * from the field after TLV header */
  732. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  733. /* header */
  734. hal_reo_status_get_header(reo_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
  735. &(st->header), hal_soc);
  736. /* SSN */
  737. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2, SSN)];
  738. st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2, SSN, val);
  739. /* current index */
  740. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_2,
  741. CURRENT_INDEX)];
  742. st->curr_idx =
  743. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_2,
  744. CURRENT_INDEX, val);
  745. /* PN bits */
  746. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_3,
  747. PN_31_0)];
  748. st->pn_31_0 =
  749. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_3,
  750. PN_31_0, val);
  751. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_4,
  752. PN_63_32)];
  753. st->pn_63_32 =
  754. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_4,
  755. PN_63_32, val);
  756. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_5,
  757. PN_95_64)];
  758. st->pn_95_64 =
  759. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_5,
  760. PN_95_64, val);
  761. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_6,
  762. PN_127_96)];
  763. st->pn_127_96 =
  764. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_6,
  765. PN_127_96, val);
  766. /* timestamps */
  767. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_7,
  768. LAST_RX_ENQUEUE_TIMESTAMP)];
  769. st->last_rx_enq_tstamp =
  770. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_7,
  771. LAST_RX_ENQUEUE_TIMESTAMP, val);
  772. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_8,
  773. LAST_RX_DEQUEUE_TIMESTAMP)];
  774. st->last_rx_deq_tstamp =
  775. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_8,
  776. LAST_RX_DEQUEUE_TIMESTAMP, val);
  777. /* rx bitmap */
  778. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_9,
  779. RX_BITMAP_31_0)];
  780. st->rx_bitmap_31_0 =
  781. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_9,
  782. RX_BITMAP_31_0, val);
  783. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_10,
  784. RX_BITMAP_63_32)];
  785. st->rx_bitmap_63_32 =
  786. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_10,
  787. RX_BITMAP_63_32, val);
  788. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_11,
  789. RX_BITMAP_95_64)];
  790. st->rx_bitmap_95_64 =
  791. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_11,
  792. RX_BITMAP_95_64, val);
  793. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_12,
  794. RX_BITMAP_127_96)];
  795. st->rx_bitmap_127_96 =
  796. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_12,
  797. RX_BITMAP_127_96, val);
  798. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_13,
  799. RX_BITMAP_159_128)];
  800. st->rx_bitmap_159_128 =
  801. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_13,
  802. RX_BITMAP_159_128, val);
  803. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_14,
  804. RX_BITMAP_191_160)];
  805. st->rx_bitmap_191_160 =
  806. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_14,
  807. RX_BITMAP_191_160, val);
  808. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_15,
  809. RX_BITMAP_223_192)];
  810. st->rx_bitmap_223_192 =
  811. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_15,
  812. RX_BITMAP_223_192, val);
  813. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_16,
  814. RX_BITMAP_255_224)];
  815. st->rx_bitmap_255_224 =
  816. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_16,
  817. RX_BITMAP_255_224, val);
  818. /* various counts */
  819. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
  820. CURRENT_MPDU_COUNT)];
  821. st->curr_mpdu_cnt =
  822. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
  823. CURRENT_MPDU_COUNT, val);
  824. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_17,
  825. CURRENT_MSDU_COUNT)];
  826. st->curr_msdu_cnt =
  827. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_17,
  828. CURRENT_MSDU_COUNT, val);
  829. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
  830. TIMEOUT_COUNT)];
  831. st->fwd_timeout_cnt =
  832. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
  833. TIMEOUT_COUNT, val);
  834. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
  835. FORWARD_DUE_TO_BAR_COUNT)];
  836. st->fwd_bar_cnt =
  837. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
  838. FORWARD_DUE_TO_BAR_COUNT, val);
  839. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_18,
  840. DUPLICATE_COUNT)];
  841. st->dup_cnt =
  842. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_18,
  843. DUPLICATE_COUNT, val);
  844. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
  845. FRAMES_IN_ORDER_COUNT)];
  846. st->frms_in_order_cnt =
  847. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
  848. FRAMES_IN_ORDER_COUNT, val);
  849. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_19,
  850. BAR_RECEIVED_COUNT)];
  851. st->bar_rcvd_cnt =
  852. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_19,
  853. BAR_RECEIVED_COUNT, val);
  854. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_20,
  855. MPDU_FRAMES_PROCESSED_COUNT)];
  856. st->mpdu_frms_cnt =
  857. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_20,
  858. MPDU_FRAMES_PROCESSED_COUNT, val);
  859. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_21,
  860. MSDU_FRAMES_PROCESSED_COUNT)];
  861. st->msdu_frms_cnt =
  862. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_21,
  863. MSDU_FRAMES_PROCESSED_COUNT, val);
  864. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_22,
  865. TOTAL_PROCESSED_BYTE_COUNT)];
  866. st->total_cnt =
  867. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_22,
  868. TOTAL_PROCESSED_BYTE_COUNT, val);
  869. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
  870. LATE_RECEIVE_MPDU_COUNT)];
  871. st->late_recv_mpdu_cnt =
  872. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
  873. LATE_RECEIVE_MPDU_COUNT, val);
  874. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
  875. WINDOW_JUMP_2K)];
  876. st->win_jump_2k =
  877. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
  878. WINDOW_JUMP_2K, val);
  879. val = reo_desc[HAL_OFFSET_DW(REO_GET_QUEUE_STATS_STATUS_23,
  880. HOLE_COUNT)];
  881. st->hole_cnt =
  882. HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS_23,
  883. HOLE_COUNT, val);
  884. }
  885. qdf_export_symbol(hal_reo_queue_stats_status);
  886. inline void hal_reo_flush_queue_status(uint32_t *reo_desc,
  887. struct hal_reo_flush_queue_status *st,
  888. struct hal_soc *hal_soc)
  889. {
  890. uint32_t val;
  891. /* Offsets of descriptor fields defined in HW headers start
  892. * from the field after TLV header */
  893. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  894. /* header */
  895. hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
  896. &(st->header), hal_soc);
  897. /* error bit */
  898. val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS_2,
  899. ERROR_DETECTED)];
  900. st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
  901. val);
  902. }
  903. qdf_export_symbol(hal_reo_flush_queue_status);
  904. inline void hal_reo_flush_cache_status(uint32_t *reo_desc, struct hal_soc *soc,
  905. struct hal_reo_flush_cache_status *st,
  906. struct hal_soc *hal_soc)
  907. {
  908. uint32_t val;
  909. /* Offsets of descriptor fields defined in HW headers start
  910. * from the field after TLV header */
  911. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  912. /* header */
  913. hal_reo_status_get_header(reo_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
  914. &(st->header), hal_soc);
  915. /* error bit */
  916. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  917. ERROR_DETECTED)];
  918. st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS_2, ERROR_DETECTED,
  919. val);
  920. /* block error */
  921. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  922. BLOCK_ERROR_DETAILS)];
  923. st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
  924. BLOCK_ERROR_DETAILS,
  925. val);
  926. if (!st->block_error)
  927. qdf_set_bit(soc->index, (unsigned long *)&soc->reo_res_bitmap);
  928. /* cache flush status */
  929. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  930. CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
  931. st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
  932. CACHE_CONTROLLER_FLUSH_STATUS_HIT,
  933. val);
  934. /* cache flush descriptor type */
  935. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  936. CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
  937. st->cache_flush_status_desc_type =
  938. HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
  939. CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
  940. val);
  941. /* cache flush count */
  942. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_CACHE_STATUS_2,
  943. CACHE_CONTROLLER_FLUSH_COUNT)];
  944. st->cache_flush_cnt =
  945. HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS_2,
  946. CACHE_CONTROLLER_FLUSH_COUNT,
  947. val);
  948. }
  949. qdf_export_symbol(hal_reo_flush_cache_status);
  950. inline void hal_reo_unblock_cache_status(uint32_t *reo_desc,
  951. struct hal_soc *soc,
  952. struct hal_reo_unblk_cache_status *st)
  953. {
  954. uint32_t val;
  955. /* Offsets of descriptor fields defined in HW headers start
  956. * from the field after TLV header */
  957. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  958. /* header */
  959. hal_reo_status_get_header(reo_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
  960. &(st->header), soc);
  961. /* error bit */
  962. val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
  963. ERROR_DETECTED)];
  964. st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
  965. ERROR_DETECTED,
  966. val);
  967. /* unblock type */
  968. val = reo_desc[HAL_OFFSET_DW(REO_UNBLOCK_CACHE_STATUS_2,
  969. UNBLOCK_TYPE)];
  970. st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS_2,
  971. UNBLOCK_TYPE,
  972. val);
  973. if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
  974. qdf_clear_bit(soc->index,
  975. (unsigned long *)&soc->reo_res_bitmap);
  976. }
  977. qdf_export_symbol(hal_reo_unblock_cache_status);
  978. inline void hal_reo_flush_timeout_list_status(
  979. uint32_t *reo_desc,
  980. struct hal_reo_flush_timeout_list_status *st,
  981. struct hal_soc *hal_soc)
  982. {
  983. uint32_t val;
  984. /* Offsets of descriptor fields defined in HW headers start
  985. * from the field after TLV header */
  986. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  987. /* header */
  988. hal_reo_status_get_header(reo_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
  989. &(st->header), hal_soc);
  990. /* error bit */
  991. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
  992. ERROR_DETECTED)];
  993. st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
  994. ERROR_DETECTED,
  995. val);
  996. /* list empty */
  997. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
  998. TIMOUT_LIST_EMPTY)];
  999. st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_2,
  1000. TIMOUT_LIST_EMPTY,
  1001. val);
  1002. /* release descriptor count */
  1003. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
  1004. RELEASE_DESC_COUNT)];
  1005. st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
  1006. RELEASE_DESC_COUNT,
  1007. val);
  1008. /* forward buf count */
  1009. val = reo_desc[HAL_OFFSET_DW(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
  1010. FORWARD_BUF_COUNT)];
  1011. st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS_3,
  1012. FORWARD_BUF_COUNT,
  1013. val);
  1014. }
  1015. qdf_export_symbol(hal_reo_flush_timeout_list_status);
  1016. inline void hal_reo_desc_thres_reached_status(
  1017. uint32_t *reo_desc,
  1018. struct hal_reo_desc_thres_reached_status *st,
  1019. struct hal_soc *hal_soc)
  1020. {
  1021. uint32_t val;
  1022. /* Offsets of descriptor fields defined in HW headers start
  1023. * from the field after TLV header */
  1024. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  1025. /* header */
  1026. hal_reo_status_get_header(reo_desc,
  1027. HAL_REO_DESC_THRES_STATUS_TLV,
  1028. &(st->header), hal_soc);
  1029. /* threshold index */
  1030. val = reo_desc[HAL_OFFSET_DW(
  1031. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
  1032. THRESHOLD_INDEX)];
  1033. st->thres_index = HAL_GET_FIELD(
  1034. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_2,
  1035. THRESHOLD_INDEX,
  1036. val);
  1037. /* link desc counters */
  1038. val = reo_desc[HAL_OFFSET_DW(
  1039. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
  1040. LINK_DESCRIPTOR_COUNTER0)];
  1041. st->link_desc_counter0 = HAL_GET_FIELD(
  1042. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_3,
  1043. LINK_DESCRIPTOR_COUNTER0,
  1044. val);
  1045. val = reo_desc[HAL_OFFSET_DW(
  1046. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
  1047. LINK_DESCRIPTOR_COUNTER1)];
  1048. st->link_desc_counter1 = HAL_GET_FIELD(
  1049. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_4,
  1050. LINK_DESCRIPTOR_COUNTER1,
  1051. val);
  1052. val = reo_desc[HAL_OFFSET_DW(
  1053. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
  1054. LINK_DESCRIPTOR_COUNTER2)];
  1055. st->link_desc_counter2 = HAL_GET_FIELD(
  1056. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_5,
  1057. LINK_DESCRIPTOR_COUNTER2,
  1058. val);
  1059. val = reo_desc[HAL_OFFSET_DW(
  1060. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
  1061. LINK_DESCRIPTOR_COUNTER_SUM)];
  1062. st->link_desc_counter_sum = HAL_GET_FIELD(
  1063. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS_6,
  1064. LINK_DESCRIPTOR_COUNTER_SUM,
  1065. val);
  1066. }
  1067. qdf_export_symbol(hal_reo_desc_thres_reached_status);
  1068. inline void hal_reo_rx_update_queue_status(uint32_t *reo_desc,
  1069. struct hal_reo_update_rx_queue_status *st,
  1070. struct hal_soc *hal_soc)
  1071. {
  1072. /* Offsets of descriptor fields defined in HW headers start
  1073. * from the field after TLV header */
  1074. reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
  1075. /* header */
  1076. hal_reo_status_get_header(reo_desc,
  1077. HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
  1078. &(st->header), hal_soc);
  1079. }
  1080. qdf_export_symbol(hal_reo_rx_update_queue_status);
  1081. /**
  1082. * hal_reo_init_cmd_ring() - Initialize descriptors of REO command SRNG
  1083. * with command number
  1084. * @hal_soc: Handle to HAL SoC structure
  1085. * @hal_ring: Handle to HAL SRNG structure
  1086. *
  1087. * Return: none
  1088. */
  1089. inline void hal_reo_init_cmd_ring(struct hal_soc *soc, void *hal_srng)
  1090. {
  1091. int cmd_num;
  1092. uint32_t *desc_addr;
  1093. struct hal_srng_params srng_params;
  1094. uint32_t desc_size;
  1095. uint32_t num_desc;
  1096. hal_get_srng_params(soc, hal_srng, &srng_params);
  1097. desc_addr = (uint32_t *)(srng_params.ring_base_vaddr);
  1098. desc_addr += (sizeof(struct tlv_32_hdr) >> 2);
  1099. desc_size = hal_srng_get_entrysize(soc, REO_CMD) >> 2;
  1100. num_desc = srng_params.num_entries;
  1101. cmd_num = 1;
  1102. while (num_desc) {
  1103. /* Offsets of descriptor fields defined in HW headers start
  1104. * from the field after TLV header */
  1105. HAL_DESC_SET_FIELD(desc_addr, UNIFORM_REO_CMD_HEADER_0,
  1106. REO_CMD_NUMBER, cmd_num);
  1107. desc_addr += desc_size;
  1108. num_desc--; cmd_num++;
  1109. }
  1110. soc->reo_res_bitmap = 0;
  1111. }
  1112. qdf_export_symbol(hal_reo_init_cmd_ring);