hal_be_reo.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364
  1. /*
  2. * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "qdf_module.h"
  19. #include "hal_hw_headers.h"
  20. #include "hal_be_hw_headers.h"
  21. #include "hal_reo.h"
  22. #include "hal_be_reo.h"
  23. #include "hal_be_api.h"
  24. uint32_t hal_get_reo_reg_base_offset_be(void)
  25. {
  26. return REO_REG_REG_BASE;
  27. }
/**
 * hal_reo_qdesc_setup_be() - Setup HW REO queue descriptor
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @tid: TID of the queue; HAL_NON_QOS_TID gets no extension descriptors
 * @ba_window_size: BlockAck window size
 * @start_seq: Starting sequence number (programmed only if it fits in SSN)
 * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
 * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
 *                  (NOTE(review): unused in this function — presumably
 *                  consumed by the caller/FW path; confirm)
 * @pn_type: PN type (HAL_PN_WPA, HAL_PN_WAPI_EVEN, HAL_PN_WAPI_UNEVEN, ...)
 */
void hal_reo_qdesc_setup_be(hal_soc_handle_t hal_soc_hdl, int tid,
			    uint32_t ba_window_size,
			    uint32_t start_seq, void *hw_qdesc_vaddr,
			    qdf_dma_addr_t hw_qdesc_paddr,
			    int pn_type)
{
	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
	uint32_t *reo_queue_ext_desc;
	uint32_t reg_val;
	uint32_t pn_enable;
	uint32_t pn_size = 0;

	/* Start from a clean descriptor before programming fields */
	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));
	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_desc, UNIFORM_DESCRIPTOR_HEADER,
			   RESERVED_0A, 0xDDBEEF);
	/* This a just a SW meta data and will be copied to REO destination
	 * descriptors indicated by hardware.
	 * TODO: Setting TID in this field. See if we should set something else.
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   RECEIVE_QUEUE_NUMBER, tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   VLD, 1);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			   HAL_RX_LINK_DESC_CNTR);
	/*
	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
	 */
	reg_val = TID_TO_WME_AC(tid);
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, AC, reg_val);
	/* Clamp to a minimum window of 1 */
	if (ba_window_size < 1)
		ba_window_size = 1;
	/* WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	ba_window_size = hal_update_non_ba_win_size(tid, ba_window_size);
	/* Set RTY bit for non-BA case. Duplicate detection is currently not
	 * done by HW in non-BA case if RTY bit is not set.
	 * TODO: This is a temporary War and should be removed once HW fix is
	 * made to check and discard duplicates even if RTY bit is not set.
	 */
	if (ba_window_size == 1)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, RTY, 1);
	/* HW field stores (window size - 1) */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, BA_WINDOW_SIZE,
			   ba_window_size - 1);
	/* Map PN type to the HW PN-check enable and PN size fields */
	switch (pn_type) {
	case HAL_PN_WPA:
		pn_enable = 1;
		pn_size = PN_SIZE_48;
		break;
	case HAL_PN_WAPI_EVEN:
	case HAL_PN_WAPI_UNEVEN:
		pn_enable = 1;
		pn_size = PN_SIZE_128;
		break;
	default:
		/* No PN check for other types (e.g. open/none) */
		pn_enable = 0;
		break;
	}
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, PN_CHECK_NEEDED,
			   pn_enable);
	if (pn_type == HAL_PN_WAPI_EVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
				   PN_SHALL_BE_EVEN, 1);
	else if (pn_type == HAL_PN_WAPI_UNEVEN)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
				   PN_SHALL_BE_UNEVEN, 1);
	/*
	 * TODO: Need to check if PN handling in SW needs to be enabled
	 * So far this is not a requirement
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, PN_SIZE,
			   pn_size);
	/* TODO: Check if RX_REO_QUEUE_IGNORE_AMPDU_FLAG need to be set
	 * based on BA window size and/or AMPDU capabilities
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE,
			   IGNORE_AMPDU_FLAG, 1);
	/* Program the starting SSN only when it fits in the 12-bit field */
	if (start_seq <= 0xfff)
		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, SSN,
				   start_seq);
	/* TODO: SVLD should be set to 1 if a valid SSN is received in ADDBA,
	 * but REO is not delivering packets if we set it to 1. Need to enable
	 * this once the issue is resolved
	 */
	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE, SVLD, 0);
	/* TODO: Check if we should set start PN for WAPI */
	/* TODO: HW queue descriptors are currently allocated for max BA
	 * window size for all QOS TIDs so that same descriptor can be used
	 * later when ADDBA request is recevied. This should be changed to
	 * allocate HW queue descriptors based on BA window size being
	 * negotiated (0 for non BA cases), and reallocate when BA window
	 * size changes and also send WMI message to FW to change the REO
	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
	 */
	if (tid == HAL_NON_QOS_TID)
		return;
	/* Three extension descriptors immediately follow the base
	 * rx_reo_queue descriptor in memory; zero and initialize each.
	 */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue *)reo_queue_desc) + 1);
	qdf_mem_zero(reo_queue_ext_desc, 3 *
		     sizeof(struct rx_reo_queue_ext));
	/* Initialize first reo queue extension descriptor */
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xADBEEF);
	/* Initialize second reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xBDBEEF);
	/* Initialize third reo queue extension descriptor */
	reo_queue_ext_desc = (uint32_t *)
		(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
	hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
				   HAL_DESC_REO_OWNED,
				   HAL_REO_QUEUE_EXT_DESC);
	/* Fixed pattern in reserved bits for debugging */
	HAL_DESC_SET_FIELD(reo_queue_ext_desc,
			   UNIFORM_DESCRIPTOR_HEADER, RESERVED_0A,
			   0xCDBEEF);
}

qdf_export_symbol(hal_reo_qdesc_setup_be);
/**
 * hal_get_ba_aging_timeout_be() - Get BA Aging timeout
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: WME access category (WME_AC_BE/BK/VI/VO)
 * @value: [out] aging threshold for @ac, converted from the register
 *         units by dividing by 1000 (presumably usec -> msec; confirm
 *         against the register definition)
 *
 * NOTE(review): *value is left unmodified when @ac is invalid; callers
 * should initialize it before calling.
 */
void hal_get_ba_aging_timeout_be(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
				 uint32_t *value)
{
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;

	/* One aging-threshold register per access category (IX_0..IX_3) */
	switch (ac) {
	case WME_AC_BE:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
				      REO_REG_REG_BASE)) / 1000;
		break;
	case WME_AC_BK:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
				      REO_REG_REG_BASE)) / 1000;
		break;
	case WME_AC_VI:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
				      REO_REG_REG_BASE)) / 1000;
		break;
	case WME_AC_VO:
		*value = HAL_REG_READ(soc,
				      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
				      REO_REG_REG_BASE)) / 1000;
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid AC: %d\n", ac);
	}
}

qdf_export_symbol(hal_get_ba_aging_timeout_be);
/**
 * hal_set_ba_aging_timeout_be() - Set BA Aging timeout
 *
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @ac: WME access category (WME_AC_BE/BK/VI/VO;
 *      0 - Background, 1 - Best Effort, 2 - Video, 3 - Voice)
 * @value: timeout to program; multiplied by 1000 before the register
 *         write (presumably msec -> usec; confirm against the register
 *         definition)
 */
void hal_set_ba_aging_timeout_be(hal_soc_handle_t hal_soc_hdl, uint8_t ac,
				 uint32_t value)
{
	struct hal_soc *soc = (struct hal_soc *)hal_soc_hdl;

	/* One aging-threshold register per access category (IX_0..IX_3) */
	switch (ac) {
	case WME_AC_BE:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
			      REO_REG_REG_BASE),
			      value * 1000);
		break;
	case WME_AC_BK:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
			      REO_REG_REG_BASE),
			      value * 1000);
		break;
	case WME_AC_VI:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
			      REO_REG_REG_BASE),
			      value * 1000);
		break;
	case WME_AC_VO:
		HAL_REG_WRITE(soc,
			      HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
			      REO_REG_REG_BASE),
			      value * 1000);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "Invalid AC: %d\n", ac);
	}
}

qdf_export_symbol(hal_set_ba_aging_timeout_be);
/**
 * hal_reo_cmd_set_descr_addr_be() - Program the target descriptor address
 *	fields of a REO command descriptor
 * @reo_desc: REO command descriptor (pointing past the TLV header)
 * @type: REO command type; selects which per-command field pair to set
 * @paddr_lo: lower 32 bits of the queue/flush descriptor physical address
 * @paddr_hi: upper 8 bits (bits 39:32) of the physical address
 *
 * Each command type has its own field names in the HW header, so the
 * macro-based setters cannot be shared across cases.
 */
static inline void
hal_reo_cmd_set_descr_addr_be(uint32_t *reo_desc,
			      enum hal_reo_cmd_type type,
			      uint32_t paddr_lo,
			      uint8_t paddr_hi)
{
	switch (type) {
	case CMD_GET_QUEUE_STATS:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS,
				      RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS,
				      RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_QUEUE:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
				      FLUSH_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
				      FLUSH_DESC_ADDR_39_32, paddr_hi);
		break;
	case CMD_FLUSH_CACHE:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
				      FLUSH_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
				      FLUSH_ADDR_39_32, paddr_hi);
		break;
	case CMD_UPDATE_RX_REO_QUEUE:
		HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
				      RX_REO_QUEUE_DESC_ADDR_31_0, paddr_lo);
		HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
				      RX_REO_QUEUE_DESC_ADDR_39_32, paddr_hi);
		break;
	default:
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Invalid REO command type", __func__);
		break;
	}
}
/**
 * hal_reo_cmd_queue_stats_be() - Post a GET_QUEUE_STATS command on the
 *	REO command ring
 * @hal_ring_hdl: SRNG handle of the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (queue descriptor address and clear-stats flag)
 *
 * Return: REO command number read back from the descriptor header, or
 *	   -EBUSY if no command ring entry is available
 */
static inline int
hal_reo_cmd_queue_stats_be(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_GET_QUEUE_STATS_E,
			sizeof(struct reo_get_queue_stats));
	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_get_queue_stats) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);
	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_GET_QUEUE_STATS,
				      cmd->std.addr_lo,
				      cmd->std.addr_hi);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_GET_QUEUE_STATS, CLEAR_STATS,
			      cmd->u.stats_params.clear);
	/* If runtime PM allows it, complete the ring access now; otherwise
	 * reap the entry and mark the ring for a deferred flush.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD, true) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}
/**
 * hal_reo_cmd_flush_queue_be() - Post a FLUSH_QUEUE command on the REO
 *	command ring
 * @hal_ring_hdl: SRNG handle of the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (descriptor address, optional blocking index)
 *
 * Return: REO command number read back from the descriptor header, or
 *	   -EBUSY if no command ring entry is available
 */
static inline int
hal_reo_cmd_flush_queue_be(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_QUEUE_E,
			sizeof(struct reo_flush_queue));
	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);
	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_FLUSH_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
			      BLOCK_DESC_ADDR_USAGE_AFTER_FLUSH,
			      cmd->u.fl_queue_params.block_use_after_flush);
	/* Only program a blocking resource index if blocking is requested */
	if (cmd->u.fl_queue_params.block_use_after_flush) {
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_QUEUE,
				      BLOCK_RESOURCE_INDEX,
				      cmd->u.fl_queue_params.index);
	}
	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}
/**
 * hal_reo_cmd_flush_cache_be() - Post a FLUSH_CACHE command on the REO
 *	command ring
 * @hal_ring_hdl: SRNG handle of the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (flush address, blocking/invalidate options)
 *
 * When blocking after flush is requested, one of the four HW cache
 * blocking resources is claimed from hal_soc->reo_res_bitmap.
 *
 * Return: REO command number read back from the descriptor header, or
 *	   -EBUSY if no blocking resource or ring entry is available
 */
static inline int
hal_reo_cmd_flush_cache_be(hal_ring_handle_t hal_ring_hdl,
			   hal_soc_handle_t hal_soc_hdl,
			   struct hal_reo_cmd_params *cmd)
{
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_flush_cache_params *cp;
	uint8_t index = 0;
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;

	cp = &cmd->u.fl_cache_params;
	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	/* We need a cache block resource for this operation, and REO HW has
	 * only 4 such blocking resources. These resources are managed using
	 * reo_res_bitmap, and we return failure if none is available.
	 */
	if (cp->block_use_after_flush) {
		index = hal_find_zero_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			qdf_print("No blocking resource available!");
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			return -EBUSY;
		}
		hal_soc->index = index;
	}
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		hal_srng_dump(hal_ring_handle_to_hal_srng(hal_ring_hdl));
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_CACHE_E,
			sizeof(struct reo_flush_cache));
	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);
	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_FLUSH_CACHE,
				      cmd->std.addr_lo, cmd->std.addr_hi);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      FORWARD_ALL_MPDUS_IN_QUEUE,
			      cp->fwd_mpdus_in_queue);
	/* set it to 0 for now */
	cp->rel_block_index = 0;
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      RELEASE_CACHE_BLOCK_INDEX, cp->rel_block_index);
	if (cp->block_use_after_flush) {
		HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
				      CACHE_BLOCK_RESOURCE_INDEX, index);
	}
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      FLUSH_WITHOUT_INVALIDATE, cp->flush_no_inval);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE,
			      BLOCK_CACHE_USAGE_AFTER_FLUSH,
			      cp->block_use_after_flush);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_CACHE, FLUSH_ENTIRE_CACHE,
			      cp->flush_entire_cache);
	/* If runtime PM allows it, complete the ring access now; otherwise
	 * reap the entry and mark the ring for a deferred flush.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD, true) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}
/**
 * hal_reo_cmd_unblock_cache_be() - Post an UNBLOCK_CACHE command on the
 *	REO command ring
 * @hal_ring_hdl: SRNG handle of the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (unblock type and resource index)
 *
 * Return: REO command number read back from the descriptor header, or
 *	   -EBUSY if no blocked resource or ring entry is available
 */
static inline int
hal_reo_cmd_unblock_cache_be(hal_ring_handle_t hal_ring_hdl,
			     hal_soc_handle_t hal_soc_hdl,
			     struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	uint8_t index = 0;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	/* For an index-based unblock, verify a blocking resource is
	 * actually held before posting the command.
	 * NOTE(review): the bitmap lookup result is used only for this
	 * validation; the descriptor is programmed from the caller-supplied
	 * cmd->u.unblk_cache_params.index below — confirm intentional.
	 */
	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		index = hal_find_one_bit(hal_soc->reo_res_bitmap);
		if (index > 3) {
			hal_srng_access_end(hal_soc, hal_ring_hdl);
			qdf_print("No blocking resource to unblock!");
			return -EBUSY;
		}
	}
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UNBLOCK_CACHE_E,
			sizeof(struct reo_unblock_cache));
	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_unblock_cache) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE,
			      UNBLOCK_TYPE, cmd->u.unblk_cache_params.type);
	if (cmd->u.unblk_cache_params.type == UNBLOCK_RES_INDEX) {
		HAL_DESC_64_SET_FIELD(reo_desc, REO_UNBLOCK_CACHE,
				      CACHE_BLOCK_RESOURCE_INDEX,
				      cmd->u.unblk_cache_params.index);
	}
	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}
/**
 * hal_reo_cmd_flush_timeout_list_be() - Post a FLUSH_TIMEOUT_LIST command
 *	on the REO command ring
 * @hal_ring_hdl: SRNG handle of the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters (AC list, min release/forward counts)
 *
 * Return: REO command number read back from the descriptor header, or
 *	   -EBUSY if no command ring entry is available
 */
static inline int
hal_reo_cmd_flush_timeout_list_be(hal_ring_handle_t hal_ring_hdl,
				  hal_soc_handle_t hal_soc_hdl,
				  struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;

	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_FLUSH_TIMEOUT_LIST_E,
			sizeof(struct reo_flush_timeout_list));
	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_flush_timeout_list) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);
	/* "AC_TIMOUT_LIST" spelling follows the HW header field name */
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST, AC_TIMOUT_LIST,
			      cmd->u.fl_tim_list_params.ac_list);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST,
			      MINIMUM_RELEASE_DESC_COUNT,
			      cmd->u.fl_tim_list_params.min_rel_desc);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_FLUSH_TIMEOUT_LIST,
			      MINIMUM_FORWARD_BUF_COUNT,
			      cmd->u.fl_tim_list_params.min_fwd_buf);
	hal_srng_access_end(hal_soc, hal_ring_hdl);
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
}
/**
 * hal_reo_cmd_update_rx_queue_be() - Post an UPDATE_RX_REO_QUEUE command
 *	on the REO command ring
 * @hal_ring_hdl: SRNG handle of the REO command ring
 * @hal_soc_hdl: Opaque HAL SOC handle
 * @cmd: command parameters; u.upd_queue_params carries one update_* flag
 *	 per field plus the new field values
 *
 * Return: REO command number read back from the descriptor header, or
 *	   -EBUSY if no command ring entry is available
 */
static inline int
hal_reo_cmd_update_rx_queue_be(hal_ring_handle_t hal_ring_hdl,
			       hal_soc_handle_t hal_soc_hdl,
			       struct hal_reo_cmd_params *cmd)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	uint32_t *reo_desc, val;
	struct hal_reo_cmd_update_queue_params *p;

	p = &cmd->u.upd_queue_params;
	hal_srng_access_start(hal_soc_hdl, hal_ring_hdl);
	reo_desc = hal_srng_src_get_next(hal_soc, hal_ring_hdl);
	if (!reo_desc) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
			  "%s: Out of cmd ring entries", __func__);
		hal_srng_access_end(hal_soc, hal_ring_hdl);
		return -EBUSY;
	}
	HAL_SET_TLV_HDR(reo_desc, WIFIREO_UPDATE_RX_REO_QUEUE_E,
			sizeof(struct reo_update_rx_reo_queue));
	/*
	 * Offsets of descriptor fields defined in HW headers start from
	 * the field after TLV header
	 */
	reo_desc += (sizeof(struct tlv_32_hdr) >> 2);
	/* Clear everything after the uniform command header */
	qdf_mem_zero((reo_desc + NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER),
		     sizeof(struct reo_update_rx_reo_queue) -
		     (NUM_OF_DWORDS_UNIFORM_REO_CMD_HEADER << 2));
	HAL_DESC_64_SET_FIELD(reo_desc, UNIFORM_REO_CMD_HEADER,
			      REO_STATUS_REQUIRED, cmd->std.need_status);
	hal_reo_cmd_set_descr_addr_be(reo_desc, CMD_UPDATE_RX_REO_QUEUE,
				      cmd->std.addr_lo, cmd->std.addr_hi);
	/* First the UPDATE_* flags selecting which queue fields to change */
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_RECEIVE_QUEUE_NUMBER,
			      p->update_rx_queue_num);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, UPDATE_VLD,
			      p->update_vld);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			      p->update_assoc_link_desc);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_DISABLE_DUPLICATE_DETECTION,
			      p->update_disable_dup_detect);
	/* NOTE(review): duplicate of the set immediately above — harmless
	 * (same field, same value) but redundant.
	 */
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_DISABLE_DUPLICATE_DETECTION,
			      p->update_disable_dup_detect);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_SOFT_REORDER_ENABLE,
			      p->update_soft_reorder_enab);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_AC, p->update_ac);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_BAR, p->update_bar);
	/* NOTE(review): duplicate of the UPDATE_BAR set above — harmless
	 * but redundant.
	 */
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_BAR, p->update_bar);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_RTY, p->update_rty);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_CHK_2K_MODE, p->update_chk_2k_mode);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_OOR_MODE, p->update_oor_mode);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_BA_WINDOW_SIZE, p->update_ba_window_size);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_PN_CHECK_NEEDED,
			      p->update_pn_check_needed);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_PN_SHALL_BE_EVEN, p->update_pn_even);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_PN_SHALL_BE_UNEVEN, p->update_pn_uneven);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_PN_HANDLING_ENABLE,
			      p->update_pn_hand_enab);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_PN_SIZE, p->update_pn_size);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_IGNORE_AMPDU_FLAG, p->update_ignore_ampdu);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_SVLD, p->update_svld);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_SSN, p->update_ssn);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_SEQ_2K_ERROR_DETECTED_FLAG,
			      p->update_seq_2k_err_detect);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_PN_VALID, p->update_pn_valid);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      UPDATE_PN, p->update_pn);
	/* Then the new values themselves */
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      RECEIVE_QUEUE_NUMBER, p->rx_queue_num);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      VLD, p->vld);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      ASSOCIATED_LINK_DESCRIPTOR_COUNTER,
			      p->assoc_link_desc);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      DISABLE_DUPLICATE_DETECTION,
			      p->disable_dup_detect);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      SOFT_REORDER_ENABLE, p->soft_reorder_enab);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE, AC, p->ac);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      BAR, p->bar);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      CHK_2K_MODE, p->chk_2k_mode);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      RTY, p->rty);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      OOR_MODE, p->oor_mode);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_CHECK_NEEDED, p->pn_check_needed);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_SHALL_BE_EVEN, p->pn_even);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_SHALL_BE_UNEVEN, p->pn_uneven);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_HANDLING_ENABLE, p->pn_hand_enab);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      IGNORE_AMPDU_FLAG, p->ignore_ampdu);
	/* Clamp to a minimum window of 1 */
	if (p->ba_window_size < 1)
		p->ba_window_size = 1;
	/*
	 * WAR to get 2k exception in Non BA case.
	 * Setting window size to 2 to get 2k jump exception
	 * when we receive aggregates in Non BA case
	 */
	if (p->ba_window_size == 1)
		p->ba_window_size++;
	/* HW field stores (window size - 1) */
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      BA_WINDOW_SIZE, p->ba_window_size - 1);
	/* Translate a PN size given in bits to the HW encoding */
	if (p->pn_size == 24)
		p->pn_size = PN_SIZE_24;
	else if (p->pn_size == 48)
		p->pn_size = PN_SIZE_48;
	else if (p->pn_size == 128)
		p->pn_size = PN_SIZE_128;
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_SIZE, p->pn_size);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      SVLD, p->svld);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      SSN, p->ssn);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      SEQ_2K_ERROR_DETECTED_FLAG, p->seq_2k_err_detect);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_ERROR_DETECTED_FLAG, p->pn_err_detect);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_31_0, p->pn_31_0);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_63_32, p->pn_63_32);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_95_64, p->pn_95_64);
	HAL_DESC_64_SET_FIELD(reo_desc, REO_UPDATE_RX_REO_QUEUE,
			      PN_127_96, p->pn_127_96);
	/* If runtime PM allows it, complete the ring access now; otherwise
	 * reap the entry and mark the ring for a deferred flush.
	 */
	if (hif_pm_runtime_get(hal_soc->hif_handle,
			       RTPM_ID_HAL_REO_CMD, false) == 0) {
		hal_srng_access_end(hal_soc_hdl, hal_ring_hdl);
		hif_pm_runtime_put(hal_soc->hif_handle,
				   RTPM_ID_HAL_REO_CMD);
	} else {
		hal_srng_access_end_reap(hal_soc_hdl, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
	}
	val = reo_desc[CMD_HEADER_DW_OFFSET];
	return HAL_GET_FIELD(UNIFORM_REO_CMD_HEADER, REO_CMD_NUMBER,
			     val);
  710. }
  711. int hal_reo_send_cmd_be(hal_soc_handle_t hal_soc_hdl,
  712. hal_ring_handle_t hal_ring_hdl,
  713. enum hal_reo_cmd_type cmd,
  714. void *params)
  715. {
  716. struct hal_reo_cmd_params *cmd_params =
  717. (struct hal_reo_cmd_params *)params;
  718. int num = 0;
  719. switch (cmd) {
  720. case CMD_GET_QUEUE_STATS:
  721. num = hal_reo_cmd_queue_stats_be(hal_ring_hdl,
  722. hal_soc_hdl, cmd_params);
  723. break;
  724. case CMD_FLUSH_QUEUE:
  725. num = hal_reo_cmd_flush_queue_be(hal_ring_hdl,
  726. hal_soc_hdl, cmd_params);
  727. break;
  728. case CMD_FLUSH_CACHE:
  729. num = hal_reo_cmd_flush_cache_be(hal_ring_hdl,
  730. hal_soc_hdl, cmd_params);
  731. break;
  732. case CMD_UNBLOCK_CACHE:
  733. num = hal_reo_cmd_unblock_cache_be(hal_ring_hdl,
  734. hal_soc_hdl, cmd_params);
  735. break;
  736. case CMD_FLUSH_TIMEOUT_LIST:
  737. num = hal_reo_cmd_flush_timeout_list_be(hal_ring_hdl,
  738. hal_soc_hdl,
  739. cmd_params);
  740. break;
  741. case CMD_UPDATE_RX_REO_QUEUE:
  742. num = hal_reo_cmd_update_rx_queue_be(hal_ring_hdl,
  743. hal_soc_hdl, cmd_params);
  744. break;
  745. default:
  746. hal_err("Invalid REO command type: %d", cmd);
  747. return -EINVAL;
  748. };
  749. return num;
  750. }
/**
 * hal_reo_queue_stats_status_be() - Parse a REO GET_QUEUE_STATS status TLV
 * @ring_desc: REO status ring descriptor (begins with the TLV header)
 * @st_handle: output buffer, cast to struct hal_reo_queue_status
 * @hal_soc_hdl: HAL SoC handle
 *
 * Extracts every field of the REO_GET_QUEUE_STATS_STATUS descriptor —
 * SSN, current reorder index, 128-bit PN, enqueue/dequeue timestamps,
 * the 256-bit rx bitmap and the per-queue counters — into @st_handle.
 * Field offsets/masks come from the HW header macros; the descriptor is
 * read as 64-bit words.
 */
void
hal_reo_queue_stats_status_be(hal_ring_desc_t ring_desc,
			      void *st_handle,
			      hal_soc_handle_t hal_soc_hdl)
{
	struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
	struct hal_reo_queue_status *st =
		(struct hal_reo_queue_status *)st_handle;
	uint64_t *reo_desc = (uint64_t *)ring_desc;
	uint64_t val;

	/*
	 * Offsets of descriptor fields defined in HW headers start
	 * from the field after TLV header
	 */
	reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));

	/* header */
	hal_reo_status_get_header(ring_desc, HAL_REO_QUEUE_STATS_STATUS_TLV,
				  &(st->header), hal_soc);

	/* SSN */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS, SSN)];
	st->ssn = HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS, SSN, val);

	/* current index */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     CURRENT_INDEX)];
	st->curr_idx =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      CURRENT_INDEX, val);

	/* PN bits (128-bit packet number, four 32-bit chunks) */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     PN_31_0)];
	st->pn_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_31_0, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     PN_63_32)];
	st->pn_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_63_32, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     PN_95_64)];
	st->pn_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_95_64, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     PN_127_96)];
	st->pn_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      PN_127_96, val);

	/* timestamps */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     LAST_RX_ENQUEUE_TIMESTAMP)];
	st->last_rx_enq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      LAST_RX_ENQUEUE_TIMESTAMP, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     LAST_RX_DEQUEUE_TIMESTAMP)];
	st->last_rx_deq_tstamp =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      LAST_RX_DEQUEUE_TIMESTAMP, val);

	/* rx bitmap (256-bit reorder window bitmap, eight 32-bit chunks) */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_31_0)];
	st->rx_bitmap_31_0 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_31_0, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_63_32)];
	st->rx_bitmap_63_32 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_63_32, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_95_64)];
	st->rx_bitmap_95_64 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_95_64, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_127_96)];
	st->rx_bitmap_127_96 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_127_96, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_159_128)];
	st->rx_bitmap_159_128 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_159_128, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_191_160)];
	st->rx_bitmap_191_160 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_191_160, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_223_192)];
	st->rx_bitmap_223_192 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_223_192, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     RX_BITMAP_255_224)];
	st->rx_bitmap_255_224 =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      RX_BITMAP_255_224, val);

	/* various counts */
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     CURRENT_MPDU_COUNT)];
	st->curr_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      CURRENT_MPDU_COUNT, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     CURRENT_MSDU_COUNT)];
	st->curr_msdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      CURRENT_MSDU_COUNT, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     TIMEOUT_COUNT)];
	st->fwd_timeout_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      TIMEOUT_COUNT, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     FORWARD_DUE_TO_BAR_COUNT)];
	st->fwd_bar_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      FORWARD_DUE_TO_BAR_COUNT, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     DUPLICATE_COUNT)];
	st->dup_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      DUPLICATE_COUNT, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     FRAMES_IN_ORDER_COUNT)];
	st->frms_in_order_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      FRAMES_IN_ORDER_COUNT, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     BAR_RECEIVED_COUNT)];
	st->bar_rcvd_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      BAR_RECEIVED_COUNT, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     MPDU_FRAMES_PROCESSED_COUNT)];
	st->mpdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      MPDU_FRAMES_PROCESSED_COUNT, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     MSDU_FRAMES_PROCESSED_COUNT)];
	st->msdu_frms_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      MSDU_FRAMES_PROCESSED_COUNT, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     TOTAL_PROCESSED_BYTE_COUNT)];
	st->total_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      TOTAL_PROCESSED_BYTE_COUNT, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     LATE_RECEIVE_MPDU_COUNT)];
	st->late_recv_mpdu_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      LATE_RECEIVE_MPDU_COUNT, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     WINDOW_JUMP_2K)];
	st->win_jump_2k =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      WINDOW_JUMP_2K, val);
	val = reo_desc[HAL_OFFSET_QW(REO_GET_QUEUE_STATS_STATUS,
				     HOLE_COUNT)];
	st->hole_cnt =
		HAL_GET_FIELD(REO_GET_QUEUE_STATS_STATUS,
			      HOLE_COUNT, val);
}
  918. void
  919. hal_reo_flush_queue_status_be(hal_ring_desc_t ring_desc,
  920. void *st_handle,
  921. hal_soc_handle_t hal_soc_hdl)
  922. {
  923. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  924. struct hal_reo_flush_queue_status *st =
  925. (struct hal_reo_flush_queue_status *)st_handle;
  926. uint64_t *reo_desc = (uint64_t *)ring_desc;
  927. uint64_t val;
  928. /*
  929. * Offsets of descriptor fields defined in HW headers start
  930. * from the field after TLV header
  931. */
  932. reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
  933. /* header */
  934. hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_QUEUE_STATUS_TLV,
  935. &(st->header), hal_soc);
  936. /* error bit */
  937. val = reo_desc[HAL_OFFSET(REO_FLUSH_QUEUE_STATUS,
  938. ERROR_DETECTED)];
  939. st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS, ERROR_DETECTED,
  940. val);
  941. }
  942. void
  943. hal_reo_flush_cache_status_be(hal_ring_desc_t ring_desc,
  944. void *st_handle,
  945. hal_soc_handle_t hal_soc_hdl)
  946. {
  947. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  948. struct hal_reo_flush_cache_status *st =
  949. (struct hal_reo_flush_cache_status *)st_handle;
  950. uint64_t *reo_desc = (uint64_t *)ring_desc;
  951. uint64_t val;
  952. /*
  953. * Offsets of descriptor fields defined in HW headers start
  954. * from the field after TLV header
  955. */
  956. reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
  957. /* header */
  958. hal_reo_status_get_header(ring_desc, HAL_REO_FLUSH_CACHE_STATUS_TLV,
  959. &(st->header), hal_soc);
  960. /* error bit */
  961. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
  962. ERROR_DETECTED)];
  963. st->error = HAL_GET_FIELD(REO_FLUSH_QUEUE_STATUS, ERROR_DETECTED,
  964. val);
  965. /* block error */
  966. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
  967. BLOCK_ERROR_DETAILS)];
  968. st->block_error = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
  969. BLOCK_ERROR_DETAILS,
  970. val);
  971. if (!st->block_error)
  972. qdf_set_bit(hal_soc->index,
  973. (unsigned long *)&hal_soc->reo_res_bitmap);
  974. /* cache flush status */
  975. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
  976. CACHE_CONTROLLER_FLUSH_STATUS_HIT)];
  977. st->cache_flush_status = HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
  978. CACHE_CONTROLLER_FLUSH_STATUS_HIT,
  979. val);
  980. /* cache flush descriptor type */
  981. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
  982. CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE)];
  983. st->cache_flush_status_desc_type =
  984. HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
  985. CACHE_CONTROLLER_FLUSH_STATUS_DESC_TYPE,
  986. val);
  987. /* cache flush count */
  988. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_CACHE_STATUS,
  989. CACHE_CONTROLLER_FLUSH_COUNT)];
  990. st->cache_flush_cnt =
  991. HAL_GET_FIELD(REO_FLUSH_CACHE_STATUS,
  992. CACHE_CONTROLLER_FLUSH_COUNT,
  993. val);
  994. }
  995. void
  996. hal_reo_unblock_cache_status_be(hal_ring_desc_t ring_desc,
  997. hal_soc_handle_t hal_soc_hdl,
  998. void *st_handle)
  999. {
  1000. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  1001. struct hal_reo_unblk_cache_status *st =
  1002. (struct hal_reo_unblk_cache_status *)st_handle;
  1003. uint64_t *reo_desc = (uint64_t *)ring_desc;
  1004. uint64_t val;
  1005. /*
  1006. * Offsets of descriptor fields defined in HW headers start
  1007. * from the field after TLV header
  1008. */
  1009. reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
  1010. /* header */
  1011. hal_reo_status_get_header(ring_desc, HAL_REO_UNBLK_CACHE_STATUS_TLV,
  1012. &st->header, hal_soc);
  1013. /* error bit */
  1014. val = reo_desc[HAL_OFFSET_QW(REO_UNBLOCK_CACHE_STATUS,
  1015. ERROR_DETECTED)];
  1016. st->error = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS,
  1017. ERROR_DETECTED,
  1018. val);
  1019. /* unblock type */
  1020. val = reo_desc[HAL_OFFSET_QW(REO_UNBLOCK_CACHE_STATUS,
  1021. UNBLOCK_TYPE)];
  1022. st->unblock_type = HAL_GET_FIELD(REO_UNBLOCK_CACHE_STATUS,
  1023. UNBLOCK_TYPE,
  1024. val);
  1025. if (!st->error && (st->unblock_type == UNBLOCK_RES_INDEX))
  1026. qdf_clear_bit(hal_soc->index,
  1027. (unsigned long *)&hal_soc->reo_res_bitmap);
  1028. }
  1029. void hal_reo_flush_timeout_list_status_be(hal_ring_desc_t ring_desc,
  1030. void *st_handle,
  1031. hal_soc_handle_t hal_soc_hdl)
  1032. {
  1033. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  1034. struct hal_reo_flush_timeout_list_status *st =
  1035. (struct hal_reo_flush_timeout_list_status *)st_handle;
  1036. uint64_t *reo_desc = (uint64_t *)ring_desc;
  1037. uint64_t val;
  1038. /*
  1039. * Offsets of descriptor fields defined in HW headers start
  1040. * from the field after TLV header
  1041. */
  1042. reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
  1043. /* header */
  1044. hal_reo_status_get_header(ring_desc, HAL_REO_TIMOUT_LIST_STATUS_TLV,
  1045. &(st->header), hal_soc);
  1046. /* error bit */
  1047. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
  1048. ERROR_DETECTED)];
  1049. st->error = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
  1050. ERROR_DETECTED,
  1051. val);
  1052. /* list empty */
  1053. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
  1054. TIMOUT_LIST_EMPTY)];
  1055. st->list_empty = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
  1056. TIMOUT_LIST_EMPTY,
  1057. val);
  1058. /* release descriptor count */
  1059. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
  1060. RELEASE_DESC_COUNT)];
  1061. st->rel_desc_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
  1062. RELEASE_DESC_COUNT,
  1063. val);
  1064. /* forward buf count */
  1065. val = reo_desc[HAL_OFFSET_QW(REO_FLUSH_TIMEOUT_LIST_STATUS,
  1066. FORWARD_BUF_COUNT)];
  1067. st->fwd_buf_cnt = HAL_GET_FIELD(REO_FLUSH_TIMEOUT_LIST_STATUS,
  1068. FORWARD_BUF_COUNT,
  1069. val);
  1070. }
  1071. void hal_reo_desc_thres_reached_status_be(hal_ring_desc_t ring_desc,
  1072. void *st_handle,
  1073. hal_soc_handle_t hal_soc_hdl)
  1074. {
  1075. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  1076. struct hal_reo_desc_thres_reached_status *st =
  1077. (struct hal_reo_desc_thres_reached_status *)st_handle;
  1078. uint64_t *reo_desc = (uint64_t *)ring_desc;
  1079. uint64_t val;
  1080. /*
  1081. * Offsets of descriptor fields defined in HW headers start
  1082. * from the field after TLV header
  1083. */
  1084. reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
  1085. /* header */
  1086. hal_reo_status_get_header(ring_desc,
  1087. HAL_REO_DESC_THRES_STATUS_TLV,
  1088. &(st->header), hal_soc);
  1089. /* threshold index */
  1090. val = reo_desc[HAL_OFFSET_QW(
  1091. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  1092. THRESHOLD_INDEX)];
  1093. st->thres_index = HAL_GET_FIELD(
  1094. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  1095. THRESHOLD_INDEX,
  1096. val);
  1097. /* link desc counters */
  1098. val = reo_desc[HAL_OFFSET_QW(
  1099. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  1100. LINK_DESCRIPTOR_COUNTER0)];
  1101. st->link_desc_counter0 = HAL_GET_FIELD(
  1102. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  1103. LINK_DESCRIPTOR_COUNTER0,
  1104. val);
  1105. val = reo_desc[HAL_OFFSET_QW(
  1106. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  1107. LINK_DESCRIPTOR_COUNTER1)];
  1108. st->link_desc_counter1 = HAL_GET_FIELD(
  1109. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  1110. LINK_DESCRIPTOR_COUNTER1,
  1111. val);
  1112. val = reo_desc[HAL_OFFSET_QW(
  1113. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  1114. LINK_DESCRIPTOR_COUNTER2)];
  1115. st->link_desc_counter2 = HAL_GET_FIELD(
  1116. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  1117. LINK_DESCRIPTOR_COUNTER2,
  1118. val);
  1119. val = reo_desc[HAL_OFFSET_QW(
  1120. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  1121. LINK_DESCRIPTOR_COUNTER_SUM)];
  1122. st->link_desc_counter_sum = HAL_GET_FIELD(
  1123. REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS,
  1124. LINK_DESCRIPTOR_COUNTER_SUM,
  1125. val);
  1126. }
  1127. void
  1128. hal_reo_rx_update_queue_status_be(hal_ring_desc_t ring_desc,
  1129. void *st_handle,
  1130. hal_soc_handle_t hal_soc_hdl)
  1131. {
  1132. struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
  1133. struct hal_reo_update_rx_queue_status *st =
  1134. (struct hal_reo_update_rx_queue_status *)st_handle;
  1135. uint64_t *reo_desc = (uint64_t *)ring_desc;
  1136. /*
  1137. * Offsets of descriptor fields defined in HW headers start
  1138. * from the field after TLV header
  1139. */
  1140. reo_desc += HAL_GET_NUM_QWORDS(sizeof(struct tlv_32_hdr));
  1141. /* header */
  1142. hal_reo_status_get_header(ring_desc,
  1143. HAL_REO_UPDATE_RX_QUEUE_STATUS_TLV,
  1144. &(st->header), hal_soc);
  1145. }
  1146. uint8_t hal_get_tlv_hdr_size_be(void)
  1147. {
  1148. return sizeof(struct tlv_32_hdr);
  1149. }