dp_rx_mon_dest.c
/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_rx_mon.h"
#include "wlan_cfg.h"
#include "dp_internal.h"
/**
 * dp_rx_mon_link_desc_return() - Return an MPDU link descriptor to HW
 * (WBM), following error handling
 *
 * @dp_pdev: core txrx pdev context
 * @buf_addr_info: void pointer to monitor link descriptor buf addr info
 * @mac_id: mac id which the descriptor ring belongs to
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
        void *buf_addr_info, int mac_id)
{
        struct dp_srng *dp_srng;
        void *hal_srng;
        void *hal_soc;
        QDF_STATUS status = QDF_STATUS_E_FAILURE;
        void *src_srng_desc;
        int mac_for_pdev = dp_get_mac_id_for_mac(dp_pdev->soc, mac_id);

        hal_soc = dp_pdev->soc->hal_soc;

        dp_srng = &dp_pdev->rxdma_mon_desc_ring[mac_for_pdev];
        hal_srng = dp_srng->hal_srng;

        qdf_assert(hal_srng);

        if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_srng))) {
                /* TODO */
                /*
                 * Need API to convert from hal_ring pointer to
                 * Ring Type / Ring Id combo
                 */
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                        "%s %d : HAL RING Access For WBM Release SRNG Failed -- %pK\n",
                        __func__, __LINE__, hal_srng);
                goto done;
        }

        src_srng_desc = hal_srng_src_get_next(hal_soc, hal_srng);

        if (qdf_likely(src_srng_desc)) {
                /* Return link descriptor through WBM ring (SW2WBM) */
                hal_rx_mon_msdu_link_desc_set(hal_soc,
                        src_srng_desc, buf_addr_info);
                status = QDF_STATUS_SUCCESS;
        } else {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                        "%s %d -- Monitor Link Desc WBM Release Ring Full\n",
                        __func__, __LINE__);
        }

done:
        hal_srng_access_end(hal_soc, hal_srng);
        return status;
}
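
/*
 * Note: within this file the caller is dp_rx_mon_mpdu_pop(), which hands
 * each consumed MSDU link descriptor back here once its MSDU list has
 * been walked, so that WBM can recycle the descriptor to the RXDMA ring.
 */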
/**
 * dp_mon_adjust_frag_len() - An MPDU or MSDU may span multiple nbufs.
 * Compute the data length carried in the current fragment and decrement
 * the remaining total accordingly.
 *
 * @total_len: pointer to remaining data length.
 * @frag_len: pointer to data length in this fragment.
 */
static inline void dp_mon_adjust_frag_len(uint32_t *total_len,
        uint32_t *frag_len)
{
        if (*total_len >= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
                *frag_len = RX_BUFFER_SIZE - RX_PKT_TLVS_LEN;
                *total_len -= *frag_len;
        } else {
                *frag_len = *total_len;
                *total_len = 0;
        }
}
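
/*
 * Worked example (illustrative only): let P = RX_BUFFER_SIZE -
 * RX_PKT_TLVS_LEN, the usable payload per monitor buffer. For an MSDU
 * of total_len = 2 * P + 100 bytes spread over three buffers, three
 * successive calls yield frag_len = P, P and 100, with total_len
 * dropping to P + 100, then 100, then 0.
 */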
/**
 * dp_rx_mon_mpdu_pop() - Pop the MSDUs of an MPDU from the rxdma monitor
 * destination ring, returning the consumed link descriptors to HW (WBM)
 * and handling error codes along the way
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: rxdma monitor destination ring entry being processed
 * @head_msdu: head of msdu list to be popped
 * @tail_msdu: tail of msdu list to be popped
 * @npackets: number of packets to be popped
 * @ppdu_id: ppdu id of the ppdu being processed
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU popped
 */
static inline uint32_t
dp_rx_mon_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
        void *rxdma_dst_ring_desc, qdf_nbuf_t *head_msdu,
        qdf_nbuf_t *tail_msdu, uint32_t *npackets, uint32_t *ppdu_id,
        union dp_rx_desc_list_elem_t **head,
        union dp_rx_desc_list_elem_t **tail)
{
        struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
        void *rx_desc_tlv;
        void *rx_msdu_link_desc;
        qdf_nbuf_t msdu;
        qdf_nbuf_t last;
        struct hal_rx_msdu_list msdu_list;
        uint16_t num_msdus;
        uint32_t rx_buf_size, rx_pkt_offset;
        struct hal_buf_info buf_info;
        void *p_buf_addr_info;
        void *p_last_buf_addr_info;
        uint32_t rx_bufs_used = 0;
        uint32_t msdu_ppdu_id, msdu_cnt, last_ppdu_id;
        uint8_t *data;
        uint32_t i;
        uint32_t total_frag_len = 0, frag_len = 0;
        bool is_frag, is_first_msdu;
        bool drop_mpdu = false;

        msdu = NULL;
        last_ppdu_id = dp_pdev->ppdu_info.com_info.last_ppdu_id;
        last = NULL;

        hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
                &p_last_buf_addr_info, &msdu_cnt);

        if ((hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc) ==
                HAL_RX_WBM_RXDMA_PSH_RSN_ERROR)) {
                uint8_t rxdma_err =
                        hal_rx_reo_ent_rxdma_error_code_get(
                                rxdma_dst_ring_desc);
                if (qdf_unlikely((rxdma_err == HAL_RXDMA_ERR_FLUSH_REQUEST) ||
                        (rxdma_err == HAL_RXDMA_ERR_MPDU_LENGTH) ||
                        (rxdma_err == HAL_RXDMA_ERR_OVERFLOW))) {
                        drop_mpdu = true;
                        dp_pdev->rx_mon_stats.dest_mpdu_drop++;
                }
        }

        is_frag = false;
        is_first_msdu = true;

        do {
                rx_msdu_link_desc =
                        dp_rx_cookie_2_mon_link_desc_va(dp_pdev, &buf_info,
                                mac_id);

                qdf_assert(rx_msdu_link_desc);

                hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list, &num_msdus);

                for (i = 0; i < num_msdus; i++) {
                        uint32_t l2_hdr_offset;
                        struct dp_rx_desc *rx_desc =
                                dp_rx_cookie_2_va_mon_buf(soc,
                                        msdu_list.sw_cookie[i]);

                        qdf_assert(rx_desc);
                        msdu = rx_desc->nbuf;

                        if (rx_desc->unmapped == 0) {
                                qdf_nbuf_unmap_single(soc->osdev, msdu,
                                        QDF_DMA_FROM_DEVICE);
                                rx_desc->unmapped = 1;
                        }

                        if (drop_mpdu) {
                                qdf_nbuf_free(msdu);
                                msdu = NULL;
                                goto next_msdu;
                        }

                        data = qdf_nbuf_data(msdu);

                        rx_desc_tlv = HAL_RX_MON_DEST_GET_DESC(data);

                        QDF_TRACE(QDF_MODULE_ID_DP,
                                QDF_TRACE_LEVEL_DEBUG,
                                "[%s] i=%d, ppdu_id=%x, last_ppdu_id=%x num_msdus = %u\n",
                                __func__, i, *ppdu_id,
                                last_ppdu_id, num_msdus);

                        if (is_first_msdu) {
                                msdu_ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(
                                        rx_desc_tlv);
                                is_first_msdu = false;

                                QDF_TRACE(QDF_MODULE_ID_DP,
                                        QDF_TRACE_LEVEL_DEBUG,
                                        "[%s] msdu_ppdu_id=%x\n",
                                        __func__, msdu_ppdu_id);

                                if (*ppdu_id > msdu_ppdu_id)
                                        QDF_TRACE(QDF_MODULE_ID_DP,
                                                QDF_TRACE_LEVEL_DEBUG,
                                                "[%s][%d] ppdu_id=%d msdu_ppdu_id=%d\n",
                                                __func__, __LINE__, *ppdu_id,
                                                msdu_ppdu_id);

                                if ((*ppdu_id < msdu_ppdu_id) && (*ppdu_id >
                                        last_ppdu_id)) {
                                        *ppdu_id = msdu_ppdu_id;
                                        return rx_bufs_used;
                                }
                        }

                        if (hal_rx_desc_is_first_msdu(rx_desc_tlv))
                                hal_rx_mon_hw_desc_get_mpdu_status(rx_desc_tlv,
                                        &(dp_pdev->ppdu_info.rx_status));

                        if (msdu_list.msdu_info[i].msdu_flags &
                                HAL_MSDU_F_MSDU_CONTINUATION) {
                                if (!is_frag) {
                                        total_frag_len =
                                                msdu_list.msdu_info[i].msdu_len;
                                        is_frag = true;
                                }
                                dp_mon_adjust_frag_len(
                                        &total_frag_len, &frag_len);
                        } else {
                                if (is_frag) {
                                        dp_mon_adjust_frag_len(
                                                &total_frag_len, &frag_len);
                                } else {
                                        frag_len =
                                                msdu_list.msdu_info[i].msdu_len;
                                }
                                is_frag = false;
                                msdu_cnt--;
                        }

                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                                "%s total_len %u frag_len %u flags %u",
                                __func__, total_frag_len, frag_len,
                                msdu_list.msdu_info[i].msdu_flags);

                        rx_pkt_offset = HAL_RX_MON_HW_RX_DESC_SIZE();
                        /*
                         * HW structures call this L3 header padding
                         * -- even though this is actually the offset
                         * from the buffer beginning where the L2
                         * header begins.
                         */
                        l2_hdr_offset =
                                hal_rx_msdu_end_l3_hdr_padding_get(data);

                        rx_buf_size = rx_pkt_offset + l2_hdr_offset
                                + frag_len;

                        qdf_nbuf_set_pktlen(msdu, rx_buf_size);
#if 0
                        /* Disabled: packets were seen with msdu_done set to 0 */
                        /*
                         * Check if DMA completed -- msdu_done is the
                         * last bit to be written
                         */
                        if (!hal_rx_attn_msdu_done_get(rx_desc_tlv)) {
                                QDF_TRACE(QDF_MODULE_ID_DP,
                                        QDF_TRACE_LEVEL_ERROR,
                                        "%s:%d: Pkt Desc\n",
                                        __func__, __LINE__);
                                QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
                                        QDF_TRACE_LEVEL_ERROR,
                                        rx_desc_tlv, 128);
                                qdf_assert_always(0);
                        }
#endif
                        QDF_TRACE(QDF_MODULE_ID_DP,
                                QDF_TRACE_LEVEL_DEBUG,
                                "%s: rx_pkt_offset=%d, l2_hdr_offset=%d, msdu_len=%d, addr=%p skb->len %lu",
                                __func__, rx_pkt_offset, l2_hdr_offset,
                                msdu_list.msdu_info[i].msdu_len,
                                qdf_nbuf_data(msdu), qdf_nbuf_len(msdu));

                        if (head_msdu && *head_msdu == NULL) {
                                *head_msdu = msdu;
                        } else {
                                if (last)
                                        qdf_nbuf_set_next(last, msdu);
                        }

                        last = msdu;
next_msdu:
                        rx_bufs_used++;
                        dp_rx_add_to_free_desc_list(head,
                                tail, rx_desc);
                }

                hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
                        &p_buf_addr_info);

                if (dp_rx_mon_link_desc_return(dp_pdev, p_last_buf_addr_info,
                        mac_id) != QDF_STATUS_SUCCESS)
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                "dp_rx_mon_link_desc_return failed\n");

                p_last_buf_addr_info = p_buf_addr_info;
        } while (buf_info.paddr && msdu_cnt);

        if (last)
                qdf_nbuf_set_next(last, NULL);

        *tail_msdu = msdu;

        return rx_bufs_used;
}
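
/**
 * dp_rx_msdus_set_payload() - point the nbuf data pointer at the MSDU
 * payload by pulling it past the HW Rx TLV header and the L2 header
 * offset (reported by HW as "L3 header padding")
 * @msdu: monitor destination buffer to adjust
 */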
static inline
void dp_rx_msdus_set_payload(qdf_nbuf_t msdu)
{
        uint8_t *data;
        uint32_t rx_pkt_offset, l2_hdr_offset;

        data = qdf_nbuf_data(msdu);
        rx_pkt_offset = HAL_RX_MON_HW_RX_DESC_SIZE();
        l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(data);
        qdf_nbuf_pull_head(msdu, rx_pkt_offset + l2_hdr_offset);
}
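
/**
 * dp_rx_mon_restitch_mpdu_from_msdus() - stitch the MSDU nbufs of one
 * MPDU back into a single frame for delivery on the monitor interface
 * @soc: core DP main context
 * @mac_id: mac id on which the MPDU was received
 * @head_msdu: head of the MSDU list produced by dp_rx_mon_mpdu_pop()
 * @last_msdu: tail of that list
 * @rx_status: monitor receive status to fill in
 *
 * For RAW decap the buffers are chained as an nbuf frag list as-is; for
 * decapped frames the 802.11 header is rebuilt from the HW status into a
 * fresh head nbuf and each subframe's LLC header is copied back in front
 * of its payload. As an illustrative example of the header math below, a
 * QoS data frame in DS-to-DS direction with an extended-IV cipher gives
 * wifi_hdr_len = 24 + 6 + 2 + 8 = 40 bytes.
 *
 * Return: head nbuf of the restitched MPDU, or NULL on failure
 */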
static inline
qdf_nbuf_t dp_rx_mon_restitch_mpdu_from_msdus(struct dp_soc *soc,
        uint32_t mac_id, qdf_nbuf_t head_msdu, qdf_nbuf_t last_msdu,
        struct cdp_mon_status *rx_status)
{
        qdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list;
        uint32_t decap_format, wifi_hdr_len, sec_hdr_len, msdu_llc_len,
                mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir,
                is_amsdu, is_first_frag, amsdu_pad;
        void *rx_desc;
        char *hdr_desc;
        unsigned char *dest;
        struct ieee80211_frame *wh;
        struct ieee80211_qoscntl *qos;
        struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);

        head_frag_list = NULL;
        mpdu_buf = NULL;

        /* The nbuf has been pulled just beyond the status and points to the
         * payload
         */
        if (!head_msdu)
                goto mpdu_stitch_fail;

        msdu_orig = head_msdu;
        rx_desc = qdf_nbuf_data(msdu_orig);

        if (HAL_RX_DESC_GET_MPDU_LENGTH_ERR(rx_desc)) {
                /* It looks like there is some issue on MPDU len err */
                /* Need to investigate further whether to drop the packet */
                DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
                return NULL;
        }

        rx_desc = qdf_nbuf_data(last_msdu);

        rx_status->cdp_rs_fcs_err = HAL_RX_DESC_GET_MPDU_FCS_ERR(rx_desc);
        dp_pdev->ppdu_info.rx_status.rs_fcs_err =
                HAL_RX_DESC_GET_MPDU_FCS_ERR(rx_desc);

        /* Fill out the rx_status from the PPDU start and end fields */
        /* HAL_RX_GET_PPDU_STATUS(soc, mac_id, rx_status); */

        rx_desc = qdf_nbuf_data(head_msdu);

        decap_format = HAL_RX_DESC_GET_DECAP_FORMAT(rx_desc);

        /* Easy case - The MSDU status indicates that this is a non-decapped
         * packet in RAW mode.
         */
        if (decap_format == HAL_HW_RX_DECAP_FORMAT_RAW) {
                /* Note that this path might suffer from headroom
                 * unavailability - but the RX status is usually enough
                 */

                dp_rx_msdus_set_payload(head_msdu);

                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                        "[%s][%d] decap format raw head %pK head->next %pK last_msdu %pK last_msdu->next %pK",
                        __func__, __LINE__, head_msdu, head_msdu->next,
                        last_msdu, last_msdu->next);

                mpdu_buf = head_msdu;
                prev_buf = mpdu_buf;
                frag_list_sum_len = 0;

                msdu = qdf_nbuf_next(head_msdu);
                is_first_frag = 1;

                while (msdu) {
                        dp_rx_msdus_set_payload(msdu);

                        if (is_first_frag) {
                                is_first_frag = 0;
                                head_frag_list = msdu;
                        }

                        frag_list_sum_len += qdf_nbuf_len(msdu);

                        /* Maintain the linking of the cloned MSDUS */
                        qdf_nbuf_set_next_ext(prev_buf, msdu);

                        /* Move to the next */
                        prev_buf = msdu;
                        msdu = qdf_nbuf_next(msdu);
                }

                qdf_nbuf_trim_tail(prev_buf, HAL_RX_FCS_LEN);

                /* If there were more fragments to this RAW frame */
                if (head_frag_list) {
                        if (frag_list_sum_len <
                                sizeof(struct ieee80211_frame_min_one)) {
                                DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
                                return NULL;
                        }
                        frag_list_sum_len -= HAL_RX_FCS_LEN;
                        qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list,
                                frag_list_sum_len);
                        qdf_nbuf_set_next(mpdu_buf, NULL);
                }

                goto mpdu_stitch_done;
        }

        /* Decap mode:
         * Calculate the amount of header to knock off from the decapped
         * packet, based on the decap type, and the corresponding number of
         * raw bytes to copy from the status header
         */
        rx_desc = qdf_nbuf_data(head_msdu);

        hdr_desc = HAL_RX_DESC_GET_80211_HDR(rx_desc);

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                "[%s][%d] decap format not raw",
                __func__, __LINE__);

        /* Base size */
        wifi_hdr_len = sizeof(struct ieee80211_frame);
        wh = (struct ieee80211_frame *)hdr_desc;

        dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;

        if (dir == IEEE80211_FC1_DIR_DSTODS)
                wifi_hdr_len += 6;

        is_amsdu = 0;
        if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
                qos = (struct ieee80211_qoscntl *)
                        (hdr_desc + wifi_hdr_len);
                wifi_hdr_len += 2;

                is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
        }

        /* Calculate security header length based on 'Protected'
         * and 'EXT_IV' flag
         */
        if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
                char *iv = (char *)wh + wifi_hdr_len;

                if (iv[3] & KEY_EXTIV)
                        sec_hdr_len = 8;
                else
                        sec_hdr_len = 4;
        } else {
                sec_hdr_len = 0;
        }
        wifi_hdr_len += sec_hdr_len;

        /* MSDU related stuff: 8-byte LLC/SNAP header, preceded by a 14-byte
         * A-MSDU subframe header when this is an A-MSDU
         */
        msdu_llc_len = is_amsdu ? (14 + 8) : 8;

        mpdu_buf_len = wifi_hdr_len + msdu_llc_len;

        /* "Decap" header to remove from MSDU buffer */
        decap_hdr_pull_bytes = 14;

        /* Allocate a new nbuf for holding the 802.11 header retrieved from the
         * status of the now decapped first msdu. Leave enough headroom for
         * accommodating any radiotap/prism-like PHY header
         */
#define MAX_MONITOR_HEADER (512)
        mpdu_buf = qdf_nbuf_alloc(soc->osdev,
                MAX_MONITOR_HEADER + mpdu_buf_len,
                MAX_MONITOR_HEADER, 4, FALSE);

        if (!mpdu_buf)
                goto mpdu_stitch_done;

        /* Copy the MPDU related header and enc headers into the first buffer
         * - Note that there can be a 2 byte pad between header and enc header
         */
        prev_buf = mpdu_buf;
        dest = qdf_nbuf_put_tail(prev_buf, wifi_hdr_len);
        if (!dest)
                goto mpdu_stitch_fail;

        qdf_mem_copy(dest, hdr_desc, wifi_hdr_len);
        hdr_desc += wifi_hdr_len;

#if 0
        dest = qdf_nbuf_put_tail(prev_buf, sec_hdr_len);
        adf_os_mem_copy(dest, hdr_desc, sec_hdr_len);
        hdr_desc += sec_hdr_len;
#endif

        /* The first LLC len is copied into the MPDU buffer */
        frag_list_sum_len = 0;

        msdu_orig = head_msdu;
        is_first_frag = 1;
        amsdu_pad = 0;

        while (msdu_orig) {
                /* TODO: intra AMSDU padding - do we need it ??? */

                msdu = msdu_orig;

                if (is_first_frag) {
                        head_frag_list = msdu;
                } else {
                        /* Reload the hdr ptr only on non-first MSDUs */
                        rx_desc = qdf_nbuf_data(msdu_orig);
                        hdr_desc = HAL_RX_DESC_GET_80211_HDR(rx_desc);
                }

                /* Copy this buffer's MSDU related status into the prev buffer */
                if (is_first_frag)
                        is_first_frag = 0;

                dest = qdf_nbuf_put_tail(prev_buf,
                        msdu_llc_len + amsdu_pad);

                if (!dest)
                        goto mpdu_stitch_fail;

                dest += amsdu_pad;
                qdf_mem_copy(dest, hdr_desc, msdu_llc_len);

                dp_rx_msdus_set_payload(msdu);

                /* Push the MSDU buffer beyond the decap header */
                qdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes);
                frag_list_sum_len += msdu_llc_len + qdf_nbuf_len(msdu)
                        + amsdu_pad;

                /* Set up intra-AMSDU pad to be added to start of next buffer -
                 * AMSDU pad is 4 byte pad on AMSDU subframe
                 */
                amsdu_pad = (msdu_llc_len + qdf_nbuf_len(msdu)) & 0x3;
                amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;

                /* TODO FIXME How do we handle MSDUs that have fraglist - Should
                 * probably iterate all the frags cloning them along the way and
                 * also updating the prev_buf pointer
                 */

                /* Move to the next */
                prev_buf = msdu;
                msdu_orig = qdf_nbuf_next(msdu_orig);
        }

#if 0
        /* Add in the trailer section - encryption trailer + FCS */
        qdf_nbuf_put_tail(prev_buf, HAL_RX_FCS_LEN);
        frag_list_sum_len += HAL_RX_FCS_LEN;
#endif

        frag_list_sum_len -= msdu_llc_len;

        /* TODO: Convert this to suitable qdf routines */
        qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list,
                frag_list_sum_len);

mpdu_stitch_done:
        /* Check if this buffer contains the PPDU end status for TSF */
        /* Need to revisit this code to see where we can get the TSF timestamp */
#if 0
        /* PPDU end TLV will be retrieved from monitor status ring */
        last_mpdu =
                (*(((u_int32_t *)&rx_desc->attention)) &
                RX_ATTENTION_0_LAST_MPDU_MASK) >>
                RX_ATTENTION_0_LAST_MPDU_LSB;

        if (last_mpdu)
                rx_status->rs_tstamp.tsf = rx_desc->ppdu_end.tsf_timestamp;
#endif

        /* mpdu_buf may be NULL here if the allocation above failed */
        if (mpdu_buf)
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                        "%s %d mpdu_buf %pK mpdu_buf->len %u",
                        __func__, __LINE__,
                        mpdu_buf, mpdu_buf->len);

        return mpdu_buf;

mpdu_stitch_fail:
        if ((mpdu_buf) && (decap_format != HAL_HW_RX_DECAP_FORMAT_RAW)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s mpdu_stitch_fail mpdu_buf %pK",
                        __func__, mpdu_buf);
                /* Free the head buffer */
                qdf_nbuf_free(mpdu_buf);
        }
        return NULL;
}
/**
 * dp_rx_extract_radiotap_info() - Extract and populate information in
 * struct mon_rx_status type
 * @rx_status: Receive status
 * @rx_mon_status: Monitor mode status
 *
 * Return: None
 */
static inline
void dp_rx_extract_radiotap_info(struct cdp_mon_status *rx_status,
        struct mon_rx_status *rx_mon_status)
{
        rx_mon_status->tsft = rx_status->cdp_rs_tstamp.cdp_tsf;
        rx_mon_status->chan_freq = rx_status->rs_freq;
        rx_mon_status->chan_num = rx_status->rs_channel;
        rx_mon_status->chan_flags = rx_status->rs_flags;
        rx_mon_status->rate = rx_status->rs_datarate;
        /* TODO: rx_mon_status->ant_signal_db */
        /* TODO: rx_mon_status->nr_ant */
        rx_mon_status->mcs = rx_status->cdf_rs_rate_mcs;
        rx_mon_status->is_stbc = rx_status->cdp_rs_stbc;
        rx_mon_status->sgi = rx_status->cdp_rs_sgi;
        /* TODO: rx_mon_status->ldpc */
        /* TODO: rx_mon_status->beamformed */
        /* TODO: rx_mon_status->vht_flags */
        /* TODO: rx_mon_status->vht_flag_values1 */
}
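
/**
 * dp_rx_mon_deliver() - restitch the MSDU list into an MPDU, attach a
 * radiotap header and hand the frame to the OS monitor interface
 * @soc: core DP main context
 * @mac_id: mac id on which the frame was received
 * @head_msdu: head of the MSDU list for this MPDU
 * @tail_msdu: tail of that list
 *
 * On any failure the MSDU chain is freed.
 *
 * Return: QDF_STATUS_SUCCESS on delivery, QDF_STATUS_E_INVAL otherwise
 */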
QDF_STATUS dp_rx_mon_deliver(struct dp_soc *soc, uint32_t mac_id,
        qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu)
{
        struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
        struct cdp_mon_status *rs = &pdev->rx_mon_recv_status;
        qdf_nbuf_t mon_skb, skb_next;
        qdf_nbuf_t mon_mpdu = NULL;

        if ((pdev->monitor_vdev == NULL) ||
                (pdev->monitor_vdev->osif_rx_mon == NULL))
                goto mon_deliver_fail;

        /* restitch mon MPDU for delivery via monitor interface */
        mon_mpdu = dp_rx_mon_restitch_mpdu_from_msdus(soc, mac_id, head_msdu,
                tail_msdu, rs);

        if (mon_mpdu && pdev->monitor_vdev && pdev->monitor_vdev->osif_vdev) {
                pdev->ppdu_info.rx_status.ppdu_id =
                        pdev->ppdu_info.com_info.ppdu_id;
                qdf_nbuf_update_radiotap(&(pdev->ppdu_info.rx_status),
                        mon_mpdu, sizeof(struct rx_pkt_tlvs));
                pdev->monitor_vdev->osif_rx_mon(
                        pdev->monitor_vdev->osif_vdev, mon_mpdu, NULL);
        } else {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                        "[%s][%d] mon_mpdu=%pK pdev->monitor_vdev %pK osif_vdev %pK",
                        __func__, __LINE__, mon_mpdu, pdev->monitor_vdev,
                        pdev->monitor_vdev->osif_vdev);
                goto mon_deliver_fail;
        }

        return QDF_STATUS_SUCCESS;

mon_deliver_fail:
        mon_skb = head_msdu;
        while (mon_skb) {
                skb_next = qdf_nbuf_next(mon_skb);

                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                        "[%s][%d] mon_skb=%pK len %u", __func__, __LINE__,
                        mon_skb, mon_skb->len);

                qdf_nbuf_free(mon_skb);
                mon_skb = skb_next;
        }
        return QDF_STATUS_E_INVAL;
}
/**
 * dp_rx_mon_dest_process() - Brain of the Rx processing functionality
 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @mac_id: mac id whose monitor destination ring is to be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: none
 */
void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
        struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
        void *hal_soc;
        void *rxdma_dst_ring_desc;
        void *mon_dst_srng;
        union dp_rx_desc_list_elem_t *head = NULL;
        union dp_rx_desc_list_elem_t *tail = NULL;
        uint32_t ppdu_id;
        uint32_t rx_bufs_used;
        int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
        struct cdp_pdev_mon_stats *rx_mon_stats;

        mon_dst_srng = pdev->rxdma_mon_dst_ring[mac_for_pdev].hal_srng;

        if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                        "%s %d : HAL Monitor Destination Ring Init Failed -- %pK\n",
                        __func__, __LINE__, mon_dst_srng);
                return;
        }

        hal_soc = soc->hal_soc;

        qdf_assert(hal_soc);

        qdf_spin_lock_bh(&pdev->mon_lock);

        if (pdev->monitor_vdev == NULL) {
                /* Unlock with the same _bh variant used to lock above */
                qdf_spin_unlock_bh(&pdev->mon_lock);
                return;
        }

        if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dst_srng))) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                        "%s %d : HAL Monitor Destination Ring access Failed -- %pK\n",
                        __func__, __LINE__, mon_dst_srng);
                /* Release the lock taken above before bailing out */
                qdf_spin_unlock_bh(&pdev->mon_lock);
                return;
        }

        ppdu_id = pdev->ppdu_info.com_info.ppdu_id;
        rx_bufs_used = 0;
        rx_mon_stats = &pdev->rx_mon_stats;

        while (qdf_likely(rxdma_dst_ring_desc =
                hal_srng_dst_peek(hal_soc, mon_dst_srng))) {
                qdf_nbuf_t head_msdu, tail_msdu;
                uint32_t npackets;

                head_msdu = (qdf_nbuf_t) NULL;
                tail_msdu = (qdf_nbuf_t) NULL;

                rx_bufs_used += dp_rx_mon_mpdu_pop(soc, mac_id,
                        rxdma_dst_ring_desc,
                        &head_msdu, &tail_msdu,
                        &npackets, &ppdu_id,
                        &head, &tail);

                if (ppdu_id != pdev->ppdu_info.com_info.ppdu_id) {
                        pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
                        qdf_mem_zero(&(pdev->ppdu_info.rx_status),
                                sizeof(pdev->ppdu_info.rx_status));
                        pdev->ppdu_info.com_info.last_ppdu_id =
                                pdev->ppdu_info.com_info.ppdu_id;
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
                                "%s %d ppdu_id %x != ppdu_info.com_info.ppdu_id %x",
                                __func__, __LINE__,
                                ppdu_id, pdev->ppdu_info.com_info.ppdu_id);
                        break;
                }

                if (qdf_likely((head_msdu != NULL) && (tail_msdu != NULL))) {
                        rx_mon_stats->dest_mpdu_done++;
                        dp_rx_mon_deliver(soc, mac_id, head_msdu, tail_msdu);
                }

                rxdma_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
                        mon_dst_srng);
        }

        hal_srng_access_end(hal_soc, mon_dst_srng);

        qdf_spin_unlock_bh(&pdev->mon_lock);

        if (rx_bufs_used) {
                rx_mon_stats->dest_ppdu_done++;
                dp_rx_buffers_replenish(soc, mac_id,
                        &pdev->rxdma_mon_buf_ring[mac_for_pdev],
                        &soc->rx_desc_mon[mac_id], rx_bufs_used, &head, &tail);
        }
}
static QDF_STATUS
dp_rx_pdev_mon_buf_attach(struct dp_pdev *pdev, int mac_id)
{
        uint8_t pdev_id = pdev->pdev_id;
        struct dp_soc *soc = pdev->soc;
        union dp_rx_desc_list_elem_t *desc_list = NULL;
        union dp_rx_desc_list_elem_t *tail = NULL;
        struct dp_srng *rxdma_srng;
        uint32_t rxdma_entries;
        struct rx_desc_pool *rx_desc_pool;
        QDF_STATUS status;
        uint8_t mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);

        rxdma_srng = &pdev->rxdma_mon_buf_ring[mac_for_pdev];

        rxdma_entries = rxdma_srng->alloc_size / hal_srng_get_entrysize(
                soc->hal_soc, RXDMA_MONITOR_BUF);

        rx_desc_pool = &soc->rx_desc_mon[mac_id];

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
                "%s: Mon RX Desc Pool[%d] allocation size=%d",
                __func__, pdev_id, rxdma_entries * 3);

        status = dp_rx_desc_pool_alloc(soc, mac_id,
                rxdma_entries * 3, rx_desc_pool);
        if (!QDF_IS_STATUS_SUCCESS(status)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: dp_rx_desc_pool_alloc() failed\n", __func__);
                return status;
        }

        rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM;

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
                "%s: Mon RX Buffers Replenish pdev_id=%d",
                __func__, pdev_id);

        status = dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
                rxdma_entries, &desc_list, &tail);
        if (!QDF_IS_STATUS_SUCCESS(status)) {
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                        "%s: dp_rx_buffers_replenish() failed",
                        __func__);
                return status;
        }

        return QDF_STATUS_SUCCESS;
}
static QDF_STATUS
dp_rx_pdev_mon_buf_detach(struct dp_pdev *pdev, int mac_id)
{
        struct dp_soc *soc = pdev->soc;
        struct rx_desc_pool *rx_desc_pool;

        rx_desc_pool = &soc->rx_desc_mon[mac_id];
        if (rx_desc_pool->pool_size != 0)
                dp_rx_desc_pool_free(soc, mac_id, rx_desc_pool);

        return QDF_STATUS_SUCCESS;
}
/*
 * Allocate and set up the link descriptor pool that will be used by HW
 * for the various link and queue descriptors, managed by WBM
 */
static int dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id)
{
        struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
        int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
        int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
        int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
        uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
        uint32_t total_link_descs, total_mem_size;
        uint32_t num_link_desc_banks;
        uint32_t last_bank_size = 0;
        uint32_t entry_size, num_entries;
        void *mon_desc_srng;
        uint32_t num_replenish_buf;
        struct dp_srng *dp_srng;
        int i;

        dp_srng = &dp_pdev->rxdma_mon_desc_ring[mac_for_pdev];

        num_entries = dp_srng->alloc_size / hal_srng_get_entrysize(
                soc->hal_soc, RXDMA_MONITOR_DESC);

        /* Round up to power of 2 */
        total_link_descs = 1;
        while (total_link_descs < num_entries)
                total_link_descs <<= 1;

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
                "%s: total_link_descs: %u, link_desc_size: %d\n",
                __func__, total_link_descs, link_desc_size);

        total_mem_size = total_link_descs * link_desc_size;
        total_mem_size += link_desc_align;

        if (total_mem_size <= max_alloc_size) {
                num_link_desc_banks = 0;
                last_bank_size = total_mem_size;
        } else {
                num_link_desc_banks = (total_mem_size) /
                        (max_alloc_size - link_desc_align);
                last_bank_size = total_mem_size %
                        (max_alloc_size - link_desc_align);
        }

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
                "%s: total_mem_size: %d, num_link_desc_banks: %u, max_alloc_size: %d last_bank_size: %d\n",
                __func__, total_mem_size, num_link_desc_banks, max_alloc_size,
                last_bank_size);

        for (i = 0; i < num_link_desc_banks; i++) {
                dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr_unaligned =
                        qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
                                max_alloc_size,
                                &(dp_pdev->link_desc_banks[mac_for_pdev][i].
                                base_paddr_unaligned));

                if (!dp_pdev->link_desc_banks[mac_for_pdev][i].
                                base_vaddr_unaligned) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                                "%s: Link desc memory allocation failed\n",
                                __func__);
                        goto fail;
                }

                dp_pdev->link_desc_banks[mac_for_pdev][i].size = max_alloc_size;

                /* Align the base virtual and physical addresses */
                dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr =
                        (void *)((unsigned long)
                        (dp_pdev->link_desc_banks[mac_for_pdev][i].
                        base_vaddr_unaligned) +
                        ((unsigned long)
                        (dp_pdev->link_desc_banks[mac_for_pdev][i].
                        base_vaddr_unaligned) %
                        link_desc_align));

                dp_pdev->link_desc_banks[mac_for_pdev][i].base_paddr =
                        (unsigned long)
                        (dp_pdev->link_desc_banks[mac_for_pdev][i].
                        base_paddr_unaligned) +
                        ((unsigned long)
                        (dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr) -
                        (unsigned long)
                        (dp_pdev->link_desc_banks[mac_for_pdev][i].
                        base_vaddr_unaligned));
        }

        if (last_bank_size) {
                /* Allocate last bank in case total memory required is not exact
                 * multiple of max_alloc_size
                 */
                dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr_unaligned =
                        qdf_mem_alloc_consistent(soc->osdev,
                                soc->osdev->dev, last_bank_size,
                                &(dp_pdev->link_desc_banks[mac_for_pdev][i].
                                base_paddr_unaligned));

                if (dp_pdev->link_desc_banks[mac_for_pdev][i].
                                base_vaddr_unaligned == NULL) {
                        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                                "%s: allocation failed for mon link desc pool\n",
                                __func__);
                        goto fail;
                }

                dp_pdev->link_desc_banks[mac_for_pdev][i].size = last_bank_size;

                dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr =
                        (void *)((unsigned long)
                        (dp_pdev->link_desc_banks[mac_for_pdev][i].
                        base_vaddr_unaligned) +
                        ((unsigned long)
                        (dp_pdev->link_desc_banks[mac_for_pdev][i].
                        base_vaddr_unaligned) %
                        link_desc_align));

                dp_pdev->link_desc_banks[mac_for_pdev][i].base_paddr =
                        (unsigned long)
                        (dp_pdev->link_desc_banks[mac_for_pdev][i].
                        base_paddr_unaligned) +
                        ((unsigned long)
                        (dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr) -
                        (unsigned long)
                        (dp_pdev->link_desc_banks[mac_for_pdev][i].
                        base_vaddr_unaligned));
        }

        /* Allocate and setup link descriptor idle list for HW internal use */
        entry_size = hal_srng_get_entrysize(soc->hal_soc, RXDMA_MONITOR_DESC);
        total_mem_size = entry_size * total_link_descs;

        mon_desc_srng = dp_pdev->rxdma_mon_desc_ring[mac_for_pdev].hal_srng;

        num_replenish_buf = 0;

        if (total_mem_size <= max_alloc_size) {
                void *desc;

                for (i = 0;
                        i < MAX_MON_LINK_DESC_BANKS &&
                        dp_pdev->link_desc_banks[mac_for_pdev][i].base_paddr;
                        i++) {
                        uint32_t num_entries =
                                (dp_pdev->link_desc_banks[mac_for_pdev][i].size -
                                (unsigned long)
                                (dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr) -
                                (unsigned long)
                                (dp_pdev->link_desc_banks[mac_for_pdev][i].
                                base_vaddr_unaligned)) / link_desc_size;
                        unsigned long paddr =
                                (unsigned long)
                                (dp_pdev->link_desc_banks[mac_for_pdev][i].base_paddr);
                        unsigned long vaddr =
                                (unsigned long)
                                (dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr);

                        hal_srng_access_start_unlocked(soc->hal_soc,
                                mon_desc_srng);

                        while (num_entries && (desc =
                                hal_srng_src_get_next(soc->hal_soc,
                                mon_desc_srng))) {
                                hal_set_link_desc_addr(desc, i, paddr);
                                num_entries--;
                                num_replenish_buf++;
                                paddr += link_desc_size;
                                vaddr += link_desc_size;
                        }
                        hal_srng_access_end_unlocked(soc->hal_soc,
                                mon_desc_srng);
                }
        } else {
                qdf_assert(0);
        }

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
                "%s: successfully replenished %d buffers\n",
                __func__, num_replenish_buf);

        return 0;

fail:
        for (i = 0; i < MAX_MON_LINK_DESC_BANKS; i++) {
                if (dp_pdev->link_desc_banks[mac_for_pdev][i].
                                base_vaddr_unaligned) {
                        qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
                                dp_pdev->link_desc_banks[mac_for_pdev][i].size,
                                dp_pdev->link_desc_banks[mac_for_pdev][i].
                                base_vaddr_unaligned,
                                dp_pdev->link_desc_banks[mac_for_pdev][i].
                                base_paddr_unaligned, 0);
                        dp_pdev->link_desc_banks[mac_for_pdev][i].
                                base_vaddr_unaligned = NULL;
                }
        }
        return QDF_STATUS_E_FAILURE;
}
/*
 * Free the link descriptor pool that was set up for HW use
 */
static void dp_mon_link_desc_pool_cleanup(struct dp_soc *soc, uint32_t mac_id)
{
        struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
        int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
        int i;

        for (i = 0; i < MAX_MON_LINK_DESC_BANKS; i++) {
                if (dp_pdev->link_desc_banks[mac_for_pdev][i].
                                base_vaddr_unaligned) {
                        qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
                                dp_pdev->link_desc_banks[mac_for_pdev][i].size,
                                dp_pdev->link_desc_banks[mac_for_pdev][i].
                                base_vaddr_unaligned,
                                dp_pdev->link_desc_banks[mac_for_pdev][i].
                                base_paddr_unaligned, 0);
                        dp_pdev->link_desc_banks[mac_for_pdev][i].
                                base_vaddr_unaligned = NULL;
                }
        }
}
/**
 * dp_rx_pdev_mon_attach() - attach DP RX for monitor mode
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance for monitor mode into the
 * main device (SOC) context, allocating and initializing the required
 * DP RX resources.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_attach(struct dp_pdev *pdev)
{
        struct dp_soc *soc = pdev->soc;
        QDF_STATUS status;
        uint8_t pdev_id = pdev->pdev_id;
        int mac_id;

        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
                "%s: pdev attach id=%d\n", __func__, pdev_id);

        for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
                int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

                status = dp_rx_pdev_mon_buf_attach(pdev, mac_for_pdev);
                if (!QDF_IS_STATUS_SUCCESS(status)) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                "%s: dp_rx_pdev_mon_buf_attach() failed\n",
                                __func__);
                        return status;
                }

                status = dp_rx_pdev_mon_status_attach(pdev, mac_for_pdev);
                if (!QDF_IS_STATUS_SUCCESS(status)) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                "%s: dp_rx_pdev_mon_status_attach() failed\n",
                                __func__);
                        return status;
                }

                status = dp_mon_link_desc_pool_setup(soc, mac_for_pdev);
                if (!QDF_IS_STATUS_SUCCESS(status)) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                "%s: dp_mon_link_desc_pool_setup() failed\n",
                                __func__);
                        return status;
                }
        }
        qdf_spinlock_create(&pdev->mon_lock);

        return QDF_STATUS_SUCCESS;
}
/**
 * dp_rx_pdev_mon_detach() - detach dp rx for monitor mode
 * @pdev: core txrx pdev context
 *
 * This function will detach the DP RX instance for monitor mode from the
 * main device context and free the DP RX resources held for monitor mode.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_detach(struct dp_pdev *pdev)
{
        uint8_t pdev_id = pdev->pdev_id;
        struct dp_soc *soc = pdev->soc;
        int mac_id;

        /* The per-pdev lock is created once in attach, so destroy it once
         * here rather than once per mac id
         */
        qdf_spinlock_destroy(&pdev->mon_lock);

        for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
                int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

                dp_mon_link_desc_pool_cleanup(soc, mac_for_pdev);
                dp_rx_pdev_mon_status_detach(pdev, mac_for_pdev);
                dp_rx_pdev_mon_buf_detach(pdev, mac_for_pdev);
        }

        return QDF_STATUS_SUCCESS;
}