dp_rx_mon_dest.c

/*
 * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_rx_mon.h"
#include "wlan_cfg.h"
#include "dp_internal.h"
/**
 * dp_rx_mon_link_desc_return() - Return an MPDU link descriptor to HW
 * (WBM), following error handling
 *
 * @dp_pdev: core txrx pdev context
 * @buf_addr_info: void pointer to monitor link descriptor buf addr info
 * @mac_id: mac id which is one of 3 mac_ids
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
	void *buf_addr_info, int mac_id)
{
	struct dp_srng *dp_srng;
	void *hal_srng;
	void *hal_soc;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	void *src_srng_desc;
	int mac_for_pdev = dp_get_mac_id_for_mac(dp_pdev->soc, mac_id);

	hal_soc = dp_pdev->soc->hal_soc;

	dp_srng = &dp_pdev->rxdma_mon_desc_ring[mac_for_pdev];
	hal_srng = dp_srng->hal_srng;

	qdf_assert(hal_srng);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_srng))) {
		/* TODO */
		/*
		 * Need API to convert from hal_ring pointer to
		 * Ring Type / Ring Id combo
		 */
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL RING Access For WBM Release SRNG Failed -- %pK\n",
			__func__, __LINE__, hal_srng);
		goto done;
	}

	src_srng_desc = hal_srng_src_get_next(hal_soc, hal_srng);

	if (qdf_likely(src_srng_desc)) {
		/* Return link descriptor through WBM ring (SW2WBM) */
		hal_rx_mon_msdu_link_desc_set(hal_soc,
			src_srng_desc, buf_addr_info);
		status = QDF_STATUS_SUCCESS;
	} else {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s %d -- Monitor Link Desc WBM Release Ring Full\n",
			__func__, __LINE__);
	}
done:
	hal_srng_access_end(hal_soc, hal_srng);
	return status;
}
/**
 * dp_mon_adjust_frag_len() - An MPDU or MSDU may span multiple nbufs.
 *			      This function returns the data length
 *			      carried by the current fragment.
 *
 * @total_len: pointer to remaining data length.
 * @frag_len: pointer to data length in this fragment.
 */
static inline void dp_mon_adjust_frag_len(uint32_t *total_len,
	uint32_t *frag_len)
{
	if (*total_len >= (RX_BUFFER_SIZE - RX_PKT_TLVS_LEN)) {
		*frag_len = RX_BUFFER_SIZE - RX_PKT_TLVS_LEN;
		*total_len -= *frag_len;
	} else {
		*frag_len = *total_len;
		*total_len = 0;
	}
}
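
/*
 * Worked example (illustrative only -- RX_BUFFER_SIZE and
 * RX_PKT_TLVS_LEN are platform-defined; 2048 and 384 are assumed
 * here purely for the arithmetic):
 *
 *   Each monitor buffer carries at most 2048 - 384 = 1664 payload
 *   bytes. For an MSDU of total_len = 4000 spread across buffers
 *   with the MSDU_CONTINUATION flag set:
 *
 *     call 1: frag_len = 1664, total_len -> 2336
 *     call 2: frag_len = 1664, total_len -> 672
 *     call 3: frag_len = 672,  total_len -> 0   (last fragment)
 */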
/**
 * dp_rx_mon_mpdu_pop() - Extract the MSDUs of an MPDU from the monitor
 * destination ring entry and queue them for delivery
 *
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @rxdma_dst_ring_desc: void pointer to monitor link descriptor buf addr info
 * @head_msdu: head of msdu to be popped
 * @tail_msdu: tail of msdu to be popped
 * @npackets: number of packets to be popped
 * @ppdu_id: ppdu id of processing ppdu
 * @head: head of descs list to be freed
 * @tail: tail of descs list to be freed
 *
 * Return: number of msdus in the MPDU that were popped
 */
static inline uint32_t
dp_rx_mon_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
	void *rxdma_dst_ring_desc, qdf_nbuf_t *head_msdu,
	qdf_nbuf_t *tail_msdu, uint32_t *npackets, uint32_t *ppdu_id,
	union dp_rx_desc_list_elem_t **head,
	union dp_rx_desc_list_elem_t **tail)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	void *rx_desc_tlv;
	void *rx_msdu_link_desc;
	qdf_nbuf_t msdu;
	qdf_nbuf_t last;
	struct hal_rx_msdu_list msdu_list;
	uint16_t num_msdus;
	uint32_t rx_buf_size, rx_pkt_offset;
	struct hal_buf_info buf_info;
	void *p_buf_addr_info;
	void *p_last_buf_addr_info;
	uint32_t rx_bufs_used = 0;
	uint32_t msdu_ppdu_id, msdu_cnt, last_ppdu_id;
	uint8_t *data;
	uint32_t i;
	uint32_t total_frag_len = 0, frag_len = 0;
	bool is_frag, is_first_msdu;
	bool drop_mpdu = false;

	msdu = NULL;
	last_ppdu_id = dp_pdev->ppdu_info.com_info.last_ppdu_id;
	last = NULL;

	hal_rx_reo_ent_buf_paddr_get(rxdma_dst_ring_desc, &buf_info,
		&p_last_buf_addr_info, &msdu_cnt);

	if ((hal_rx_reo_ent_rxdma_push_reason_get(rxdma_dst_ring_desc) ==
		HAL_RX_WBM_RXDMA_PSH_RSN_ERROR)) {
		uint8_t rxdma_err =
			hal_rx_reo_ent_rxdma_error_code_get(
				rxdma_dst_ring_desc);
		if (qdf_unlikely((rxdma_err == HAL_RXDMA_ERR_FLUSH_REQUEST) ||
			(rxdma_err == HAL_RXDMA_ERR_MPDU_LENGTH) ||
			(rxdma_err == HAL_RXDMA_ERR_OVERFLOW))) {
			drop_mpdu = true;
			dp_pdev->rx_mon_stats.dest_mpdu_drop++;
		}
	}
	is_frag = false;
	is_first_msdu = true;

	do {
		rx_msdu_link_desc =
			dp_rx_cookie_2_mon_link_desc_va(dp_pdev, &buf_info,
				mac_id);

		qdf_assert(rx_msdu_link_desc);

		hal_rx_msdu_list_get(rx_msdu_link_desc, &msdu_list, &num_msdus);

		for (i = 0; i < num_msdus; i++) {
			uint32_t l2_hdr_offset;
			struct dp_rx_desc *rx_desc =
				dp_rx_cookie_2_va_mon_buf(soc,
					msdu_list.sw_cookie[i]);

			qdf_assert(rx_desc);
			msdu = rx_desc->nbuf;

			if (rx_desc->unmapped == 0) {
				qdf_nbuf_unmap_single(soc->osdev, msdu,
					QDF_DMA_FROM_DEVICE);
				rx_desc->unmapped = 1;
			}

			if (drop_mpdu) {
				qdf_nbuf_free(msdu);
				msdu = NULL;
				goto next_msdu;
			}

			data = qdf_nbuf_data(msdu);

			rx_desc_tlv = HAL_RX_MON_DEST_GET_DESC(data);

			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_DEBUG,
				"[%s] i=%d, ppdu_id=%x, last_ppdu_id=%x num_msdus = %u\n",
				__func__, i, *ppdu_id,
				last_ppdu_id, num_msdus);

			if (is_first_msdu) {
				msdu_ppdu_id = HAL_RX_HW_DESC_GET_PPDUID_GET(
					rx_desc_tlv);
				is_first_msdu = false;

				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_DEBUG,
					"[%s] msdu_ppdu_id=%x\n",
					__func__, msdu_ppdu_id);

				if (*ppdu_id > msdu_ppdu_id)
					QDF_TRACE(QDF_MODULE_ID_DP,
						QDF_TRACE_LEVEL_DEBUG,
						"[%s][%d] ppdu_id=%d msdu_ppdu_id=%d\n",
						__func__, __LINE__, *ppdu_id,
						msdu_ppdu_id);

				if ((*ppdu_id < msdu_ppdu_id) && (*ppdu_id >
					last_ppdu_id)) {
					*ppdu_id = msdu_ppdu_id;
					return rx_bufs_used;
				}
			}

			if (hal_rx_desc_is_first_msdu(rx_desc_tlv))
				hal_rx_mon_hw_desc_get_mpdu_status(soc->hal_soc,
					rx_desc_tlv,
					&(dp_pdev->ppdu_info.rx_status));

			if (msdu_list.msdu_info[i].msdu_flags &
				HAL_MSDU_F_MSDU_CONTINUATION) {
				if (!is_frag) {
					total_frag_len =
						msdu_list.msdu_info[i].msdu_len;
					is_frag = true;
				}
				dp_mon_adjust_frag_len(
					&total_frag_len, &frag_len);
			} else {
				if (is_frag) {
					dp_mon_adjust_frag_len(
						&total_frag_len, &frag_len);
				} else {
					frag_len =
						msdu_list.msdu_info[i].msdu_len;
				}
				is_frag = false;
				msdu_cnt--;
			}

			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s total_len %u frag_len %u flags %u",
				__func__, total_frag_len, frag_len,
				msdu_list.msdu_info[i].msdu_flags);
			rx_pkt_offset = HAL_RX_MON_HW_RX_DESC_SIZE();
			/*
			 * HW structures call this L3 header padding
			 * -- even though this is actually the offset
			 * from the buffer beginning where the L2
			 * header begins.
			 */
			l2_hdr_offset =
				hal_rx_msdu_end_l3_hdr_padding_get(data);

			rx_buf_size = rx_pkt_offset + l2_hdr_offset
				+ frag_len;

			qdf_nbuf_set_pktlen(msdu, rx_buf_size);
#if 0
			/* Disabled: packets seen with msdu_done set to 0 */
			/*
			 * Check if DMA completed -- msdu_done is the
			 * last bit to be written
			 */
			if (!hal_rx_attn_msdu_done_get(rx_desc_tlv)) {
				QDF_TRACE(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					"%s:%d: Pkt Desc\n",
					__func__, __LINE__);

				QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
					QDF_TRACE_LEVEL_ERROR,
					rx_desc_tlv, 128);

				qdf_assert_always(0);
			}
#endif
			QDF_TRACE(QDF_MODULE_ID_DP,
				QDF_TRACE_LEVEL_DEBUG,
				"%s: rx_pkt_offset=%d, l2_hdr_offset=%d, msdu_len=%d, addr=%pK skb->len %lu",
				__func__, rx_pkt_offset, l2_hdr_offset,
				msdu_list.msdu_info[i].msdu_len,
				qdf_nbuf_data(msdu), qdf_nbuf_len(msdu));

			if (head_msdu && *head_msdu == NULL) {
				*head_msdu = msdu;
			} else {
				if (last)
					qdf_nbuf_set_next(last, msdu);
			}

			last = msdu;
next_msdu:
			rx_bufs_used++;
			dp_rx_add_to_free_desc_list(head,
				tail, rx_desc);
		}

		hal_rx_mon_next_link_desc_get(rx_msdu_link_desc, &buf_info,
			&p_buf_addr_info);

		if (dp_rx_mon_link_desc_return(dp_pdev, p_last_buf_addr_info,
			mac_id) != QDF_STATUS_SUCCESS)
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"dp_rx_mon_link_desc_return failed\n");

		p_last_buf_addr_info = p_buf_addr_info;

	} while (buf_info.paddr && msdu_cnt);

	if (last)
		qdf_nbuf_set_next(last, NULL);

	*tail_msdu = msdu;

	return rx_bufs_used;
}
/**
 * dp_rx_msdus_set_payload() - Strip the HW descriptor TLVs and L3
 * header padding so the nbuf data pointer lands on the MSDU payload
 * @msdu: msdu to be trimmed
 */
static inline
void dp_rx_msdus_set_payload(qdf_nbuf_t msdu)
{
	uint8_t *data;
	uint32_t rx_pkt_offset, l2_hdr_offset;

	data = qdf_nbuf_data(msdu);
	rx_pkt_offset = HAL_RX_MON_HW_RX_DESC_SIZE();
	l2_hdr_offset = hal_rx_msdu_end_l3_hdr_padding_get(data);
	qdf_nbuf_pull_head(msdu, rx_pkt_offset + l2_hdr_offset);
}
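
/*
 * Monitor buffer layout before dp_rx_msdus_set_payload() runs (a
 * sketch for orientation, not a byte-exact map -- the region sizes
 * come from the HAL for the target in use):
 *
 *   +-----------------------+----------------+------------------+
 *   | HW RX descriptor TLVs | L3 hdr padding | MSDU payload ... |
 *   +-----------------------+----------------+------------------+
 *   ^ qdf_nbuf_data()                        ^ after pull_head
 *
 * The qdf_nbuf_pull_head() advances the data pointer past the first
 * two regions, leaving the nbuf pointing at the payload itself.
 */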
/**
 * dp_rx_mon_restitch_mpdu_from_msdus() - Stitch the MSDUs of an MPDU
 * back into a single 802.11 frame suitable for the monitor interface
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @head_msdu: head of the msdu list
 * @last_msdu: tail of the msdu list
 * @rx_status: monitor receive status to be filled
 *
 * Return: stitched MPDU nbuf, or NULL on failure
 */
static inline
qdf_nbuf_t dp_rx_mon_restitch_mpdu_from_msdus(struct dp_soc *soc,
	uint32_t mac_id, qdf_nbuf_t head_msdu, qdf_nbuf_t last_msdu,
	struct cdp_mon_status *rx_status)
{
	qdf_nbuf_t msdu, mpdu_buf, prev_buf, msdu_orig, head_frag_list;
	uint32_t decap_format, wifi_hdr_len, sec_hdr_len, msdu_llc_len,
		mpdu_buf_len, decap_hdr_pull_bytes, frag_list_sum_len, dir,
		is_amsdu, is_first_frag, amsdu_pad;
	void *rx_desc;
	char *hdr_desc;
	unsigned char *dest;
	struct ieee80211_frame *wh;
	struct ieee80211_qoscntl *qos;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);

	head_frag_list = NULL;
	mpdu_buf = NULL;

	/* The nbuf has been pulled just beyond the status and points to the
	 * payload
	 */
	if (!head_msdu)
		goto mpdu_stitch_fail;

	msdu_orig = head_msdu;
	rx_desc = qdf_nbuf_data(msdu_orig);

	if (HAL_RX_DESC_GET_MPDU_LENGTH_ERR(rx_desc)) {
		/* It looks like there is some issue on MPDU len err */
		/* Need to investigate further whether to drop the packet */
		DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
		return NULL;
	}

	rx_desc = qdf_nbuf_data(last_msdu);

	rx_status->cdp_rs_fcs_err = HAL_RX_DESC_GET_MPDU_FCS_ERR(rx_desc);
	dp_pdev->ppdu_info.rx_status.rs_fcs_err =
		HAL_RX_DESC_GET_MPDU_FCS_ERR(rx_desc);

	/* Fill out the rx_status from the PPDU start and end fields */
	/* HAL_RX_GET_PPDU_STATUS(soc, mac_id, rx_status); */

	rx_desc = qdf_nbuf_data(head_msdu);

	decap_format = HAL_RX_DESC_GET_DECAP_FORMAT(rx_desc);

	/* Easy case - The MSDU status indicates that this is a non-decapped
	 * packet in RAW mode.
	 */
	if (decap_format == HAL_HW_RX_DECAP_FORMAT_RAW) {
		/* Note that this path might suffer from headroom unavailability
		 * - but the RX status is usually enough
		 */

		dp_rx_msdus_set_payload(head_msdu);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"[%s][%d] decap format raw head %pK head->next %pK last_msdu %pK last_msdu->next %pK",
			__func__, __LINE__, head_msdu, head_msdu->next,
			last_msdu, last_msdu->next);

		mpdu_buf = head_msdu;
		prev_buf = mpdu_buf;

		frag_list_sum_len = 0;
		msdu = qdf_nbuf_next(head_msdu);
		is_first_frag = 1;

		while (msdu) {
			dp_rx_msdus_set_payload(msdu);

			if (is_first_frag) {
				is_first_frag = 0;
				head_frag_list = msdu;
			}

			frag_list_sum_len += qdf_nbuf_len(msdu);

			/* Maintain the linking of the cloned MSDUS */
			qdf_nbuf_set_next_ext(prev_buf, msdu);

			/* Move to the next */
			prev_buf = msdu;
			msdu = qdf_nbuf_next(msdu);
		}

		qdf_nbuf_trim_tail(prev_buf, HAL_RX_FCS_LEN);

		/* If there were more fragments to this RAW frame */
		if (head_frag_list) {
			if (frag_list_sum_len <
				sizeof(struct ieee80211_frame_min_one)) {
				DP_STATS_INC(dp_pdev, dropped.mon_rx_drop, 1);
				return NULL;
			}
			frag_list_sum_len -= HAL_RX_FCS_LEN;
			qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list,
				frag_list_sum_len);
			qdf_nbuf_set_next(mpdu_buf, NULL);
		}

		goto mpdu_stitch_done;
	}
	/* Decap mode:
	 * Calculate the amount of header in decapped packet to knock off based
	 * on the decap type and the corresponding number of raw bytes to copy
	 * status header
	 */
	rx_desc = qdf_nbuf_data(head_msdu);
	hdr_desc = HAL_RX_DESC_GET_80211_HDR(rx_desc);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		"[%s][%d] decap format not raw",
		__func__, __LINE__);

	/* Base size */
	wifi_hdr_len = sizeof(struct ieee80211_frame);
	wh = (struct ieee80211_frame *)hdr_desc;

	dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK;

	if (dir == IEEE80211_FC1_DIR_DSTODS)
		wifi_hdr_len += 6;

	is_amsdu = 0;
	if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
		qos = (struct ieee80211_qoscntl *)
			(hdr_desc + wifi_hdr_len);
		wifi_hdr_len += 2;

		is_amsdu = (qos->i_qos[0] & IEEE80211_QOS_AMSDU);
	}

	/* Calculate security header length based on 'Protected'
	 * and 'EXT_IV' flag
	 */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		char *iv = (char *)wh + wifi_hdr_len;

		if (iv[3] & KEY_EXTIV)
			sec_hdr_len = 8;
		else
			sec_hdr_len = 4;
	} else {
		sec_hdr_len = 0;
	}
	wifi_hdr_len += sec_hdr_len;

	/* MSDU related stuff LLC - AMSDU subframe header etc */
	msdu_llc_len = is_amsdu ? (14 + 8) : 8;

	mpdu_buf_len = wifi_hdr_len + msdu_llc_len;

	/* "Decap" header to remove from MSDU buffer */
	decap_hdr_pull_bytes = 14;

	/* Allocate a new nbuf for holding the 802.11 header retrieved from the
	 * status of the now decapped first msdu. Leave enough headroom for
	 * accommodating any radio-tap /prism like PHY header
	 */
#define MAX_MONITOR_HEADER (512)
	mpdu_buf = qdf_nbuf_alloc(soc->osdev,
		MAX_MONITOR_HEADER + mpdu_buf_len,
		MAX_MONITOR_HEADER, 4, FALSE);

	if (!mpdu_buf)
		goto mpdu_stitch_done;

	/* Copy the MPDU related header and enc headers into the first buffer
	 * - Note that there can be a 2 byte pad between header and enc header
	 */

	prev_buf = mpdu_buf;
	dest = qdf_nbuf_put_tail(prev_buf, wifi_hdr_len);
	if (!dest)
		goto mpdu_stitch_fail;

	qdf_mem_copy(dest, hdr_desc, wifi_hdr_len);
	hdr_desc += wifi_hdr_len;

#if 0
	dest = qdf_nbuf_put_tail(prev_buf, sec_hdr_len);
	adf_os_mem_copy(dest, hdr_desc, sec_hdr_len);
	hdr_desc += sec_hdr_len;
#endif
	/* The first LLC len is copied into the MPDU buffer */
	frag_list_sum_len = 0;

	msdu_orig = head_msdu;
	is_first_frag = 1;
	amsdu_pad = 0;

	while (msdu_orig) {
		/* TODO: intra AMSDU padding - do we need it ??? */

		msdu = msdu_orig;

		if (is_first_frag) {
			is_first_frag = 0;
			head_frag_list = msdu;
		} else {
			/* Reload the hdr ptr only on non-first MSDUs */
			rx_desc = qdf_nbuf_data(msdu_orig);
			hdr_desc = HAL_RX_DESC_GET_80211_HDR(rx_desc);
		}

		/* Copy this buffer's MSDU related status into the prev buffer */
		dest = qdf_nbuf_put_tail(prev_buf,
			msdu_llc_len + amsdu_pad);

		if (!dest)
			goto mpdu_stitch_fail;

		dest += amsdu_pad;
		qdf_mem_copy(dest, hdr_desc, msdu_llc_len);

		dp_rx_msdus_set_payload(msdu);

		/* Push the MSDU buffer beyond the decap header */
		qdf_nbuf_pull_head(msdu, decap_hdr_pull_bytes);
		frag_list_sum_len += msdu_llc_len + qdf_nbuf_len(msdu)
			+ amsdu_pad;

		/* Set up intra-AMSDU pad to be added to start of next buffer -
		 * AMSDU pad is 4 byte pad on AMSDU subframe
		 */
		amsdu_pad = (msdu_llc_len + qdf_nbuf_len(msdu)) & 0x3;
		amsdu_pad = amsdu_pad ? (4 - amsdu_pad) : 0;
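
		/*
		 * Worked example of the pad computation (numbers are
		 * illustrative only): with msdu_llc_len = 22 (A-MSDU
		 * case) and an MSDU payload of 39 bytes, 22 + 39 = 61
		 * and 61 & 0x3 = 1, so amsdu_pad = 4 - 1 = 3 bytes are
		 * prepended to the next subframe to keep each A-MSDU
		 * subframe 4-byte aligned. If the sum is already a
		 * multiple of 4, amsdu_pad stays 0.
		 */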
		/* TODO FIXME How do we handle MSDUs that have fraglist - Should
		 * probably iterate all the frags cloning them along the way
		 * and also updating the prev_buf pointer
		 */

		/* Move to the next */
		prev_buf = msdu;
		msdu_orig = qdf_nbuf_next(msdu_orig);
	}

#if 0
	/* Add in the trailer section - encryption trailer + FCS */
	qdf_nbuf_put_tail(prev_buf, HAL_RX_FCS_LEN);
	frag_list_sum_len += HAL_RX_FCS_LEN;
#endif

	frag_list_sum_len -= msdu_llc_len;

	/* TODO: Convert this to suitable adf routines */
	qdf_nbuf_append_ext_list(mpdu_buf, head_frag_list,
		frag_list_sum_len);
mpdu_stitch_done:
	/* Check if this buffer contains the PPDU end status for TSF */
	/* Need to revisit this code to see where we can get the TSF timestamp */
#if 0
	/* PPDU end TLV will be retrieved from monitor status ring */
	last_mpdu =
		(*(((u_int32_t *)&rx_desc->attention)) &
		RX_ATTENTION_0_LAST_MPDU_MASK) >>
		RX_ATTENTION_0_LAST_MPDU_LSB;

	if (last_mpdu)
		rx_status->rs_tstamp.tsf = rx_desc->ppdu_end.tsf_timestamp;
#endif

	/* mpdu_buf may be NULL here if the allocation above failed */
	if (mpdu_buf)
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"%s %d mpdu_buf %pK mpdu_buf->len %u",
			__func__, __LINE__,
			mpdu_buf, mpdu_buf->len);

	return mpdu_buf;

mpdu_stitch_fail:
	if ((mpdu_buf) && (decap_format != HAL_HW_RX_DECAP_FORMAT_RAW)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s mpdu_stitch_fail mpdu_buf %pK",
			__func__, mpdu_buf);
		/* Free the head buffer */
		qdf_nbuf_free(mpdu_buf);
	}
	return NULL;
}
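
/*
 * Resulting layout after a successful (non-RAW) restitch -- a sketch
 * for orientation, not a byte-exact map:
 *
 *   mpdu_buf (newly allocated):
 *     [ 802.11 header (+ QoS/security hdr) | LLC of first MSDU ]
 *   ext/frag list chained off mpdu_buf:
 *     [ MSDU 1 payload ][ pad + LLC + MSDU 2 payload ] ...
 *
 * In RAW decap mode no new buffer is allocated; head_msdu itself
 * becomes the MPDU and the remaining MSDUs are attached as its
 * fragment list.
 */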
/**
 * dp_rx_extract_radiotap_info() - Extract and populate information in
 * struct mon_rx_status type
 * @rx_status: Receive status
 * @rx_mon_status: Monitor mode status
 *
 * Return: None
 */
static inline
void dp_rx_extract_radiotap_info(struct cdp_mon_status *rx_status,
	struct mon_rx_status *rx_mon_status)
{
	rx_mon_status->tsft = rx_status->cdp_rs_tstamp.cdp_tsf;
	rx_mon_status->chan_freq = rx_status->rs_freq;
	rx_mon_status->chan_num = rx_status->rs_channel;
	rx_mon_status->chan_flags = rx_status->rs_flags;
	rx_mon_status->rate = rx_status->rs_datarate;
	/* TODO: rx_mon_status->ant_signal_db */
	/* TODO: rx_mon_status->nr_ant */
	rx_mon_status->mcs = rx_status->cdf_rs_rate_mcs;
	rx_mon_status->is_stbc = rx_status->cdp_rs_stbc;
	rx_mon_status->sgi = rx_status->cdp_rs_sgi;
	/* TODO: rx_mon_status->ldpc */
	/* TODO: rx_mon_status->beamformed */
	/* TODO: rx_mon_status->vht_flags */
	/* TODO: rx_mon_status->vht_flag_values1 */
}
/**
 * dp_rx_mon_deliver() - Restitch an MPDU from its MSDU list and deliver
 * it to the monitor interface registered on the pdev
 * @soc: core DP main context
 * @mac_id: mac id which is one of 3 mac_ids
 * @head_msdu: head of the msdu list
 * @tail_msdu: tail of the msdu list
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_mon_deliver(struct dp_soc *soc, uint32_t mac_id,
	qdf_nbuf_t head_msdu, qdf_nbuf_t tail_msdu)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	struct cdp_mon_status *rs = &pdev->rx_mon_recv_status;
	qdf_nbuf_t mon_skb, skb_next;
	qdf_nbuf_t mon_mpdu = NULL;

	if ((pdev->monitor_vdev == NULL) ||
		(pdev->monitor_vdev->osif_rx_mon == NULL)) {
		goto mon_deliver_fail;
	}

	/* restitch mon MPDU for delivery via monitor interface */
	mon_mpdu = dp_rx_mon_restitch_mpdu_from_msdus(soc, mac_id, head_msdu,
		tail_msdu, rs);

	if (mon_mpdu && pdev->monitor_vdev && pdev->monitor_vdev->osif_vdev) {
		pdev->ppdu_info.rx_status.ppdu_id =
			pdev->ppdu_info.com_info.ppdu_id;
		qdf_nbuf_update_radiotap(&(pdev->ppdu_info.rx_status),
			mon_mpdu, sizeof(struct rx_pkt_tlvs));
		pdev->monitor_vdev->osif_rx_mon(
			pdev->monitor_vdev->osif_vdev, mon_mpdu, NULL);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"[%s][%d] mon_mpdu=%pK pdev->monitor_vdev %pK osif_vdev %pK",
			__func__, __LINE__, mon_mpdu, pdev->monitor_vdev,
			pdev->monitor_vdev->osif_vdev);
		goto mon_deliver_fail;
	}

	return QDF_STATUS_SUCCESS;

mon_deliver_fail:
	mon_skb = head_msdu;
	while (mon_skb) {
		skb_next = qdf_nbuf_next(mon_skb);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			"[%s][%d] mon_skb=%pK len %u", __func__,
			__LINE__, mon_skb, mon_skb->len);

		qdf_nbuf_free(mon_skb);
		mon_skb = skb_next;
	}
	return QDF_STATUS_E_INVAL;
}
/**
 * dp_rx_mon_dest_process() - Brain of the Rx processing functionality
 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @mac_id: mac id whose monitor destination ring will be serviced
 * @quota: No. of units (packets) that can be serviced in one shot.
 *
 * This function implements the core of Rx functionality. This is
 * expected to handle only non-error frames.
 *
 * Return: none
 */
void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	void *hal_soc;
	void *rxdma_dst_ring_desc;
	void *mon_dst_srng;
	union dp_rx_desc_list_elem_t *head = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	uint32_t ppdu_id;
	uint32_t rx_bufs_used;
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	struct cdp_pdev_mon_stats *rx_mon_stats;

	mon_dst_srng = pdev->rxdma_mon_dst_ring[mac_for_pdev].hal_srng;

	if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL Monitor Destination Ring Init Failed -- %pK\n",
			__func__, __LINE__, mon_dst_srng);
		return;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	qdf_spin_lock_bh(&pdev->mon_lock);

	if (pdev->monitor_vdev == NULL) {
		qdf_spin_unlock_bh(&pdev->mon_lock);
		return;
	}

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dst_srng))) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			"%s %d : HAL Monitor Destination Ring access Failed -- %pK\n",
			__func__, __LINE__, mon_dst_srng);
		qdf_spin_unlock_bh(&pdev->mon_lock);
		return;
	}

	ppdu_id = pdev->ppdu_info.com_info.ppdu_id;
	rx_bufs_used = 0;
	rx_mon_stats = &pdev->rx_mon_stats;

	while (qdf_likely(rxdma_dst_ring_desc =
		hal_srng_dst_peek(hal_soc, mon_dst_srng))) {
		qdf_nbuf_t head_msdu, tail_msdu;
		uint32_t npackets;

		head_msdu = (qdf_nbuf_t) NULL;
		tail_msdu = (qdf_nbuf_t) NULL;

		rx_bufs_used += dp_rx_mon_mpdu_pop(soc, mac_id,
			rxdma_dst_ring_desc,
			&head_msdu, &tail_msdu,
			&npackets, &ppdu_id,
			&head, &tail);

		if (ppdu_id != pdev->ppdu_info.com_info.ppdu_id) {
			pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
			qdf_mem_zero(&(pdev->ppdu_info.rx_status),
				sizeof(pdev->ppdu_info.rx_status));
			pdev->ppdu_info.com_info.last_ppdu_id =
				pdev->ppdu_info.com_info.ppdu_id;
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
				"%s %d ppdu_id %x != ppdu_info.com_info.ppdu_id %x",
				__func__, __LINE__,
				ppdu_id, pdev->ppdu_info.com_info.ppdu_id);
			break;
		}

		if (qdf_likely((head_msdu != NULL) && (tail_msdu != NULL))) {
			rx_mon_stats->dest_mpdu_done++;
			dp_rx_mon_deliver(soc, mac_id, head_msdu, tail_msdu);
		}

		rxdma_dst_ring_desc = hal_srng_dst_get_next(hal_soc,
			mon_dst_srng);
	}

	hal_srng_access_end(hal_soc, mon_dst_srng);

	qdf_spin_unlock_bh(&pdev->mon_lock);

	if (rx_bufs_used) {
		rx_mon_stats->dest_ppdu_done++;
		dp_rx_buffers_replenish(soc, mac_id,
			&pdev->rxdma_mon_buf_ring[mac_for_pdev],
			&soc->rx_desc_mon[mac_id], rx_bufs_used, &head, &tail);
	}
}
#ifndef QCA_WIFI_QCA6390
static QDF_STATUS
dp_rx_pdev_mon_buf_attach(struct dp_pdev *pdev, int mac_id)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *rxdma_srng;
	uint32_t rxdma_entries;
	struct rx_desc_pool *rx_desc_pool;
	QDF_STATUS status;
	uint8_t mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);

	rxdma_srng = &pdev->rxdma_mon_buf_ring[mac_for_pdev];

	rxdma_entries = rxdma_srng->alloc_size / hal_srng_get_entrysize(
		soc->hal_soc,
		RXDMA_MONITOR_BUF);

	rx_desc_pool = &soc->rx_desc_mon[mac_id];

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		"%s: Mon RX Desc Pool[%d] allocation size=%d",
		__func__, pdev_id, rxdma_entries * 3);

	status = dp_rx_desc_pool_alloc(soc, mac_id,
		rxdma_entries * 3, rx_desc_pool);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_rx_desc_pool_alloc() failed\n", __func__);
		return status;
	}

	rx_desc_pool->owner = HAL_RX_BUF_RBM_SW3_BM;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
		"%s: Mon RX Buffers Replenish pdev_id=%d",
		__func__, pdev_id);

	status = dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
		rxdma_entries, &desc_list, &tail);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			"%s: dp_rx_buffers_replenish() failed",
			__func__);
		return status;
	}

	return QDF_STATUS_SUCCESS;
}
static QDF_STATUS
dp_rx_pdev_mon_buf_detach(struct dp_pdev *pdev, int mac_id)
{
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_mon[mac_id];
	if (rx_desc_pool->pool_size != 0)
		dp_rx_desc_pool_free(soc, mac_id, rx_desc_pool);

	return QDF_STATUS_SUCCESS;
}
/*
 * Allocate and setup link descriptor pool that will be used by HW for
 * various link and queue descriptors and managed by WBM
 */
static
QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
	uint32_t total_link_descs, total_mem_size;
	uint32_t num_link_desc_banks;
	uint32_t last_bank_size = 0;
	uint32_t entry_size, num_entries;
	void *mon_desc_srng;
	uint32_t num_replenish_buf;
	struct dp_srng *dp_srng;
	int i;

	dp_srng = &dp_pdev->rxdma_mon_desc_ring[mac_for_pdev];

	num_entries = dp_srng->alloc_size / hal_srng_get_entrysize(
		soc->hal_soc, RXDMA_MONITOR_DESC);

	/* Round up to power of 2 */
	total_link_descs = 1;
	while (total_link_descs < num_entries)
		total_link_descs <<= 1;
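
	/*
	 * For instance (numbers illustrative only): a ring sized for
	 * 1500 entries rounds up to total_link_descs = 2048, the next
	 * power of two, so the idle-list bookkeeping below works on a
	 * power-of-two count.
	 */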
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
		"%s: total_link_descs: %u, link_desc_size: %d\n",
		__func__, total_link_descs, link_desc_size);

	total_mem_size = total_link_descs * link_desc_size;
	total_mem_size += link_desc_align;

	if (total_mem_size <= max_alloc_size) {
		num_link_desc_banks = 0;
		last_bank_size = total_mem_size;
	} else {
		num_link_desc_banks = (total_mem_size) /
			(max_alloc_size - link_desc_align);
		last_bank_size = total_mem_size %
			(max_alloc_size - link_desc_align);
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		"%s: total_mem_size: %d, num_link_desc_banks: %u, max_alloc_size: %d last_bank_size: %d\n",
		__func__, total_mem_size, num_link_desc_banks, max_alloc_size,
		last_bank_size);
	for (i = 0; i < num_link_desc_banks; i++) {
		dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr_unaligned =
			qdf_mem_alloc_consistent(soc->osdev, soc->osdev->dev,
				max_alloc_size,
				&(dp_pdev->link_desc_banks[mac_for_pdev][i].
					base_paddr_unaligned));

		if (!dp_pdev->link_desc_banks[mac_for_pdev][i].
				base_vaddr_unaligned) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: Link desc memory allocation failed\n",
				__func__);
			goto fail;
		}

		dp_pdev->link_desc_banks[mac_for_pdev][i].size = max_alloc_size;

		dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr =
			(void *)((unsigned long)
			(dp_pdev->link_desc_banks[mac_for_pdev][i].
				base_vaddr_unaligned) +
			((unsigned long)
			(dp_pdev->link_desc_banks[mac_for_pdev][i].
				base_vaddr_unaligned) %
			link_desc_align));

		dp_pdev->link_desc_banks[mac_for_pdev][i].base_paddr =
			(unsigned long)
			(dp_pdev->link_desc_banks[mac_for_pdev][i].
				base_paddr_unaligned) +
			((unsigned long)
			(dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr) -
			(unsigned long)
			(dp_pdev->link_desc_banks[mac_for_pdev][i].
				base_vaddr_unaligned));
	}

	if (last_bank_size) {
		/* Allocate last bank in case total memory required is not exact
		 * multiple of max_alloc_size
		 */
		dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr_unaligned =
			qdf_mem_alloc_consistent(soc->osdev,
				soc->osdev->dev, last_bank_size,
				&(dp_pdev->link_desc_banks[mac_for_pdev][i].
					base_paddr_unaligned));

		if (dp_pdev->link_desc_banks[mac_for_pdev][i].
				base_vaddr_unaligned == NULL) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				"%s: allocation failed for mon link desc pool\n",
				__func__);
			goto fail;
		}

		dp_pdev->link_desc_banks[mac_for_pdev][i].size = last_bank_size;

		dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr =
			(void *)((unsigned long)
			(dp_pdev->link_desc_banks[mac_for_pdev][i].
				base_vaddr_unaligned) +
			((unsigned long)
			(dp_pdev->link_desc_banks[mac_for_pdev][i].
				base_vaddr_unaligned) %
			link_desc_align));

		dp_pdev->link_desc_banks[mac_for_pdev][i].base_paddr =
			(unsigned long)
			(dp_pdev->link_desc_banks[mac_for_pdev][i].
				base_paddr_unaligned) +
			((unsigned long)
			(dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr) -
			(unsigned long)
			(dp_pdev->link_desc_banks[mac_for_pdev][i].
				base_vaddr_unaligned));
	}
	/* Allocate and setup link descriptor idle list for HW internal use */
	entry_size = hal_srng_get_entrysize(soc->hal_soc, RXDMA_MONITOR_DESC);
	total_mem_size = entry_size * total_link_descs;
	mon_desc_srng = dp_pdev->rxdma_mon_desc_ring[mac_for_pdev].hal_srng;

	num_replenish_buf = 0;

	if (total_mem_size <= max_alloc_size) {
		void *desc;

		for (i = 0;
			i < MAX_MON_LINK_DESC_BANKS &&
			dp_pdev->link_desc_banks[mac_for_pdev][i].base_paddr;
			i++) {
			uint32_t num_entries =
				(dp_pdev->link_desc_banks[mac_for_pdev][i].size -
				(unsigned long)
				(dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr) -
				(unsigned long)
				(dp_pdev->link_desc_banks[mac_for_pdev][i].
					base_vaddr_unaligned)) / link_desc_size;
			unsigned long paddr =
				(unsigned long)
				(dp_pdev->link_desc_banks[mac_for_pdev][i].base_paddr);
			unsigned long vaddr =
				(unsigned long)
				(dp_pdev->link_desc_banks[mac_for_pdev][i].base_vaddr);

			hal_srng_access_start_unlocked(soc->hal_soc,
				mon_desc_srng);

			while (num_entries && (desc =
				hal_srng_src_get_next(soc->hal_soc,
					mon_desc_srng))) {
				hal_set_link_desc_addr(desc, i, paddr);
				num_entries--;
				num_replenish_buf++;
				paddr += link_desc_size;
				vaddr += link_desc_size;
			}
			hal_srng_access_end_unlocked(soc->hal_soc,
				mon_desc_srng);
		}
	} else {
		qdf_assert(0);
	}

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
		"%s: successfully replenished %d buffers\n",
		__func__, num_replenish_buf);

	return QDF_STATUS_SUCCESS;

fail:
	for (i = 0; i < MAX_MON_LINK_DESC_BANKS; i++) {
		if (dp_pdev->link_desc_banks[mac_for_pdev][i].
				base_vaddr_unaligned) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				dp_pdev->link_desc_banks[mac_for_pdev][i].size,
				dp_pdev->link_desc_banks[mac_for_pdev][i].
					base_vaddr_unaligned,
				dp_pdev->link_desc_banks[mac_for_pdev][i].
					base_paddr_unaligned, 0);
			dp_pdev->link_desc_banks[mac_for_pdev][i].
				base_vaddr_unaligned = NULL;
		}
	}
	return QDF_STATUS_E_FAILURE;
}
/*
 * Free link descriptor pool that was set up for HW
 */
static
void dp_mon_link_desc_pool_cleanup(struct dp_soc *soc, uint32_t mac_id)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);
	int i;

	for (i = 0; i < MAX_MON_LINK_DESC_BANKS; i++) {
		if (dp_pdev->link_desc_banks[mac_for_pdev][i].
				base_vaddr_unaligned) {
			qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
				dp_pdev->link_desc_banks[mac_for_pdev][i].size,
				dp_pdev->link_desc_banks[mac_for_pdev][i].
					base_vaddr_unaligned,
				dp_pdev->link_desc_banks[mac_for_pdev][i].
					base_paddr_unaligned, 0);
			dp_pdev->link_desc_banks[mac_for_pdev][i].
				base_vaddr_unaligned = NULL;
		}
	}
}
/**
 * dp_rx_pdev_mon_attach() - attach DP RX for monitor mode
 * @pdev: core txrx pdev context
 *
 * This function will attach a DP RX instance for monitor mode into
 * the main device (SOC) context. It allocates and initializes the
 * DP RX resources needed per mac.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_attach(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;
	QDF_STATUS status;
	uint8_t pdev_id = pdev->pdev_id;
	int mac_id;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
		"%s: pdev attach id=%d\n", __func__, pdev_id);

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		status = dp_rx_pdev_mon_buf_attach(pdev, mac_for_pdev);
		if (!QDF_IS_STATUS_SUCCESS(status)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: dp_rx_pdev_mon_buf_attach() failed\n",
				__func__);
			return status;
		}

		status = dp_rx_pdev_mon_status_attach(pdev, mac_for_pdev);
		if (!QDF_IS_STATUS_SUCCESS(status)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: dp_rx_pdev_mon_status_attach() failed\n",
				__func__);
			return status;
		}

		status = dp_mon_link_desc_pool_setup(soc, mac_for_pdev);
		if (!QDF_IS_STATUS_SUCCESS(status)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				"%s: dp_mon_link_desc_pool_setup() failed\n",
				__func__);
			return status;
		}
	}
	qdf_spinlock_create(&pdev->mon_lock);

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_rx_pdev_mon_detach() - detach dp rx for monitor mode
 * @pdev: core txrx pdev context
 *
 * This function will detach DP RX for monitor mode from the
 * main device context. It frees the DP RX resources held for
 * monitor mode.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_detach(struct dp_pdev *pdev)
{
	uint8_t pdev_id = pdev->pdev_id;
	struct dp_soc *soc = pdev->soc;
	int mac_id;

	/* The lock is shared by all macs; destroy it once, not per mac */
	qdf_spinlock_destroy(&pdev->mon_lock);

	for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
		int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);

		dp_mon_link_desc_pool_cleanup(soc, mac_for_pdev);
		dp_rx_pdev_mon_status_detach(pdev, mac_for_pdev);
		dp_rx_pdev_mon_buf_detach(pdev, mac_for_pdev);
	}
	return QDF_STATUS_SUCCESS;
}
#endif
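
/*
 * A rough sketch of how these entry points are typically wired
 * together (illustrative only -- the actual call sites live in
 * dp_main.c and the per-target interrupt service code, and the
 * caller names below are assumptions, not part of this file):
 *
 *   dp_pdev_attach()                  -> dp_rx_pdev_mon_attach(pdev)
 *   per-ring interrupt / NAPI service -> dp_rx_mon_dest_process(soc,
 *                                            mac_id, quota)
 *   dp_pdev_detach()                  -> dp_rx_pdev_mon_detach(pdev)
 *
 * dp_rx_mon_dest_process() drains the RXDMA monitor destination
 * ring, pops MPDUs via dp_rx_mon_mpdu_pop(), restitches them with
 * dp_rx_mon_restitch_mpdu_from_msdus(), and hands the result to the
 * OS interface through dp_rx_mon_deliver().
 */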