/*
 * Copyright (c) 2017-2019 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "hal_hw_headers.h"
#include "dp_types.h"
#include "dp_rx.h"
#include "dp_peer.h"
#include "hal_rx.h"
#include "hal_api.h"
#include "qdf_trace.h"
#include "qdf_nbuf.h"
#include "hal_api_mon.h"
#include "dp_rx_mon.h"
#include "dp_internal.h"
#include "qdf_mem.h"   /* qdf_mem_malloc,free */

#ifdef FEATURE_PERPKT_INFO
#include "dp_ratetable.h"
#endif

#ifdef WLAN_RX_PKT_CAPTURE_ENH
#include "dp_rx_mon_feature.h"
#else
static QDF_STATUS
dp_rx_handle_enh_capture(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info)
{
	return QDF_STATUS_SUCCESS;
}

static void
dp_rx_mon_enh_capture_process(struct dp_pdev *pdev, uint32_t tlv_status,
			      qdf_nbuf_t status_nbuf,
			      struct hal_rx_ppdu_info *ppdu_info,
			      bool *nbuf_used)
{
}
#endif
#ifdef FEATURE_PERPKT_INFO
static inline void
dp_rx_populate_rx_rssi_chain(struct hal_rx_ppdu_info *ppdu_info,
			     struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	uint8_t chain, bw;
	int8_t rssi;

	for (chain = 0; chain < SS_COUNT; chain++) {
		for (bw = 0; bw < MAX_BW; bw++) {
			rssi = ppdu_info->rx_status.rssi_chain[chain][bw];
			if (rssi != DP_RSSI_INVAL)
				cdp_rx_ppdu->rssi_chain[chain][bw] = rssi;
			else
				cdp_rx_ppdu->rssi_chain[chain][bw] = 0;
		}
	}
}

/*
 * dp_rx_populate_su_evm_details() - Populate SU EVM info
 * @ppdu_info: ppdu info structure from ppdu ring
 * @cdp_rx_ppdu: rx ppdu indication structure
 */
static inline void
dp_rx_populate_su_evm_details(struct hal_rx_ppdu_info *ppdu_info,
			      struct cdp_rx_indication_ppdu *cdp_rx_ppdu)
{
	uint8_t pilot_evm;
	uint8_t nss_count;
	uint8_t pilot_count;

	nss_count = ppdu_info->evm_info.nss_count;
	pilot_count = ppdu_info->evm_info.pilot_count;

	if ((nss_count * pilot_count) > DP_RX_MAX_SU_EVM_COUNT) {
		qdf_err("pilot evm count is more than expected");
		return;
	}
	cdp_rx_ppdu->evm_info.pilot_count = pilot_count;
	cdp_rx_ppdu->evm_info.nss_count = nss_count;

	/* Populate evm for pilot_evm = nss_count * pilot_count */
	for (pilot_evm = 0; pilot_evm < nss_count * pilot_count; pilot_evm++) {
		cdp_rx_ppdu->evm_info.pilot_evm[pilot_evm] =
			ppdu_info->evm_info.pilot_evm[pilot_evm];
	}
}
/**
 * dp_rx_populate_cdp_indication_ppdu() - Populate cdp rx indication structure
 * @pdev: pdev ctx
 * @ppdu_info: ppdu info structure from ppdu ring
 * @ppdu_nbuf: qdf nbuf abstraction for linux skb
 *
 * Return: none
 */
static inline void
dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
				   struct hal_rx_ppdu_info *ppdu_info,
				   qdf_nbuf_t ppdu_nbuf)
{
	struct dp_peer *peer;
	struct dp_soc *soc = pdev->soc;
	struct dp_ast_entry *ast_entry;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;
	uint32_t ast_index;

	cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;

	cdp_rx_ppdu->first_data_seq_ctrl =
		ppdu_info->rx_status.first_data_seq_ctrl;
	cdp_rx_ppdu->frame_ctrl =
		ppdu_info->rx_status.frame_control;
	cdp_rx_ppdu->ppdu_id = ppdu_info->com_info.ppdu_id;
	cdp_rx_ppdu->length = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->duration = ppdu_info->rx_status.duration;
	cdp_rx_ppdu->u.bw = ppdu_info->rx_status.bw;
	cdp_rx_ppdu->tcp_msdu_count = ppdu_info->rx_status.tcp_msdu_count;
	cdp_rx_ppdu->udp_msdu_count = ppdu_info->rx_status.udp_msdu_count;
	cdp_rx_ppdu->other_msdu_count = ppdu_info->rx_status.other_msdu_count;
	cdp_rx_ppdu->u.nss = ppdu_info->rx_status.nss;
	cdp_rx_ppdu->u.mcs = ppdu_info->rx_status.mcs;
	if ((ppdu_info->rx_status.sgi == VHT_SGI_NYSM) &&
	    (ppdu_info->rx_status.preamble_type == HAL_RX_PKT_TYPE_11AC))
		cdp_rx_ppdu->u.gi = CDP_SGI_0_4_US;
	else
		cdp_rx_ppdu->u.gi = ppdu_info->rx_status.sgi;
	cdp_rx_ppdu->u.ldpc = ppdu_info->rx_status.ldpc;
	cdp_rx_ppdu->u.preamble = ppdu_info->rx_status.preamble_type;
	cdp_rx_ppdu->u.ppdu_type = ppdu_info->rx_status.reception_type;
	cdp_rx_ppdu->u.ltf_size = (ppdu_info->rx_status.he_data5 >>
				   QDF_MON_STATUS_HE_LTF_SIZE_SHIFT) & 0x3;
	cdp_rx_ppdu->num_mpdu = ppdu_info->com_info.mpdu_cnt_fcs_ok;
	cdp_rx_ppdu->rssi = ppdu_info->rx_status.rssi_comb;
	cdp_rx_ppdu->timestamp = ppdu_info->rx_status.tsft;
	cdp_rx_ppdu->channel = ppdu_info->rx_status.chan_num;
	cdp_rx_ppdu->beamformed = ppdu_info->rx_status.beamformed;
	cdp_rx_ppdu->num_msdu = (cdp_rx_ppdu->tcp_msdu_count +
				 cdp_rx_ppdu->udp_msdu_count +
				 cdp_rx_ppdu->other_msdu_count);
	cdp_rx_ppdu->num_bytes = ppdu_info->rx_status.ppdu_len;
	cdp_rx_ppdu->retries = CDP_FC_IS_RETRY_SET(cdp_rx_ppdu->frame_ctrl) ?
		ppdu_info->com_info.mpdu_cnt_fcs_ok : 0;

	if (ppdu_info->com_info.mpdu_cnt_fcs_ok > 1)
		cdp_rx_ppdu->is_ampdu = 1;
	else
		cdp_rx_ppdu->is_ampdu = 0;

	cdp_rx_ppdu->tid = ppdu_info->rx_status.tid;
	cdp_rx_ppdu->lsig_a = ppdu_info->rx_status.rate;
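
	/*
	 * Resolve the transmitting peer from the AST index reported in the
	 * PPDU status; fall back to HTT_INVALID_PEER when the index or the
	 * AST entry is not usable.
	 */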
	ast_index = ppdu_info->rx_status.ast_index;
	if (ast_index >= wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}

	ast_entry = soc->ast_table[ast_index];
	if (!ast_entry) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}

	peer = ast_entry->peer;
	if (!peer || peer->peer_ids[0] == HTT_INVALID_PEER) {
		cdp_rx_ppdu->peer_id = HTT_INVALID_PEER;
		return;
	}

	qdf_mem_copy(cdp_rx_ppdu->mac_addr,
		     peer->mac_addr.raw, QDF_MAC_ADDR_SIZE);
	cdp_rx_ppdu->peer_id = peer->peer_ids[0];
	cdp_rx_ppdu->vdev_id = peer->vdev->vdev_id;
	cdp_rx_ppdu->u.ltf_size = ppdu_info->rx_status.ltf_size;

	dp_rx_populate_rx_rssi_chain(ppdu_info, cdp_rx_ppdu);
	dp_rx_populate_su_evm_details(ppdu_info, cdp_rx_ppdu);
	cdp_rx_ppdu->rx_antenna = ppdu_info->rx_status.rx_antenna;
}
#else
static inline void
dp_rx_populate_cdp_indication_ppdu(struct dp_pdev *pdev,
				   struct hal_rx_ppdu_info *ppdu_info,
				   qdf_nbuf_t ppdu_nbuf)
{
}
#endif
/**
 * dp_rx_stats_update() - Update per-peer statistics
 * @soc: Datapath SOC handle
 * @peer: Datapath peer handle
 * @ppdu: PPDU Descriptor
 *
 * Return: None
 */
#ifdef FEATURE_PERPKT_INFO
static inline void dp_rx_rate_stats_update(struct dp_peer *peer,
					   struct cdp_rx_indication_ppdu *ppdu)
{
	uint32_t ratekbps = 0;
	uint32_t ppdu_rx_rate = 0;
	uint32_t nss = 0;
	uint32_t rix;

	if (!peer || !ppdu)
		return;
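
	/*
	 * dp_getrateindex() takes a zero-based NSS, so convert from the
	 * 1-based value carried in the PPDU descriptor (a value of 0 is
	 * passed through unchanged).
	 */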
	if (ppdu->u.nss == 0)
		nss = 0;
	else
		nss = ppdu->u.nss - 1;

	ratekbps = dp_getrateindex(ppdu->u.gi,
				   ppdu->u.mcs,
				   nss,
				   ppdu->u.preamble,
				   ppdu->u.bw,
				   &rix);

	if (!ratekbps)
		return;

	ppdu->rix = rix;
	DP_STATS_UPD(peer, rx.last_rx_rate, ratekbps);
	dp_ath_rate_lpf(peer->stats.rx.avg_rx_rate, ratekbps);
	ppdu_rx_rate = dp_ath_rate_out(peer->stats.rx.avg_rx_rate);
	DP_STATS_UPD(peer, rx.rnd_avg_rx_rate, ppdu_rx_rate);
	ppdu->rx_ratekbps = ratekbps;
	ppdu->rx_ratecode = CDP_TXRX_RATECODE(ppdu->u.mcs,
					      nss,
					      ppdu->u.preamble);

	if (peer->vdev)
		peer->vdev->stats.rx.last_rx_rate = ratekbps;
}
static void dp_rx_stats_update(struct dp_pdev *pdev, struct dp_peer *peer,
			       struct cdp_rx_indication_ppdu *ppdu)
{
	struct dp_soc *soc = NULL;
	uint8_t mcs, preamble, ac = 0;
	uint16_t num_msdu;
	bool is_invalid_peer = false;

	mcs = ppdu->u.mcs;
	preamble = ppdu->u.preamble;
	num_msdu = ppdu->num_msdu;

	if (pdev)
		soc = pdev->soc;
	else
		return;

	if (!peer) {
		is_invalid_peer = true;
		peer = pdev->invalid_peer;
	}

	if (!soc || soc->process_rx_status)
		return;

	DP_STATS_UPD(peer, rx.rssi, ppdu->rssi);
	if (peer->stats.rx.avg_rssi == INVALID_RSSI)
		peer->stats.rx.avg_rssi = ppdu->rssi;
	else
		peer->stats.rx.avg_rssi =
			DP_GET_AVG_RSSI(peer->stats.rx.avg_rssi, ppdu->rssi);
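
	/* Legacy 11a/11b receptions are always single spatial stream. */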
	if ((preamble == DOT11_A) || (preamble == DOT11_B))
		ppdu->u.nss = 1;

	if (ppdu->u.nss)
		DP_STATS_INC(peer, rx.nss[ppdu->u.nss - 1], num_msdu);

	DP_STATS_INC(peer, rx.sgi_count[ppdu->u.gi], num_msdu);
	DP_STATS_INC(peer, rx.bw[ppdu->u.bw], num_msdu);
	DP_STATS_INC(peer, rx.reception_type[ppdu->u.ppdu_type], num_msdu);
	DP_STATS_INCC(peer, rx.ampdu_cnt, num_msdu, ppdu->is_ampdu);
	DP_STATS_INCC(peer, rx.non_ampdu_cnt, num_msdu, !(ppdu->is_ampdu));
	DP_STATS_UPD(peer, rx.rx_rate, mcs);
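
	/*
	 * Per-preamble MCS histogram: MCS values beyond the range valid for
	 * the preamble are accumulated in the last (MAX_MCS - 1) bucket.
	 */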
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_A)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11B) && (preamble == DOT11_B)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11A) && (preamble == DOT11_N)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < MAX_MCS_11AC) && (preamble == DOT11_AC)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[MAX_MCS - 1], num_msdu,
		      ((mcs >= (MAX_MCS - 1)) && (preamble == DOT11_AX)));
	DP_STATS_INCC(peer,
		      rx.pkt_type[preamble].mcs_count[mcs], num_msdu,
		      ((mcs < (MAX_MCS - 1)) && (preamble == DOT11_AX)));

	/*
	 * If invalid TID, it could be a non-qos frame, hence do not update
	 * any AC counters
	 */
	ac = TID_TO_WME_AC(ppdu->tid);
	if (ppdu->tid != HAL_TID_INVALID)
		DP_STATS_INC(peer, rx.wme_ac_type[ac], num_msdu);

	dp_peer_stats_notify(peer);
	DP_STATS_UPD(peer, rx.last_rssi, ppdu->rssi);

	if (is_invalid_peer)
		return;

	if (dp_is_subtype_data(ppdu->frame_ctrl))
		dp_rx_rate_stats_update(peer, ppdu);

#if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, pdev->soc,
			     &peer->stats, ppdu->peer_id,
			     UPDATE_PEER_STATS, pdev->pdev_id);
#endif
}
#endif
/*
 * dp_rx_get_fcs_ok_msdu() - get ppdu status buffer containing fcs_ok msdu
 * @pdev: pdev object
 * @ppdu_info: ppdu info object
 *
 * Return: nbuf
 */
static inline qdf_nbuf_t
dp_rx_get_fcs_ok_msdu(struct dp_pdev *pdev,
		      struct hal_rx_ppdu_info *ppdu_info)
{
	uint16_t mpdu_fcs_ok;
	qdf_nbuf_t status_nbuf = NULL;
	unsigned long int fcs_ok_bitmap;

	/* If fcs_ok_bitmap is zero, no need to process further */
	if (qdf_unlikely(!ppdu_info->com_info.mpdu_fcs_ok_bitmap))
		return NULL;

	/* Obtain the index of the first fcs-passed mpdu from the bitmap;
	 * this index is used to get the first msdu payload of an
	 * fcs-passed mpdu.
	 */
	fcs_ok_bitmap = ppdu_info->com_info.mpdu_fcs_ok_bitmap;
	mpdu_fcs_ok = qdf_find_first_bit(&fcs_ok_bitmap, HAL_RX_MAX_MPDU);

	/* Get the status buffer by indexing with mpdu_fcs_ok;
	 * it contains the first msdu payload with fcs passed.
	 */
	status_nbuf = ppdu_info->ppdu_msdu_info[mpdu_fcs_ok].nbuf;

	/* Take a reference on the status nbuf as this nbuf is to be
	 * freed by the upper layer.
	 */
	qdf_nbuf_ref(status_nbuf);

	/* Free the ppdu status buffer queue */
	qdf_nbuf_queue_free(&pdev->rx_ppdu_buf_q);

	return status_nbuf;
}
static inline void
dp_rx_handle_ppdu_status_buf(struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     qdf_nbuf_t status_nbuf)
{
	qdf_nbuf_queue_add(&pdev->rx_ppdu_buf_q, status_nbuf);
}
/**
 * dp_rx_handle_mcopy_mode() - Allocate and deliver first MSDU payload
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 *
 * Return: QDF_STATUS_SUCCESS - If nbuf to be freed by caller
 *         QDF_STATUS_E_ALREADY - If nbuf not to be freed by caller
 */
#ifdef FEATURE_PERPKT_INFO
static inline QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf)
{
	uint8_t size = 0;
	struct ieee80211_frame *wh;
	uint32_t *nbuf_data;

	if (!ppdu_info->fcs_ok_msdu_info.first_msdu_payload)
		return QDF_STATUS_SUCCESS;

	if (pdev->m_copy_id.rx_ppdu_id == ppdu_info->com_info.ppdu_id)
		return QDF_STATUS_SUCCESS;

	pdev->m_copy_id.rx_ppdu_id = ppdu_info->com_info.ppdu_id;
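
	/*
	 * A 4-byte phy_ppdu_id precedes the 802.11 header in the status
	 * buffer (see dp_rx_handle_smart_mesh_mode()), so skip it to reach
	 * the frame control field.
	 */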
	wh = (struct ieee80211_frame *)
		(ppdu_info->fcs_ok_msdu_info.first_msdu_payload + 4);

	size = (ppdu_info->fcs_ok_msdu_info.first_msdu_payload -
		qdf_nbuf_data(nbuf));
	ppdu_info->fcs_ok_msdu_info.first_msdu_payload = NULL;

	if (qdf_nbuf_pull_head(nbuf, size) == NULL)
		return QDF_STATUS_SUCCESS;

	if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_MGT) ||
	    ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	     IEEE80211_FC0_TYPE_CTL)) {
		return QDF_STATUS_SUCCESS;
	}

	nbuf_data = (uint32_t *)qdf_nbuf_data(nbuf);
	*nbuf_data = pdev->ppdu_info.com_info.ppdu_id;
	/* only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
			   ppdu_info->fcs_ok_msdu_info.payload_len);
	dp_wdi_event_handler(WDI_EVENT_RX_DATA, soc,
			     nbuf, HTT_INVALID_PEER, WDI_NO_VAL, pdev->pdev_id);
	return QDF_STATUS_E_ALREADY;
}
#else
static inline QDF_STATUS
dp_rx_handle_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info, qdf_nbuf_t nbuf)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#ifdef FEATURE_PERPKT_INFO
static inline void
dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info,
			 uint32_t tlv_status,
			 qdf_nbuf_t status_nbuf)
{
	QDF_STATUS mcopy_status;

	if (qdf_unlikely(!ppdu_info->com_info.mpdu_cnt)) {
		qdf_nbuf_free(status_nbuf);
		return;
	}
	/* Add buffers to queue until we receive
	 * HAL_TLV_STATUS_PPDU_DONE
	 */
	dp_rx_handle_ppdu_status_buf(pdev, ppdu_info, status_nbuf);

	/* If tlv_status is PPDU_DONE, process rx_ppdu_buf_q
	 * and deliver the fcs_ok msdu buffer
	 */
	if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
		/* Get rx ppdu status buffer having fcs ok msdu */
		status_nbuf = dp_rx_get_fcs_ok_msdu(pdev, ppdu_info);
		if (status_nbuf) {
			mcopy_status = dp_rx_handle_mcopy_mode(soc, pdev,
							       ppdu_info,
							       status_nbuf);
			if (mcopy_status == QDF_STATUS_SUCCESS)
				qdf_nbuf_free(status_nbuf);
		}
	}
}
#else
static inline void
dp_rx_process_mcopy_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			 struct hal_rx_ppdu_info *ppdu_info,
			 uint32_t tlv_status,
			 qdf_nbuf_t status_nbuf)
{
}
#endif
/**
 * dp_rx_handle_smart_mesh_mode() - Deliver header for smart mesh
 * @soc: Datapath SOC handle
 * @pdev: Datapath PDEV handle
 * @ppdu_info: Structure for rx ppdu info
 * @nbuf: Qdf nbuf abstraction for linux skb
 *
 * Return: 0 on success, 1 on failure
 */
static inline int
dp_rx_handle_smart_mesh_mode(struct dp_soc *soc, struct dp_pdev *pdev,
			     struct hal_rx_ppdu_info *ppdu_info,
			     qdf_nbuf_t nbuf)
{
	uint8_t size = 0;

	if (!pdev->monitor_vdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] Monitor vdev is NULL !!",
			  __func__, __LINE__);
		return 1;
	}

	if (!ppdu_info->msdu_info.first_msdu_payload) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] First msdu payload not present",
			  __func__, __LINE__);
		return 1;
	}

	/* Adding 4 bytes to get to start of 802.11 frame after phy_ppdu_id */
	size = (ppdu_info->msdu_info.first_msdu_payload -
		qdf_nbuf_data(nbuf)) + 4;
	ppdu_info->msdu_info.first_msdu_payload = NULL;

	if (qdf_nbuf_pull_head(nbuf, size) == NULL) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "[%s]:[%d] No header present",
			  __func__, __LINE__);
		return 1;
	}

	/* Only retain RX MSDU payload in the skb */
	qdf_nbuf_trim_tail(nbuf, qdf_nbuf_len(nbuf) -
			   ppdu_info->msdu_info.payload_len);

	if (!qdf_nbuf_update_radiotap(&pdev->ppdu_info.rx_status, nbuf,
				      qdf_nbuf_headroom(nbuf))) {
		DP_STATS_INC(pdev, dropped.mon_radiotap_update_err, 1);
		return 1;
	}

	pdev->monitor_vdev->osif_rx_mon(pdev->monitor_vdev->osif_vdev,
					nbuf, NULL);
	pdev->ppdu_info.rx_status.monitor_direct_used = 0;
	return 0;
}
/**
 * dp_rx_handle_ppdu_stats() - Allocate and deliver ppdu stats to cdp layer
 * @soc: core txrx main context
 * @pdev: pdev structure
 * @ppdu_info: structure for rx ppdu ring
 *
 * Return: none
 */
#ifdef FEATURE_PERPKT_INFO
static inline void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
	qdf_nbuf_t ppdu_nbuf;
	struct dp_peer *peer;
	struct cdp_rx_indication_ppdu *cdp_rx_ppdu;

	/*
	 * Do not allocate if fcs error,
	 * ast idx invalid / fctl invalid
	 */
	if (ppdu_info->com_info.mpdu_cnt_fcs_ok == 0)
		return;
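
	/*
	 * For smart-monitor (neighbour/NAC) clients, refresh the cached RSSI
	 * of the neighbour peer whose MAC address matches addr2 of the
	 * received frame.
	 */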
	if (ppdu_info->nac_info.fc_valid &&
	    ppdu_info->nac_info.to_ds_flag &&
	    ppdu_info->nac_info.mac_addr2_valid) {
		struct dp_neighbour_peer *peer = NULL;
		uint8_t rssi = ppdu_info->rx_status.rssi_comb;

		qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
		if (pdev->neighbour_peers_added) {
			TAILQ_FOREACH(peer, &pdev->neighbour_peers_list,
				      neighbour_peer_list_elem) {
				if (!qdf_mem_cmp(&peer->neighbour_peers_macaddr,
						 &ppdu_info->nac_info.mac_addr2,
						 QDF_MAC_ADDR_SIZE)) {
					peer->rssi = rssi;
					break;
				}
			}
		}
		qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
	}

	/* need not generate wdi event when mcopy and
	 * enhanced stats are not enabled
	 */
	if (!pdev->mcopy_mode && !pdev->enhanced_stats_en)
		return;

	if (!pdev->mcopy_mode) {
		if (!ppdu_info->rx_status.frame_control_info_valid)
			return;

		if (ppdu_info->rx_status.ast_index == HAL_AST_IDX_INVALID)
			return;
	}

	ppdu_nbuf = qdf_nbuf_alloc(soc->osdev,
				   sizeof(struct cdp_rx_indication_ppdu),
				   0, 0, FALSE);
	if (ppdu_nbuf) {
		dp_rx_populate_cdp_indication_ppdu(pdev, ppdu_info, ppdu_nbuf);
		qdf_nbuf_put_tail(ppdu_nbuf,
				  sizeof(struct cdp_rx_indication_ppdu));
		cdp_rx_ppdu = (struct cdp_rx_indication_ppdu *)ppdu_nbuf->data;
		peer = dp_peer_find_by_id(soc, cdp_rx_ppdu->peer_id);
		if (peer) {
			cdp_rx_ppdu->cookie = (void *)peer->wlanstats_ctx;
			dp_rx_stats_update(pdev, peer, cdp_rx_ppdu);
			dp_peer_unref_del_find_by_id(peer);
		}
		if (cdp_rx_ppdu->peer_id != HTT_INVALID_PEER) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC,
					     soc, ppdu_nbuf,
					     cdp_rx_ppdu->peer_id,
					     WDI_NO_VAL, pdev->pdev_id);
		} else if (pdev->mcopy_mode) {
			dp_wdi_event_handler(WDI_EVENT_RX_PPDU_DESC, soc,
					     ppdu_nbuf, HTT_INVALID_PEER,
					     WDI_NO_VAL, pdev->pdev_id);
		} else {
			qdf_nbuf_free(ppdu_nbuf);
		}
	}
}
#else
static inline void
dp_rx_handle_ppdu_stats(struct dp_soc *soc, struct dp_pdev *pdev,
			struct hal_rx_ppdu_info *ppdu_info)
{
}
#endif
/**
 * dp_rx_process_peer_based_pktlog() - Process Rx pktlog if peer based
 * filtering is enabled
 * @soc: core txrx main context
 * @ppdu_info: Structure for rx ppdu info
 * @status_nbuf: Qdf nbuf abstraction for linux skb
 * @mac_id: mac_id/pdev_id correspondingly for MCL and WIN
 *
 * Return: none
 */
static inline void
dp_rx_process_peer_based_pktlog(struct dp_soc *soc,
				struct hal_rx_ppdu_info *ppdu_info,
				qdf_nbuf_t status_nbuf, uint32_t mac_id)
{
	struct dp_peer *peer;
	struct dp_ast_entry *ast_entry;
	uint32_t ast_index;
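
	/*
	 * Look up the peer through the AST index from the PPDU status and
	 * deliver the status buffer over WDI only when that peer has
	 * peer-based pktlog filtering enabled.
	 */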
	ast_index = ppdu_info->rx_status.ast_index;
	if (ast_index < (WLAN_UMAC_PSOC_MAX_PEERS * 2)) {
		ast_entry = soc->ast_table[ast_index];
		if (ast_entry) {
			peer = ast_entry->peer;
			if (peer && (peer->peer_ids[0] != HTT_INVALID_PEER)) {
				if (peer->peer_based_pktlog_filter) {
					dp_wdi_event_handler(
						WDI_EVENT_RX_DESC, soc,
						status_nbuf,
						peer->peer_ids[0],
						WDI_NO_VAL, mac_id);
				}
			}
		}
	}
}
/**
 * dp_rx_mon_status_process_tlv() - Process status TLVs in status
 * buffers on the Rx status queue posted by status SRNG processing.
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 *
 * Return: none
 */
static inline void
dp_rx_mon_status_process_tlv(struct dp_soc *soc, uint32_t mac_id,
			     uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	struct hal_rx_ppdu_info *ppdu_info;
	qdf_nbuf_t status_nbuf;
	uint8_t *rx_tlv;
	uint8_t *rx_tlv_start;
	uint32_t tlv_status = HAL_TLV_STATUS_BUF_DONE;
	QDF_STATUS enh_log_status = QDF_STATUS_SUCCESS;
	struct cdp_pdev_mon_stats *rx_mon_stats;
	int smart_mesh_status;
	enum WDI_EVENT pktlog_mode = WDI_NO_VAL;
	bool nbuf_used;
	uint32_t rx_enh_capture_mode;

	ppdu_info = &pdev->ppdu_info;
	rx_mon_stats = &pdev->rx_mon_stats;

	if (pdev->mon_ppdu_status != DP_PPDU_STATUS_START)
		return;

	rx_enh_capture_mode = pdev->rx_enh_capture_mode;

	while (!qdf_nbuf_is_queue_empty(&pdev->rx_status_q)) {
		status_nbuf = qdf_nbuf_queue_remove(&pdev->rx_status_q);

		rx_tlv = qdf_nbuf_data(status_nbuf);
		rx_tlv_start = rx_tlv;
		nbuf_used = false;

		if ((pdev->monitor_vdev) || (pdev->enhanced_stats_en) ||
		    pdev->mcopy_mode ||
		    (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED)) {
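			/*
			 * Walk every TLV in this status buffer; intermediate
			 * statuses (PPDU_NOT_DONE, HEADER, MPDU_END,
			 * MSDU_END) keep the walk going, and the
			 * RX_BUFFER_SIZE check guards against running past
			 * the end of the buffer.
			 */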
			do {
				tlv_status = hal_rx_status_get_tlv_info(rx_tlv,
						ppdu_info, pdev->soc->hal_soc,
						status_nbuf);

				dp_rx_mon_update_dbg_ppdu_stats(ppdu_info,
								rx_mon_stats);

				dp_rx_mon_enh_capture_process(pdev, tlv_status,
					status_nbuf, ppdu_info,
					&nbuf_used);

				rx_tlv = hal_rx_status_get_next_tlv(rx_tlv);

				if ((rx_tlv - rx_tlv_start) >= RX_BUFFER_SIZE)
					break;

			} while ((tlv_status == HAL_TLV_STATUS_PPDU_NOT_DONE) ||
				 (tlv_status == HAL_TLV_STATUS_HEADER) ||
				 (tlv_status == HAL_TLV_STATUS_MPDU_END) ||
				 (tlv_status == HAL_TLV_STATUS_MSDU_END));
		}
		if (pdev->dp_peer_based_pktlog) {
			dp_rx_process_peer_based_pktlog(soc, ppdu_info,
							status_nbuf, mac_id);
		} else {
			if (pdev->rx_pktlog_mode == DP_RX_PKTLOG_FULL)
				pktlog_mode = WDI_EVENT_RX_DESC;
			else if (pdev->rx_pktlog_mode == DP_RX_PKTLOG_LITE)
				pktlog_mode = WDI_EVENT_LITE_RX;

			if (pktlog_mode != WDI_NO_VAL)
				dp_wdi_event_handler(pktlog_mode, soc,
						     status_nbuf,
						     HTT_INVALID_PEER,
						     WDI_NO_VAL, mac_id);
		}

		/* smart monitor vap and m_copy cannot co-exist */
		if (ppdu_info->rx_status.monitor_direct_used &&
		    pdev->neighbour_peers_added &&
		    pdev->monitor_vdev) {
			smart_mesh_status = dp_rx_handle_smart_mesh_mode(soc,
						pdev, ppdu_info, status_nbuf);
			if (smart_mesh_status)
				qdf_nbuf_free(status_nbuf);
		} else if (qdf_unlikely(pdev->mcopy_mode)) {
			dp_rx_process_mcopy_mode(soc, pdev,
						 ppdu_info, tlv_status,
						 status_nbuf);
		} else if (rx_enh_capture_mode != CDP_RX_ENH_CAPTURE_DISABLED) {
			if (!nbuf_used)
				qdf_nbuf_free(status_nbuf);

			if (tlv_status == HAL_TLV_STATUS_PPDU_DONE)
				enh_log_status =
					dp_rx_handle_enh_capture(soc,
								 pdev,
								 ppdu_info);
		} else {
			qdf_nbuf_free(status_nbuf);
		}

		if (tlv_status == HAL_TLV_STATUS_PPDU_NON_STD_DONE) {
			dp_rx_mon_deliver_non_std(soc, mac_id);
		} else if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
			rx_mon_stats->status_ppdu_done++;
			if (pdev->enhanced_stats_en ||
			    pdev->mcopy_mode || pdev->neighbour_peers_added)
				dp_rx_handle_ppdu_stats(soc, pdev, ppdu_info);

			pdev->mon_ppdu_status = DP_PPDU_STATUS_DONE;
			dp_rx_mon_dest_process(soc, mac_id, quota);
			pdev->mon_ppdu_status = DP_PPDU_STATUS_START;
		}
	}
	return;
}
/*
 * dp_rx_mon_status_srng_process() - Process the monitor status ring:
 * post filled status ring buffers to the Rx status queue for later
 * processing once their status TLVs are complete, and allocate a new
 * buffer to the status ring for each buffer that was posted.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries that were processed.
 */
static inline uint32_t
dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
			      uint32_t quota)
{
	struct dp_pdev *pdev = dp_get_pdev_for_mac_id(soc, mac_id);
	hal_soc_handle_t hal_soc;
	void *mon_status_srng;
	void *rxdma_mon_status_ring_entry;
	QDF_STATUS status;
	uint32_t work_done = 0;
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, mac_id);

	mon_status_srng = pdev->rxdma_mon_status_ring[mac_for_pdev].hal_srng;

	qdf_assert(mon_status_srng);
	if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d : HAL Monitor Status Ring Init Failed -- %pK",
			  __func__, __LINE__, mon_status_srng);
		return work_done;
	}

	hal_soc = soc->hal_soc;

	qdf_assert(hal_soc);

	if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_status_srng)))
		goto done;

	/* mon_status_ring_desc => WBM_BUFFER_RING STRUCT =>
	 * BUFFER_ADDR_INFO STRUCT
	 */
	while (qdf_likely((rxdma_mon_status_ring_entry =
		hal_srng_src_peek(hal_soc, mon_status_srng))
			&& quota--)) {
		uint32_t rx_buf_cookie;
		qdf_nbuf_t status_nbuf;
		struct dp_rx_desc *rx_desc;
		uint8_t *status_buf;
		qdf_dma_addr_t paddr;
		uint64_t buf_addr;

		buf_addr =
			(HAL_RX_BUFFER_ADDR_31_0_GET(
				rxdma_mon_status_ring_entry) |
			((uint64_t)(HAL_RX_BUFFER_ADDR_39_32_GET(
				rxdma_mon_status_ring_entry)) << 32));

		if (qdf_likely(buf_addr)) {
			rx_buf_cookie =
				HAL_RX_BUF_COOKIE_GET(
					rxdma_mon_status_ring_entry);
			rx_desc = dp_rx_cookie_2_va_mon_status(soc,
								rx_buf_cookie);

			qdf_assert(rx_desc);

			status_nbuf = rx_desc->nbuf;

			qdf_nbuf_sync_for_cpu(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			status_buf = qdf_nbuf_data(status_nbuf);

			status = hal_get_rx_status_done(status_buf);

			if (status != QDF_STATUS_SUCCESS) {
				uint32_t hp, tp;

				hal_get_sw_hptp(hal_soc, mon_status_srng,
						&tp, &hp);
				QDF_TRACE(QDF_MODULE_ID_DP,
					  QDF_TRACE_LEVEL_ERROR,
					  "[%s][%d] status not done - hp:%u, tp:%u",
					  __func__, __LINE__, hp, tp);
				/* WAR for missing status: Skip status entry */
				hal_srng_src_get_next(hal_soc, mon_status_srng);
				continue;
			}
			qdf_nbuf_set_pktlen(status_nbuf, RX_BUFFER_SIZE);

			qdf_nbuf_unmap_single(soc->osdev, status_nbuf,
					      QDF_DMA_FROM_DEVICE);

			/* Put the status_nbuf to queue */
			qdf_nbuf_queue_add(&pdev->rx_status_q, status_nbuf);

		} else {
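			/*
			 * The ring entry carries a NULL buffer address
			 * (a previous replenish attempt failed), so pull a
			 * fresh descriptor from the status descriptor free
			 * list before preparing a new buffer below.
			 */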
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;
			uint32_t num_alloc_desc;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			num_alloc_desc = dp_rx_get_free_desc_list(soc, mac_id,
							rx_desc_pool,
							1,
							&desc_list,
							&tail);

			/*
			 * No free descriptors available
			 */
			if (qdf_unlikely(num_alloc_desc == 0)) {
				work_done++;
				break;
			}

			rx_desc = &desc_list->rx_desc;
		}

		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);

		/*
		 * qdf_nbuf alloc or map failed,
		 * free the dp rx desc to free list,
		 * fill in NULL dma address at current HP entry,
		 * keep HP in mon_status_ring unchanged,
		 * wait next time dp_rx_mon_status_srng_process
		 * to fill in buffer at current HP.
		 */
		if (qdf_unlikely(!status_nbuf)) {
			union dp_rx_desc_list_elem_t *desc_list = NULL;
			union dp_rx_desc_list_elem_t *tail = NULL;
			struct rx_desc_pool *rx_desc_pool;

			rx_desc_pool = &soc->rx_desc_status[mac_id];

			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: fail to allocate or map qdf_nbuf",
				  __func__);
			dp_rx_add_to_free_desc_list(&desc_list,
						    &tail, rx_desc);
			dp_rx_add_desc_list_to_free_list(soc, &desc_list,
						&tail, mac_id, rx_desc_pool);

			hal_rxdma_buff_addr_info_set(
				rxdma_mon_status_ring_entry,
				0, 0, HAL_RX_BUF_RBM_SW3_BM);
			work_done++;
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);

		rx_desc->nbuf = status_nbuf;
		rx_desc->in_use = 1;

		hal_rxdma_buff_addr_info_set(rxdma_mon_status_ring_entry,
			paddr, rx_desc->cookie, HAL_RX_BUF_RBM_SW3_BM);

		hal_srng_src_get_next(hal_soc, mon_status_srng);
		work_done++;
	}
done:

	hal_srng_access_end(hal_soc, mon_status_srng);

	return work_done;

}
/*
 * dp_rx_mon_status_process() - Process the monitor status ring and
 * the TLVs in the queued status buffers.
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries that were processed.
 */
static inline uint32_t
dp_rx_mon_status_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	uint32_t work_done;
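
	/*
	 * Reap the status ring first, then spend the remaining quota on TLV
	 * parsing and monitor destination ring processing.
	 */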
	work_done = dp_rx_mon_status_srng_process(soc, mac_id, quota);
	quota -= work_done;
	dp_rx_mon_status_process_tlv(soc, mac_id, quota);

	return work_done;
}

/**
 * dp_mon_process() - Main monitor mode processing routine.
 * It runs the monitor status ring processing and then the monitor
 * destination ring processing.
 * Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @quota: No. of status ring entries that can be serviced in one shot.
 *
 * Return: uint32_t: No. of ring entries that were processed.
 */
uint32_t
dp_mon_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
{
	return dp_rx_mon_status_process(soc, mac_id, quota);
}
/**
 * dp_rx_pdev_mon_status_detach() - detach dp rx for status ring
 * @pdev: core txrx pdev context
 * @mac_id: mac_id/pdev_id correspondingly for MCL and WIN
 *
 * This function detaches the DP RX status ring from the
 * main device context and frees the DP Rx resources for
 * the status ring.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_status_detach(struct dp_pdev *pdev, int mac_id)
{
	struct dp_soc *soc = pdev->soc;
	struct rx_desc_pool *rx_desc_pool;

	rx_desc_pool = &soc->rx_desc_status[mac_id];
	if (rx_desc_pool->pool_size != 0) {
		if (!dp_is_soc_reinit(soc))
			dp_rx_desc_nbuf_and_pool_free(soc, mac_id,
						      rx_desc_pool);
		else
			dp_rx_desc_nbuf_free(soc, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}
/*
 * dp_rx_mon_status_buffers_replenish() - replenish the monitor status ring
 * with rx nbufs; called during dp rx monitor status ring initialization
 *
 * @soc: core txrx main context
 * @mac_id: mac_id which is one of 3 mac_ids
 * @dp_rxdma_srng: dp monitor status circular ring
 * @rx_desc_pool: Pointer to Rx descriptor pool
 * @num_req_buffers: number of buffers to be replenished
 * @desc_list: list of descs if called from dp rx monitor status
 *	       process or NULL during dp rx initialization or
 *	       out of buffer interrupt
 * @tail: tail of descs list
 * @owner: who owns the nbuf (host, NSS etc...)
 *
 * Return: return success or failure
 */
static inline
QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
	uint32_t mac_id,
	struct dp_srng *dp_rxdma_srng,
	struct rx_desc_pool *rx_desc_pool,
	uint32_t num_req_buffers,
	union dp_rx_desc_list_elem_t **desc_list,
	union dp_rx_desc_list_elem_t **tail,
	uint8_t owner)
{
	uint32_t num_alloc_desc;
	uint16_t num_desc_to_free = 0;
	uint32_t num_entries_avail;
	uint32_t count = 0;
	int sync_hw_ptr = 1;
	qdf_dma_addr_t paddr;
	qdf_nbuf_t rx_netbuf;
	void *rxdma_ring_entry;
	union dp_rx_desc_list_elem_t *next;
	void *rxdma_srng;
	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);

	rxdma_srng = dp_rxdma_srng->hal_srng;

	qdf_assert(rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "[%s][%d] requested %d buffers for replenish",
		  __func__, __LINE__, num_req_buffers);

	/*
	 * if desc_list is NULL, allocate the descs from freelist
	 */
	if (!(*desc_list)) {
		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
							  rx_desc_pool,
							  num_req_buffers,
							  desc_list,
							  tail);

		if (!num_alloc_desc) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "[%s][%d] no free rx_descs in freelist",
				  __func__, __LINE__);
			return QDF_STATUS_E_NOMEM;
		}

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "[%s][%d] %d rx desc allocated", __func__, __LINE__,
			  num_alloc_desc);

		num_req_buffers = num_alloc_desc;
	}

	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
						   rxdma_srng, sync_hw_ptr);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "[%s][%d] no of available entries in rxdma ring: %d",
		  __func__, __LINE__, num_entries_avail);
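
	/*
	 * Cap the request at the number of free ring entries; any surplus
	 * descriptors left on *desc_list are returned to the free list at
	 * the end of this function.
	 */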
	if (num_entries_avail < num_req_buffers) {
		num_desc_to_free = num_req_buffers - num_entries_avail;
		num_req_buffers = num_entries_avail;
	}

	while (count < num_req_buffers) {
		rx_netbuf = dp_rx_nbuf_prepare(dp_soc, dp_pdev);

		/*
		 * qdf_nbuf alloc or map failed,
		 * keep HP in mon_status_ring unchanged,
		 * wait dp_rx_mon_status_srng_process
		 * to fill in buffer at current HP.
		 */
		if (qdf_unlikely(!rx_netbuf)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: qdf_nbuf allocate or map fail, count %d",
				  __func__, count);
			break;
		}

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);

		next = (*desc_list)->next;
		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);

		if (qdf_unlikely(!rxdma_ring_entry)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "[%s][%d] rxdma_ring_entry is NULL, count - %d",
				  __func__, __LINE__, count);
			qdf_nbuf_unmap_single(dp_soc->osdev, rx_netbuf,
					      QDF_DMA_FROM_DEVICE);
			qdf_nbuf_free(rx_netbuf);
			break;
		}

		(*desc_list)->rx_desc.nbuf = rx_netbuf;
		(*desc_list)->rx_desc.in_use = 1;
		count++;

		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     owner);

		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
			  "[%s][%d] rx_desc=%pK, cookie=%d, nbuf=%pK, paddr=%pK",
			  __func__, __LINE__, &(*desc_list)->rx_desc,
			  (*desc_list)->rx_desc.cookie, rx_netbuf,
			  (void *)paddr);

		*desc_list = next;
	}

	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "successfully replenished %d buffers", num_req_buffers);

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%d rx desc added back to free list", num_desc_to_free);

	/*
	 * add any available free desc back to the free list
	 */
	if (*desc_list) {
		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list, tail,
						 mac_id, rx_desc_pool);
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * dp_rx_pdev_mon_status_attach() - attach DP RX monitor status ring
 * @pdev: core txrx pdev context
 * @ring_id: ring number
 *
 * This function will attach a DP RX monitor status ring into pDEV
 * and replenish monitor status ring with buffer.
 *
 * Return: QDF_STATUS_SUCCESS: success
 *         QDF_STATUS_E_RESOURCES: Error return
 */
QDF_STATUS
dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int ring_id)
{
	struct dp_soc *soc = pdev->soc;
	union dp_rx_desc_list_elem_t *desc_list = NULL;
	union dp_rx_desc_list_elem_t *tail = NULL;
	struct dp_srng *mon_status_ring;
	uint32_t num_entries;
	uint32_t i;
	struct rx_desc_pool *rx_desc_pool;
	QDF_STATUS status;
	int mac_for_pdev = dp_get_mac_id_for_mac(soc, ring_id);

	mon_status_ring = &pdev->rxdma_mon_status_ring[mac_for_pdev];

	num_entries = mon_status_ring->num_entries;

	rx_desc_pool = &soc->rx_desc_status[ring_id];

	dp_info("Mon RX Status Pool[%d] entries=%d",
		ring_id, num_entries);

	status = dp_rx_desc_pool_alloc(soc, ring_id, num_entries + 1,
				       rx_desc_pool);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	dp_debug("Mon RX Status Buffers Replenish ring_id=%d", ring_id);

	status = dp_rx_mon_status_buffers_replenish(soc, ring_id,
						    mon_status_ring,
						    rx_desc_pool,
						    num_entries,
						    &desc_list, &tail,
						    HAL_RX_BUF_RBM_SW3_BM);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	qdf_nbuf_queue_init(&pdev->rx_status_q);
	qdf_nbuf_queue_init(&pdev->rx_ppdu_buf_q);

	pdev->mon_ppdu_status = DP_PPDU_STATUS_START;

	qdf_mem_zero(&(pdev->ppdu_info.rx_status),
		     sizeof(pdev->ppdu_info.rx_status));

	qdf_mem_zero(&pdev->rx_mon_stats,
		     sizeof(pdev->rx_mon_stats));

	dp_rx_mon_init_dbg_ppdu_stats(&pdev->ppdu_info,
				      &pdev->rx_mon_stats);
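
	/*
	 * Initialize the per-MU-user MPDU queues, header-expectation flags
	 * and MSDU lists before enhanced RX capture is enabled.
	 */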
	for (i = 0; i < MAX_MU_USERS; i++) {
		qdf_nbuf_queue_init(&pdev->mpdu_q[i]);
		pdev->is_mpdu_hdr[i] = true;
	}
	qdf_mem_zero(pdev->msdu_list, sizeof(pdev->msdu_list[MAX_MU_USERS]));

	pdev->rx_enh_capture_mode = CDP_RX_ENH_CAPTURE_DISABLED;

	return QDF_STATUS_SUCCESS;
}