dp_mon.c

/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <dp_types.h>
#include "dp_rx.h"
#include "dp_peer.h"
#include <dp_htt.h>
#include <dp_rx_mon.h>
#include <dp_mon_filter.h>
#include <dp_mon.h>
#include "htt_ppdu_stats.h"
#define RNG_ERR "SRNG setup failed for "
#define mon_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)

QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
                         int ring_type, uint32_t num_entries,
                         bool cached);
void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng);
QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
                        int ring_type, int ring_num, int mac_id);
void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
                    int ring_type, int ring_num);
QDF_STATUS dp_htt_ppdu_stats_attach(struct dp_pdev *pdev);
void dp_htt_ppdu_stats_detach(struct dp_pdev *pdev);
void dp_neighbour_peers_detach(struct dp_pdev *pdev);
void dp_pktlogmod_exit(struct dp_pdev *handle);
QDF_STATUS dp_vdev_set_monitor_mode_rings(struct dp_pdev *pdev,
                                          uint8_t delayed_replenish);
#if !defined(DISABLE_MON_CONFIG)
/**
 * dp_mon_rings_deinit() - Deinitialize monitor rings
 * @pdev: DP pdev handle
 *
 * Return: None
 */
static void dp_mon_rings_deinit(struct dp_pdev *pdev)
{
        int mac_id = 0;
        struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
        struct dp_soc *soc = pdev->soc;

        pdev_cfg_ctx = pdev->wlan_cfg_ctx;

        for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
                int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
                                                         pdev->pdev_id);

                dp_srng_deinit(soc, &soc->rxdma_mon_status_ring[lmac_id],
                               RXDMA_MONITOR_STATUS, 0);

                if (!soc->wlan_cfg_ctx->rxdma1_enable)
                        continue;

                dp_srng_deinit(soc, &soc->rxdma_mon_buf_ring[lmac_id],
                               RXDMA_MONITOR_BUF, 0);
                dp_srng_deinit(soc, &soc->rxdma_mon_dst_ring[lmac_id],
                               RXDMA_MONITOR_DST, 0);
                dp_srng_deinit(soc, &soc->rxdma_mon_desc_ring[lmac_id],
                               RXDMA_MONITOR_DESC, 0);
        }
}

/**
 * dp_mon_rings_free() - free monitor rings
 * @pdev: Datapath pdev handle
 *
 * Return: None
 */
static void dp_mon_rings_free(struct dp_pdev *pdev)
{
        int mac_id = 0;
        struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;
        struct dp_soc *soc = pdev->soc;

        pdev_cfg_ctx = pdev->wlan_cfg_ctx;

        for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
                int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
                                                         pdev->pdev_id);

                dp_srng_free(soc, &soc->rxdma_mon_status_ring[lmac_id]);

                if (!soc->wlan_cfg_ctx->rxdma1_enable)
                        continue;

                dp_srng_free(soc, &soc->rxdma_mon_buf_ring[lmac_id]);
                dp_srng_free(soc, &soc->rxdma_mon_dst_ring[lmac_id]);
                dp_srng_free(soc, &soc->rxdma_mon_desc_ring[lmac_id]);
        }
}
/**
 * dp_mon_rings_init() - Initialize monitor srng rings
 * @soc: Datapath soc handle
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *         QDF_STATUS_E_NOMEM on failure
 */
static
QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
{
        int mac_id = 0;
        struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;

        pdev_cfg_ctx = pdev->wlan_cfg_ctx;

        for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
                int lmac_id = dp_get_lmac_id_for_pdev_id(soc, mac_id,
                                                         pdev->pdev_id);

                if (dp_srng_init(soc, &soc->rxdma_mon_status_ring[lmac_id],
                                 RXDMA_MONITOR_STATUS, 0, lmac_id)) {
                        mon_init_err("%pK: " RNG_ERR "rxdma_mon_status_ring",
                                     soc);
                        goto fail1;
                }

                if (!soc->wlan_cfg_ctx->rxdma1_enable)
                        continue;

                if (dp_srng_init(soc, &soc->rxdma_mon_buf_ring[lmac_id],
                                 RXDMA_MONITOR_BUF, 0, lmac_id)) {
                        mon_init_err("%pK: " RNG_ERR "rxdma_mon_buf_ring",
                                     soc);
                        goto fail1;
                }

                if (dp_srng_init(soc, &soc->rxdma_mon_dst_ring[lmac_id],
                                 RXDMA_MONITOR_DST, 0, lmac_id)) {
                        mon_init_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
                        goto fail1;
                }

                if (dp_srng_init(soc, &soc->rxdma_mon_desc_ring[lmac_id],
                                 RXDMA_MONITOR_DESC, 0, lmac_id)) {
                        mon_init_err("%pK: " RNG_ERR "rxdma_mon_desc_ring",
                                     soc);
                        goto fail1;
                }
        }
        return QDF_STATUS_SUCCESS;

fail1:
        dp_mon_rings_deinit(pdev);
        return QDF_STATUS_E_NOMEM;
}
/**
 * dp_mon_rings_alloc() - Allocate memory for monitor srng rings
 * @soc: Datapath soc handle
 * @pdev: Datapath pdev handle
 *
 * Return: QDF_STATUS_SUCCESS on success
 *         QDF_STATUS_E_NOMEM on failure
 */
static
QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
        int mac_id = 0;
        int entries;
        struct wlan_cfg_dp_pdev_ctxt *pdev_cfg_ctx;

        pdev_cfg_ctx = pdev->wlan_cfg_ctx;

        for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
                int lmac_id =
                        dp_get_lmac_id_for_pdev_id(soc, mac_id, pdev->pdev_id);

                entries = wlan_cfg_get_dma_mon_stat_ring_size(pdev_cfg_ctx);
                if (dp_srng_alloc(soc, &soc->rxdma_mon_status_ring[lmac_id],
                                  RXDMA_MONITOR_STATUS, entries, 0)) {
                        mon_init_err("%pK: " RNG_ERR "rxdma_mon_status_ring",
                                     soc);
                        goto fail1;
                }

                if (!soc->wlan_cfg_ctx->rxdma1_enable)
                        continue;

                entries = wlan_cfg_get_dma_mon_buf_ring_size(pdev_cfg_ctx);
                if (dp_srng_alloc(soc, &soc->rxdma_mon_buf_ring[lmac_id],
                                  RXDMA_MONITOR_BUF, entries, 0)) {
                        mon_init_err("%pK: " RNG_ERR "rxdma_mon_buf_ring",
                                     soc);
                        goto fail1;
                }

                entries = wlan_cfg_get_dma_mon_dest_ring_size(pdev_cfg_ctx);
                if (dp_srng_alloc(soc, &soc->rxdma_mon_dst_ring[lmac_id],
                                  RXDMA_MONITOR_DST, entries, 0)) {
                        mon_init_err("%pK: " RNG_ERR "rxdma_mon_dst_ring", soc);
                        goto fail1;
                }

                entries = wlan_cfg_get_dma_mon_desc_ring_size(pdev_cfg_ctx);
                if (dp_srng_alloc(soc, &soc->rxdma_mon_desc_ring[lmac_id],
                                  RXDMA_MONITOR_DESC, entries, 0)) {
                        mon_init_err("%pK: " RNG_ERR "rxdma_mon_desc_ring",
                                     soc);
                        goto fail1;
                }
        }
        return QDF_STATUS_SUCCESS;

fail1:
        dp_mon_rings_free(pdev);
        return QDF_STATUS_E_NOMEM;
}
#else
static void dp_mon_rings_free(struct dp_pdev *pdev)
{
}

static void dp_mon_rings_deinit(struct dp_pdev *pdev)
{
}

static
QDF_STATUS dp_mon_rings_init(struct dp_soc *soc, struct dp_pdev *pdev)
{
        return QDF_STATUS_SUCCESS;
}

static
QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
{
        return QDF_STATUS_SUCCESS;
}
#endif
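
/*
 * Informal ordering sketch, inferred from how the ring helpers above are used
 * later in this file: dp_mon_rings_alloc() is called from
 * dp_mon_pdev_attach(), dp_mon_rings_init() from dp_mon_pdev_init(),
 * dp_mon_rings_deinit() from dp_mon_pdev_deinit(), and dp_mon_rings_free()
 * from dp_mon_pdev_detach(), i.e. alloc -> init -> deinit -> free.
 */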
#ifdef QCA_SUPPORT_FULL_MON
static inline QDF_STATUS
dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
                        uint8_t val)
{
        struct dp_soc *soc = (struct dp_soc *)soc_handle;

        soc->full_mon_mode = val;
        dp_cdp_err("Configure full monitor mode val: %d ", val);

        return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS
dp_config_full_mon_mode(struct cdp_soc_t *soc_handle,
                        uint8_t val)
{
        return QDF_STATUS_SUCCESS;
}
#endif
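
/*
 * dp_config_full_mon_mode() is exported below as the cdp op
 * dp_ops_mon.config_full_mon_mode and is also invoked internally from
 * dp_mon_soc_cfg_init() for QCN9000 when CFG_DP_FULL_MON_MODE is set.
 * An illustrative (not normative) external invocation, assuming the caller
 * holds the registered cdp ops, would look like:
 *
 *      soc->cdp_soc.ops->mon_ops->config_full_mon_mode(cdp_soc, 1);
 */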
static inline void
dp_pdev_disable_mcopy_code(struct dp_pdev *pdev)
{
        pdev->mcopy_mode = M_COPY_DISABLED;
        pdev->monitor_configured = false;
        pdev->monitor_vdev = NULL;
}
#ifdef QCA_SUPPORT_FULL_MON
static inline QDF_STATUS
dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
{
        struct dp_soc *soc = pdev->soc;
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        if (!soc->full_mon_mode)
                return QDF_STATUS_SUCCESS;

        if ((htt_h2t_full_mon_cfg(soc->htt_handle,
                                  pdev->pdev_id,
                                  val)) != QDF_STATUS_SUCCESS) {
                status = QDF_STATUS_E_FAILURE;
        }

        return status;
}
#else
static inline QDF_STATUS
dp_soc_config_full_mon_mode(struct dp_pdev *pdev, enum dp_full_mon_config val)
{
        return QDF_STATUS_SUCCESS;
}
#endif
/**
 * dp_reset_monitor_mode() - Disable monitor mode
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of datapath PDEV handle
 * @special_monitor: Flag to denote if its smart monitor mode
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_reset_monitor_mode(struct cdp_soc_t *soc_hdl,
                                 uint8_t pdev_id,
                                 uint8_t special_monitor)
{
        struct dp_soc *soc = (struct dp_soc *)soc_hdl;
        struct dp_pdev *pdev =
                dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
                                                   pdev_id);
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        if (!pdev)
                return QDF_STATUS_E_FAILURE;

        qdf_spin_lock_bh(&pdev->mon_lock);

        dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_DISABLE);
        pdev->monitor_vdev = NULL;
        pdev->monitor_configured = false;

        /*
         * Lite monitor mode, smart monitor mode and monitor mode all use
         * this API for filter reset and mode disable.
         */
        if (pdev->mcopy_mode) {
#if defined(FEATURE_PERPKT_INFO)
                dp_pdev_disable_mcopy_code(pdev);
                dp_mon_filter_reset_mcopy_mode(pdev);
#endif /* FEATURE_PERPKT_INFO */
        } else if (special_monitor) {
#if defined(ATH_SUPPORT_NAC)
                dp_mon_filter_reset_smart_monitor(pdev);
#endif /* ATH_SUPPORT_NAC */
        } else {
                dp_mon_filter_reset_mon_mode(pdev);
        }

        status = dp_mon_filter_update(pdev);
        if (status != QDF_STATUS_SUCCESS) {
                dp_rx_mon_dest_err("%pK: Failed to reset monitor filters",
                                   soc);
        }

        qdf_spin_unlock_bh(&pdev->mon_lock);
        return QDF_STATUS_SUCCESS;
}
/**
 * dp_pdev_set_advance_monitor_filter() - Set DP PDEV monitor filter
 * @soc_hdl: soc handle
 * @pdev_id: id of Datapath PDEV handle
 * @filter_val: Flag to select Filter for monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS
dp_pdev_set_advance_monitor_filter(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                                   struct cdp_monitor_filter *filter_val)
{
        /* Many monitor VAPs can exist in a system, but only one can be up
         * at any time.
         */
        struct dp_soc *soc = (struct dp_soc *)soc_hdl;
        struct dp_vdev *vdev;
        struct dp_pdev *pdev =
                dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
                                                   pdev_id);
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        if (!pdev)
                return QDF_STATUS_E_FAILURE;

        vdev = pdev->monitor_vdev;

        if (!vdev)
                return QDF_STATUS_E_FAILURE;

        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
                  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
                  pdev, pdev_id, soc, vdev);

        /* Check if the current pdev's monitor_vdev exists */
        if (!pdev->monitor_vdev) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
                          "vdev=%pK", vdev);
                qdf_assert(vdev);
        }

        /* update filter mode, type in pdev structure */
        pdev->mon_filter_mode = filter_val->mode;
        pdev->fp_mgmt_filter = filter_val->fp_mgmt;
        pdev->fp_ctrl_filter = filter_val->fp_ctrl;
        pdev->fp_data_filter = filter_val->fp_data;
        pdev->mo_mgmt_filter = filter_val->mo_mgmt;
        pdev->mo_ctrl_filter = filter_val->mo_ctrl;
        pdev->mo_data_filter = filter_val->mo_data;

        dp_mon_filter_setup_mon_mode(pdev);
        status = dp_mon_filter_update(pdev);
        if (status != QDF_STATUS_SUCCESS) {
                dp_rx_mon_dest_err("%pK: Failed to set filter for adv mon mode",
                                   soc);
                dp_mon_filter_reset_mon_mode(pdev);
        }

        return status;
}
/**
 * dp_deliver_tx_mgmt() - Deliver mgmt frame for tx capture
 * @cdp_soc: data path soc handle
 * @pdev_id: pdev id
 * @nbuf: Management frame buffer
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */
static QDF_STATUS
dp_deliver_tx_mgmt(struct cdp_soc_t *cdp_soc, uint8_t pdev_id, qdf_nbuf_t nbuf)
{
        struct dp_pdev *pdev =
                dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)cdp_soc,
                                                   pdev_id);

        if (!pdev)
                return QDF_STATUS_E_FAILURE;

        dp_deliver_mgmt_frm(pdev, nbuf);
        return QDF_STATUS_SUCCESS;
}
/**
 * dp_vdev_set_monitor_mode() - Set DP VDEV to monitor mode
 * @dp_soc: Datapath soc handle
 * @vdev_id: id of Datapath VDEV handle
 * @special_monitor: Flag to denote if its smart monitor mode
 *
 * Return: 0 on success, not 0 on failure
 */
static QDF_STATUS dp_vdev_set_monitor_mode(struct cdp_soc_t *dp_soc,
                                           uint8_t vdev_id,
                                           uint8_t special_monitor)
{
        struct dp_soc *soc = (struct dp_soc *)dp_soc;
        struct dp_pdev *pdev;
        struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
                                                     DP_MOD_ID_CDP);
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        if (!vdev)
                return QDF_STATUS_E_FAILURE;

        pdev = vdev->pdev;
        pdev->monitor_vdev = vdev;
        QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
                  "pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
                  pdev, pdev->pdev_id, pdev->soc, vdev);

        /*
         * Do not configure the monitor buf ring and filter for smart and
         * lite monitor modes. For smart monitor, filters are added along
         * with the first NAC; for lite monitor, the required configuration
         * is done through dp_set_pdev_param.
         */
        if (special_monitor) {
                status = QDF_STATUS_SUCCESS;
                goto fail;
        }

        /* Check if the current pdev's monitor_vdev exists */
        if (pdev->monitor_configured) {
                QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
                          "monitor vap already created vdev=%pK\n", vdev);
                status = QDF_STATUS_E_RESOURCES;
                goto fail;
        }

        pdev->monitor_configured = true;

        dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
        dp_mon_filter_setup_mon_mode(pdev);
        status = dp_mon_filter_update(pdev);
        if (status != QDF_STATUS_SUCCESS) {
                dp_cdp_err("%pK: Failed to reset monitor filters", soc);
                dp_mon_filter_reset_mon_mode(pdev);
                pdev->monitor_configured = false;
                pdev->monitor_vdev = NULL;
        }

fail:
        dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
        return status;
}
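
/*
 * Illustrative call path only: dp_vdev_set_monitor_mode() is static and is
 * reached through the cdp ops; dp_mon_cdp_ops_register() below hooks it up as
 * ops->cmn_drv_ops->txrx_set_monitor_mode. A caller holding the cdp soc handle
 * would reach it roughly as (sketch, argument values assumed):
 *
 *      soc->cdp_soc.ops->cmn_drv_ops->txrx_set_monitor_mode(cdp_soc,
 *                                                           vdev_id, 0);
 */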
/**
 * dp_config_debug_sniffer() - API to enable/disable debug sniffer
 * @pdev: DP_PDEV handle
 * @val: user provided value
 *
 * Return: 0 for success. nonzero for failure.
 */
static QDF_STATUS
dp_config_debug_sniffer(struct dp_pdev *pdev, int val)
{
        QDF_STATUS status = QDF_STATUS_SUCCESS;

        /*
         * Note: The mirror copy mode cannot co-exist with any other
         * monitor modes. Hence disabling the filter for this mode will
         * reset the monitor destination ring filters.
         */
        if (pdev->mcopy_mode) {
#ifdef FEATURE_PERPKT_INFO
                dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_DISABLE);
                dp_pdev_disable_mcopy_code(pdev);
                dp_mon_filter_reset_mcopy_mode(pdev);
                status = dp_mon_filter_update(pdev);
                if (status != QDF_STATUS_SUCCESS) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  FL("Failed to reset M copy mode filters"));
                }
#endif /* FEATURE_PERPKT_INFO */
        }

        switch (val) {
        case 0:
                pdev->tx_sniffer_enable = 0;
                pdev->monitor_configured = false;

                /*
                 * We don't need to reset the Rx monitor status ring or call
                 * the API dp_ppdu_ring_reset() if all debug sniffer modes are
                 * disabled. The Rx monitor status ring will be disabled when
                 * the last mode using the monitor status ring gets disabled.
                 */
                if (!pdev->pktlog_ppdu_stats && !pdev->enhanced_stats_en &&
                    !pdev->bpr_enable) {
                        dp_h2t_cfg_stats_msg_send(pdev, 0, pdev->pdev_id);
                } else if (pdev->enhanced_stats_en && !pdev->bpr_enable) {
                        dp_h2t_cfg_stats_msg_send(pdev,
                                                  DP_PPDU_STATS_CFG_ENH_STATS,
                                                  pdev->pdev_id);
                } else if (!pdev->enhanced_stats_en && pdev->bpr_enable) {
                        dp_h2t_cfg_stats_msg_send(pdev,
                                                  DP_PPDU_STATS_CFG_BPR_ENH,
                                                  pdev->pdev_id);
                } else {
                        dp_h2t_cfg_stats_msg_send(pdev,
                                                  DP_PPDU_STATS_CFG_BPR,
                                                  pdev->pdev_id);
                }
                break;

        case 1:
                pdev->tx_sniffer_enable = 1;
                pdev->monitor_configured = false;

                if (!pdev->pktlog_ppdu_stats)
                        dp_h2t_cfg_stats_msg_send(pdev,
                                                  DP_PPDU_STATS_CFG_SNIFFER,
                                                  pdev->pdev_id);
                break;

        case 2:
        case 4:
                if (pdev->monitor_vdev) {
                        status = QDF_STATUS_E_RESOURCES;
                        break;
                }

#ifdef FEATURE_PERPKT_INFO
                pdev->mcopy_mode = val;
                pdev->tx_sniffer_enable = 0;
                pdev->monitor_configured = true;

                if (!wlan_cfg_is_delay_mon_replenish(pdev->soc->wlan_cfg_ctx))
                        dp_vdev_set_monitor_mode_rings(pdev, true);

                /*
                 * Setup the M copy mode filter.
                 */
                dp_soc_config_full_mon_mode(pdev, DP_FULL_MON_ENABLE);
                dp_mon_filter_setup_mcopy_mode(pdev);
                status = dp_mon_filter_update(pdev);
                if (status != QDF_STATUS_SUCCESS) {
                        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                                  FL("Failed to set M_copy mode filters"));
                        dp_mon_filter_reset_mcopy_mode(pdev);
                        dp_pdev_disable_mcopy_code(pdev);
                        return status;
                }

                if (!pdev->pktlog_ppdu_stats)
                        dp_h2t_cfg_stats_msg_send(pdev,
                                                  DP_PPDU_STATS_CFG_SNIFFER,
                                                  pdev->pdev_id);
#endif /* FEATURE_PERPKT_INFO */
                break;

        default:
                QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
                          "Invalid value");
                break;
        }

        return status;
}
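
/*
 * Quick reference for the debug-sniffer values handled above: 0 disables tx
 * sniffer/mcopy, 1 enables tx sniffer PPDU stats, and 2 or 4 enable the
 * mirror-copy (M copy) variants, which also configure the monitor rings and
 * filters. Callers reach this helper through
 * monitor_ops.mon_config_debug_sniffer, registered later in this file.
 */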
QDF_STATUS dp_mon_soc_cfg_init(struct dp_soc *soc)
{
        int target_type;

        target_type = hal_get_target_type(soc->hal_soc);

        switch (target_type) {
        case TARGET_TYPE_QCA6290:
        case TARGET_TYPE_QCA6390:
        case TARGET_TYPE_QCA6490:
        case TARGET_TYPE_QCA6750:
                break;
        case TARGET_TYPE_QCA8074:
                wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
                                                           MON_BUF_MIN_ENTRIES);
                break;
        case TARGET_TYPE_QCA8074V2:
        case TARGET_TYPE_QCA6018:
        case TARGET_TYPE_QCA9574:
                wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
                                                           MON_BUF_MIN_ENTRIES);
                soc->hw_nac_monitor_support = 1;
                break;
        case TARGET_TYPE_QCN9000:
                wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
                                                           MON_BUF_MIN_ENTRIES);
                soc->hw_nac_monitor_support = 1;
                if (cfg_get(soc->ctrl_psoc, CFG_DP_FULL_MON_MODE))
                        dp_config_full_mon_mode((struct cdp_soc_t *)soc, 1);
                break;
        case TARGET_TYPE_QCA5018:
        case TARGET_TYPE_QCN6122:
                wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
                                                           MON_BUF_MIN_ENTRIES);
                soc->hw_nac_monitor_support = 1;
                break;
        case TARGET_TYPE_QCN9224:
                wlan_cfg_set_mon_delayed_replenish_entries(soc->wlan_cfg_ctx,
                                                           MON_BUF_MIN_ENTRIES);
                soc->hw_nac_monitor_support = 1;
                break;
        default:
                qdf_print("%s: Unknown tgt type %d\n", __func__, target_type);
                qdf_assert_always(0);
                break;
        }

        return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_mon_pdev_attach(struct dp_pdev *pdev)
{
        struct dp_soc *soc;
        struct dp_mon_pdev *mon_pdev;

        if (!pdev) {
                mon_init_err("pdev is NULL");
                goto fail0;
        }

        soc = pdev->soc;

        mon_pdev = (struct dp_mon_pdev *)qdf_mem_malloc(sizeof(*mon_pdev));
        if (!mon_pdev) {
                mon_init_err("%pK: MONITOR pdev allocation failed", pdev);
                goto fail0;
        }

        if (dp_mon_rings_alloc(soc, pdev)) {
                mon_init_err("%pK: MONITOR rings setup failed", pdev);
                goto fail1;
        }

        /* Rx monitor mode specific init */
        if (dp_rx_pdev_mon_desc_pool_alloc(pdev)) {
                mon_init_err("%pK: dp_rx_pdev_mon_attach failed", pdev);
                goto fail2;
        }

        pdev->monitor_pdev = mon_pdev;

        return QDF_STATUS_SUCCESS;

fail2:
        dp_mon_rings_free(pdev);
fail1:
        pdev->monitor_pdev = NULL;
        qdf_mem_free(mon_pdev);
fail0:
        return QDF_STATUS_E_NOMEM;
}
QDF_STATUS dp_mon_pdev_detach(struct dp_pdev *pdev)
{
        struct dp_mon_pdev *mon_pdev;

        if (!pdev) {
                mon_init_err("pdev is NULL");
                return QDF_STATUS_E_FAILURE;
        }

        mon_pdev = pdev->monitor_pdev;

        dp_rx_pdev_mon_desc_pool_free(pdev);
        dp_mon_rings_free(pdev);

        pdev->monitor_pdev = NULL;
        qdf_mem_free(mon_pdev);

        return QDF_STATUS_SUCCESS;
}
QDF_STATUS dp_mon_pdev_init(struct dp_pdev *pdev)
{
        struct dp_soc *soc;

        if (!pdev) {
                mon_init_err("pdev is NULL");
                return QDF_STATUS_E_FAILURE;
        }

        soc = pdev->soc;

        pdev->filter = dp_mon_filter_alloc(pdev);
        if (!pdev->filter) {
                mon_init_err("%pK: Memory allocation failed for monitor filter",
                             pdev);
                return QDF_STATUS_E_NOMEM;
        }

        qdf_spinlock_create(&pdev->ppdu_stats_lock);
        qdf_spinlock_create(&pdev->neighbour_peer_mutex);
        pdev->monitor_configured = false;
        pdev->mon_chan_band = REG_BAND_UNKNOWN;

        TAILQ_INIT(&pdev->neighbour_peers_list);
        pdev->neighbour_peers_added = false;

        /* Monitor filter init */
        pdev->mon_filter_mode = MON_FILTER_ALL;
        pdev->fp_mgmt_filter = FILTER_MGMT_ALL;
        pdev->fp_ctrl_filter = FILTER_CTRL_ALL;
        pdev->fp_data_filter = FILTER_DATA_ALL;
        pdev->mo_mgmt_filter = FILTER_MGMT_ALL;
        pdev->mo_ctrl_filter = FILTER_CTRL_ALL;
        pdev->mo_data_filter = FILTER_DATA_ALL;

        if (dp_htt_ppdu_stats_attach(pdev) != QDF_STATUS_SUCCESS)
                goto fail0;

        if (dp_mon_rings_init(soc, pdev)) {
                mon_init_err("%pK: MONITOR rings setup failed", pdev);
                goto fail1;
        }

        /* initialize sw monitor rx descriptors */
        dp_rx_pdev_mon_desc_pool_init(pdev);

        /* allocate buffers and replenish the monitor RxDMA ring */
        dp_rx_pdev_mon_buffers_alloc(pdev);

        dp_tx_ppdu_stats_attach(pdev);

        return QDF_STATUS_SUCCESS;

fail1:
        dp_htt_ppdu_stats_detach(pdev);
fail0:
        qdf_spinlock_destroy(&pdev->neighbour_peer_mutex);
        qdf_spinlock_destroy(&pdev->ppdu_stats_lock);
        dp_mon_filter_dealloc(pdev);
        return QDF_STATUS_E_FAILURE;
}
QDF_STATUS dp_mon_pdev_deinit(struct dp_pdev *pdev)
{
        dp_tx_ppdu_stats_detach(pdev);
        dp_rx_pdev_mon_buffers_free(pdev);
        dp_rx_pdev_mon_desc_pool_deinit(pdev);
        dp_mon_rings_deinit(pdev);
        dp_htt_ppdu_stats_detach(pdev);
        qdf_spinlock_destroy(&pdev->ppdu_stats_lock);
        dp_neighbour_peers_detach(pdev);
        dp_pktlogmod_exit(pdev);

        if (pdev->filter)
                dp_mon_filter_dealloc(pdev);

        return QDF_STATUS_SUCCESS;
}
static struct dp_mon_ops monitor_ops = {
        .mon_soc_cfg_init = dp_mon_soc_cfg_init,
        .mon_pdev_attach = dp_mon_pdev_attach,
        .mon_pdev_detach = dp_mon_pdev_detach,
        .mon_pdev_init = dp_mon_pdev_init,
        .mon_pdev_deinit = dp_mon_pdev_deinit,
        .mon_config_debug_sniffer = dp_config_debug_sniffer,
};

static struct cdp_mon_ops dp_ops_mon = {
        .txrx_reset_monitor_mode = dp_reset_monitor_mode,
        /* Added support for HK advance filter */
        .txrx_set_advance_monitor_filter = dp_pdev_set_advance_monitor_filter,
        .txrx_deliver_tx_mgmt = dp_deliver_tx_mgmt,
        .config_full_mon_mode = dp_config_full_mon_mode,
};

static inline void dp_mon_ops_register(struct dp_mon_soc *mon_soc)
{
        mon_soc->mon_ops = &monitor_ops;
}

static inline void dp_mon_cdp_ops_register(struct dp_soc *soc)
{
        struct cdp_ops *ops = soc->cdp_soc.ops;

        if (!ops) {
                mon_init_err("cdp_ops is NULL");
                return;
        }

        ops->mon_ops = &dp_ops_mon;
        ops->cmn_drv_ops->txrx_set_monitor_mode = dp_vdev_set_monitor_mode;
}
QDF_STATUS dp_mon_soc_attach(struct dp_soc *soc)
{
        struct dp_mon_soc *mon_soc;

        if (!soc) {
                mon_init_err("dp_soc is NULL");
                return QDF_STATUS_E_FAILURE;
        }

        mon_soc = (struct dp_mon_soc *)qdf_mem_malloc(sizeof(*mon_soc));
        if (!mon_soc) {
                mon_init_err("%pK: mem allocation failed", soc);
                return QDF_STATUS_E_NOMEM;
        }

        /* register monitor ops */
        dp_mon_ops_register(mon_soc);
        soc->monitor_soc = mon_soc;

        dp_mon_cdp_ops_register(soc);

        return QDF_STATUS_SUCCESS;
}
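
/*
 * Minimal attach/teardown sketch, assuming a caller that already owns valid
 * dp_soc and dp_pdev objects. The ordering is inferred from the alloc/init
 * split used throughout this file, not a mandated sequence:
 *
 *      if (dp_mon_soc_attach(soc) == QDF_STATUS_SUCCESS &&
 *          dp_mon_pdev_attach(pdev) == QDF_STATUS_SUCCESS)
 *              dp_mon_pdev_init(pdev);
 *
 *      dp_mon_pdev_deinit(pdev);
 *      dp_mon_pdev_detach(pdev);
 *      dp_mon_soc_detach(soc);
 */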
QDF_STATUS dp_mon_soc_detach(struct dp_soc *soc)
{
        struct dp_mon_soc *mon_soc;

        if (!soc) {
                mon_init_err("dp_soc is NULL");
                return QDF_STATUS_E_FAILURE;
        }

        mon_soc = soc->monitor_soc;
        soc->monitor_soc = NULL;
        qdf_mem_free(mon_soc);

        return QDF_STATUS_SUCCESS;
}