/* pktlog_ac.c */
  1. /*
  2. * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. /*
  19. *
  20. * Permission to use, copy, modify, and/or distribute this software for any
  21. * purpose with or without fee is hereby granted, provided that the above
  22. * copyright notice and this permission notice appear in all copies.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  25. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  26. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  27. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  28. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  29. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  30. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  31. */
  32. #ifndef REMOVE_PKT_LOG
  33. #include "qdf_mem.h"
  34. #include "athdefs.h"
  35. #include "pktlog_ac_i.h"
  36. #include "cds_api.h"
  37. #include "wma_types.h"
  38. #include "htc.h"
  39. #include <cdp_txrx_cmn_struct.h>
  40. #include <cdp_txrx_ctrl.h>
  41. #ifdef PKTLOG_LEGACY
  42. #include "pktlog_wifi2.h"
  43. #else
  44. #include "pktlog_wifi3.h"
  45. #endif /* PKTLOG_LEGACY */
/*
 * WDI event subscriber objects, one per pktlog event stream. Their
 * .callback members are filled in by pktlog_callback_registration();
 * the subscribe/unsubscribe helpers below hand these to
 * cdp_wdi_event_sub()/cdp_wdi_event_unsub().
 */
wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_T2H_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_OFFLOAD_SUBSCRIBER;
/* Architecture-dependent pktlog entry points exported via pl_dev. */
struct ol_pl_arch_dep_funcs ol_pl_funcs = {
	.pktlog_init = pktlog_init,
	.pktlog_enable = pktlog_enable,
	.pktlog_setsize = pktlog_setsize,
	.pktlog_disable = pktlog_disable, /* valid for f/w disable */
};
/* Singleton pktlog device; published to callers via pktlog_sethandle(). */
struct pktlog_dev_t pl_dev = {
	.pl_funcs = &ol_pl_funcs,
};
  64. void pktlog_sethandle(struct pktlog_dev_t **pl_handle,
  65. struct hif_opaque_softc *scn)
  66. {
  67. pl_dev.scn = (ol_ath_generic_softc_handle) scn;
  68. *pl_handle = &pl_dev;
  69. }
/**
 * pktlog_set_pdev_id() - record the pdev id on the pktlog device
 * @pl_dev: pktlog device handle (must be non-NULL; caller's responsibility)
 * @pdev_id: pdev id to store
 */
void pktlog_set_pdev_id(struct pktlog_dev_t *pl_dev, uint8_t pdev_id)
{
	pl_dev->pdev_id = pdev_id;
}
  74. void pktlog_set_callback_regtype(
  75. enum pktlog_callback_regtype callback_type)
  76. {
  77. struct pktlog_dev_t *pl_dev = get_pktlog_handle();
  78. if (!pl_dev) {
  79. qdf_print("Invalid pl_dev");
  80. return;
  81. }
  82. pl_dev->callback_type = callback_type;
  83. }
  84. struct pktlog_dev_t *get_pktlog_handle(void)
  85. {
  86. uint8_t pdev_id = WMI_PDEV_ID_SOC;
  87. void *soc = cds_get_context(QDF_MODULE_ID_SOC);
  88. return cdp_get_pldev(soc, pdev_id);
  89. }
  90. static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
  91. WMI_CMD_ID cmd_id, bool ini_triggered,
  92. uint8_t user_triggered)
  93. {
  94. struct scheduler_msg msg = { 0 };
  95. QDF_STATUS status;
  96. struct ath_pktlog_wmi_params *param;
  97. param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
  98. if (!param)
  99. return A_NO_MEMORY;
  100. param->cmd_id = cmd_id;
  101. param->pktlog_event = event_types;
  102. param->ini_triggered = ini_triggered;
  103. param->user_triggered = user_triggered;
  104. msg.type = WMA_PKTLOG_ENABLE_REQ;
  105. msg.bodyptr = param;
  106. msg.bodyval = 0;
  107. status = scheduler_post_message(QDF_MODULE_ID_WMA,
  108. QDF_MODULE_ID_WMA,
  109. QDF_MODULE_ID_WMA, &msg);
  110. if (status != QDF_STATUS_SUCCESS) {
  111. qdf_mem_free(param);
  112. return A_ERROR;
  113. }
  114. return A_OK;
  115. }
  116. static inline A_STATUS
  117. pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
  118. bool ini_triggered, uint8_t user_triggered)
  119. {
  120. uint32_t types = 0;
  121. if (log_state & ATH_PKTLOG_TX)
  122. types |= WMI_PKTLOG_EVENT_TX;
  123. if (log_state & ATH_PKTLOG_RX)
  124. types |= WMI_PKTLOG_EVENT_RX;
  125. if (log_state & ATH_PKTLOG_RCFIND)
  126. types |= WMI_PKTLOG_EVENT_RCF;
  127. if (log_state & ATH_PKTLOG_RCUPDATE)
  128. types |= WMI_PKTLOG_EVENT_RCU;
  129. if (log_state & ATH_PKTLOG_SW_EVENT)
  130. types |= WMI_PKTLOG_EVENT_SW;
  131. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  132. "%s: Pktlog events: %d", __func__, types);
  133. return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
  134. ini_triggered, user_triggered);
  135. }
  136. #ifdef PKTLOG_LEGACY
  137. /**
  138. * wdi_pktlog_subscribe() - Subscribe pktlog callbacks
  139. * @pdev_id: pdev id
  140. * @log_state: Pktlog registration
  141. *
  142. * Return: zero on success, non-zero on failure
  143. */
  144. static inline A_STATUS
  145. wdi_pktlog_subscribe(uint8_t pdev_id, int32_t log_state)
  146. {
  147. void *soc = cds_get_context(QDF_MODULE_ID_SOC);
  148. if (pdev_id < 0) {
  149. qdf_print("Invalid pdev in %s", __func__);
  150. return A_ERROR;
  151. }
  152. if (log_state & ATH_PKTLOG_TX) {
  153. if (cdp_wdi_event_sub(soc, pdev_id, &PKTLOG_TX_SUBSCRIBER,
  154. WDI_EVENT_TX_STATUS)) {
  155. return A_ERROR;
  156. }
  157. }
  158. if (log_state & ATH_PKTLOG_RX) {
  159. if (cdp_wdi_event_sub(soc, pdev_id, &PKTLOG_RX_SUBSCRIBER,
  160. WDI_EVENT_RX_DESC)) {
  161. return A_ERROR;
  162. }
  163. if (cdp_wdi_event_sub(soc, pdev_id,
  164. &PKTLOG_RX_REMOTE_SUBSCRIBER,
  165. WDI_EVENT_RX_DESC_REMOTE)) {
  166. return A_ERROR;
  167. }
  168. }
  169. if (log_state & ATH_PKTLOG_RCFIND) {
  170. if (cdp_wdi_event_sub(soc, pdev_id,
  171. &PKTLOG_RCFIND_SUBSCRIBER,
  172. WDI_EVENT_RATE_FIND)) {
  173. return A_ERROR;
  174. }
  175. }
  176. if (log_state & ATH_PKTLOG_RCUPDATE) {
  177. if (cdp_wdi_event_sub(soc, pdev_id,
  178. &PKTLOG_RCUPDATE_SUBSCRIBER,
  179. WDI_EVENT_RATE_UPDATE)) {
  180. return A_ERROR;
  181. }
  182. }
  183. if (log_state & ATH_PKTLOG_SW_EVENT) {
  184. if (cdp_wdi_event_sub(soc, pdev_id,
  185. &PKTLOG_SW_EVENT_SUBSCRIBER,
  186. WDI_EVENT_SW_EVENT)) {
  187. return A_ERROR;
  188. }
  189. }
  190. return A_OK;
  191. }
  192. #else
  193. static inline A_STATUS
  194. wdi_pktlog_subscribe(uint8_t pdev_id, int32_t log_state)
  195. {
  196. void *soc = cds_get_context(QDF_MODULE_ID_SOC);
  197. if (pdev_id < 0) {
  198. qdf_print("Invalid pdev in %s", __func__);
  199. return A_ERROR;
  200. }
  201. if ((log_state & ATH_PKTLOG_TX) ||
  202. (log_state & ATH_PKTLOG_RCFIND) ||
  203. (log_state & ATH_PKTLOG_RCUPDATE) ||
  204. (log_state & ATH_PKTLOG_SW_EVENT)) {
  205. if (cdp_wdi_event_sub(soc,
  206. pdev_id,
  207. &PKTLOG_OFFLOAD_SUBSCRIBER,
  208. WDI_EVENT_OFFLOAD_ALL)) {
  209. return A_ERROR;
  210. }
  211. }
  212. if (log_state & ATH_PKTLOG_RX) {
  213. if (cdp_wdi_event_sub(soc, pdev_id,
  214. &PKTLOG_RX_SUBSCRIBER,
  215. WDI_EVENT_RX_DESC)) {
  216. return A_ERROR;
  217. }
  218. }
  219. if (log_state & ATH_PKTLOG_SW_EVENT) {
  220. if (cdp_wdi_event_sub(soc, pdev_id,
  221. &PKTLOG_SW_EVENT_SUBSCRIBER,
  222. WDI_EVENT_SW_EVENT)) {
  223. return A_ERROR;
  224. }
  225. }
  226. if (log_state & ATH_PKTLOG_LITE_T2H) {
  227. if (cdp_wdi_event_sub(soc, pdev_id,
  228. &PKTLOG_LITE_T2H_SUBSCRIBER,
  229. WDI_EVENT_LITE_T2H)) {
  230. return A_ERROR;
  231. }
  232. }
  233. if (log_state & ATH_PKTLOG_LITE_RX) {
  234. if (cdp_wdi_event_sub(soc, pdev_id,
  235. &PKTLOG_LITE_RX_SUBSCRIBER,
  236. WDI_EVENT_LITE_RX)) {
  237. return A_ERROR;
  238. }
  239. }
  240. return A_OK;
  241. }
  242. #endif
  243. void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data,
  244. u_int16_t peer_id, uint32_t status)
  245. {
  246. switch (event) {
  247. case WDI_EVENT_OFFLOAD_ALL:
  248. {
  249. if (process_offload_pktlog_wifi3(pdev, log_data)) {
  250. qdf_print("Unable to process offload info");
  251. return;
  252. }
  253. break;
  254. }
  255. case WDI_EVENT_TX_STATUS:
  256. {
  257. /*
  258. * process TX message
  259. */
  260. if (process_tx_info(pdev, log_data)) {
  261. qdf_print("Unable to process TX info");
  262. return;
  263. }
  264. break;
  265. }
  266. case WDI_EVENT_RX_DESC:
  267. {
  268. /*
  269. * process RX message for local frames
  270. */
  271. if (process_rx_info(pdev, log_data)) {
  272. qdf_print("Unable to process RX info");
  273. return;
  274. }
  275. break;
  276. }
  277. case WDI_EVENT_RX_DESC_REMOTE:
  278. {
  279. /*
  280. * process RX message for remote frames
  281. */
  282. if (process_rx_info_remote(pdev, log_data)) {
  283. qdf_print("Unable to process RX info");
  284. return;
  285. }
  286. break;
  287. }
  288. case WDI_EVENT_RATE_FIND:
  289. {
  290. /*
  291. * process RATE_FIND message
  292. */
  293. if (process_rate_find(pdev, log_data)) {
  294. qdf_print("Unable to process RC_FIND info");
  295. return;
  296. }
  297. break;
  298. }
  299. case WDI_EVENT_RATE_UPDATE:
  300. {
  301. /*
  302. * process RATE_UPDATE message
  303. */
  304. if (process_rate_update(pdev, log_data)) {
  305. qdf_print("Unable to process RC_UPDATE");
  306. return;
  307. }
  308. break;
  309. }
  310. case WDI_EVENT_SW_EVENT:
  311. {
  312. /*
  313. * process SW EVENT message
  314. */
  315. if (process_sw_event(pdev, log_data)) {
  316. qdf_print("Unable to process SW_EVENT");
  317. return;
  318. }
  319. break;
  320. }
  321. default:
  322. break;
  323. }
  324. }
  325. void
  326. lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data,
  327. u_int16_t peer_id, uint32_t status)
  328. {
  329. switch (event) {
  330. case WDI_EVENT_RX_DESC:
  331. {
  332. if (process_rx_desc_remote_wifi3(context, log_data)) {
  333. qdf_print("Unable to process RX info");
  334. return;
  335. }
  336. break;
  337. }
  338. case WDI_EVENT_LITE_T2H:
  339. {
  340. if (process_pktlog_lite_wifi3(context, log_data,
  341. PKTLOG_TYPE_LITE_T2H)) {
  342. qdf_print("Unable to process lite_t2h");
  343. return;
  344. }
  345. break;
  346. }
  347. case WDI_EVENT_LITE_RX:
  348. {
  349. if (process_pktlog_lite_wifi3(context, log_data,
  350. PKTLOG_TYPE_LITE_RX)) {
  351. qdf_print("Unable to process lite_rx");
  352. return;
  353. }
  354. break;
  355. }
  356. default:
  357. break;
  358. }
  359. }
  360. #ifdef PKTLOG_LEGACY
  361. A_STATUS
  362. wdi_pktlog_unsubscribe(uint8_t pdev_id, uint32_t log_state)
  363. {
  364. void *soc = cds_get_context(QDF_MODULE_ID_SOC);
  365. /* TODO: WIN implementation to get soc */
  366. if (log_state & ATH_PKTLOG_TX) {
  367. if (cdp_wdi_event_unsub(soc, pdev_id,
  368. &PKTLOG_TX_SUBSCRIBER,
  369. WDI_EVENT_TX_STATUS)) {
  370. return A_ERROR;
  371. }
  372. }
  373. if (log_state & ATH_PKTLOG_RX) {
  374. if (cdp_wdi_event_unsub(soc, pdev_id,
  375. &PKTLOG_RX_SUBSCRIBER,
  376. WDI_EVENT_RX_DESC)) {
  377. return A_ERROR;
  378. }
  379. if (cdp_wdi_event_unsub(soc, pdev_id,
  380. &PKTLOG_RX_REMOTE_SUBSCRIBER,
  381. WDI_EVENT_RX_DESC_REMOTE)) {
  382. return A_ERROR;
  383. }
  384. }
  385. if (log_state & ATH_PKTLOG_RCFIND) {
  386. if (cdp_wdi_event_unsub(soc, pdev_id,
  387. &PKTLOG_RCFIND_SUBSCRIBER,
  388. WDI_EVENT_RATE_FIND)) {
  389. return A_ERROR;
  390. }
  391. }
  392. if (log_state & ATH_PKTLOG_RCUPDATE) {
  393. if (cdp_wdi_event_unsub(soc, pdev_id,
  394. &PKTLOG_RCUPDATE_SUBSCRIBER,
  395. WDI_EVENT_RATE_UPDATE)) {
  396. return A_ERROR;
  397. }
  398. }
  399. if (log_state & ATH_PKTLOG_RCUPDATE) {
  400. if (cdp_wdi_event_unsub(soc, pdev_id,
  401. &PKTLOG_SW_EVENT_SUBSCRIBER,
  402. WDI_EVENT_SW_EVENT)) {
  403. return A_ERROR;
  404. }
  405. }
  406. return A_OK;
  407. }
  408. #else
  409. A_STATUS
  410. wdi_pktlog_unsubscribe(uint8_t pdev_id, uint32_t log_state)
  411. {
  412. void *soc = cds_get_context(QDF_MODULE_ID_SOC);
  413. if ((log_state & ATH_PKTLOG_TX) ||
  414. (log_state & ATH_PKTLOG_RCFIND) ||
  415. (log_state & ATH_PKTLOG_RCUPDATE) ||
  416. (log_state & ATH_PKTLOG_SW_EVENT)) {
  417. if (cdp_wdi_event_unsub(soc,
  418. pdev_id,
  419. &PKTLOG_OFFLOAD_SUBSCRIBER,
  420. WDI_EVENT_OFFLOAD_ALL)) {
  421. return A_ERROR;
  422. }
  423. }
  424. if (log_state & ATH_PKTLOG_RX) {
  425. if (cdp_wdi_event_unsub(soc, pdev_id,
  426. &PKTLOG_RX_SUBSCRIBER,
  427. WDI_EVENT_RX_DESC)) {
  428. return A_ERROR;
  429. }
  430. }
  431. if (log_state & ATH_PKTLOG_LITE_T2H) {
  432. if (cdp_wdi_event_unsub(soc, pdev_id,
  433. &PKTLOG_LITE_T2H_SUBSCRIBER,
  434. WDI_EVENT_LITE_T2H)) {
  435. return A_ERROR;
  436. }
  437. }
  438. if (log_state & ATH_PKTLOG_LITE_RX) {
  439. if (cdp_wdi_event_unsub(soc, pdev_id,
  440. &PKTLOG_LITE_RX_SUBSCRIBER,
  441. WDI_EVENT_LITE_RX)) {
  442. return A_ERROR;
  443. }
  444. }
  445. return A_OK;
  446. }
  447. #endif
  448. int pktlog_disable(struct hif_opaque_softc *scn)
  449. {
  450. struct pktlog_dev_t *pl_dev;
  451. struct ath_pktlog_info *pl_info;
  452. uint8_t save_pktlog_state;
  453. uint8_t pdev_id = WMI_PDEV_ID_SOC;
  454. pl_dev = get_pktlog_handle();
  455. if (!pl_dev) {
  456. qdf_print("Invalid pl_dev");
  457. return -EINVAL;
  458. }
  459. pl_info = pl_dev->pl_info;
  460. if (!pl_dev->pl_info) {
  461. qdf_print("Invalid pl_info");
  462. return -EINVAL;
  463. }
  464. if (pdev_id < 0) {
  465. qdf_print("Invalid pdev");
  466. return -EINVAL;
  467. }
  468. if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
  469. pl_info->curr_pkt_state ==
  470. PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
  471. pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
  472. pl_info->curr_pkt_state ==
  473. PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
  474. return -EBUSY;
  475. save_pktlog_state = pl_info->curr_pkt_state;
  476. pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
  477. if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
  478. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  479. qdf_print("Failed to disable pktlog in target");
  480. return -EINVAL;
  481. }
  482. if (pl_dev->is_pktlog_cb_subscribed &&
  483. wdi_pktlog_unsubscribe(pdev_id, pl_info->log_state)) {
  484. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  485. qdf_print("Cannot unsubscribe pktlog from the WDI");
  486. return -EINVAL;
  487. }
  488. pl_dev->is_pktlog_cb_subscribed = false;
  489. if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
  490. pl_info->curr_pkt_state =
  491. PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
  492. else
  493. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  494. return 0;
  495. }
  496. #ifdef PKTLOG_LEGACY
  497. /**
  498. * pktlog_callback_registration() - Register pktlog handlers based on
  499. * on callback type
  500. * @callback_type: pktlog full or lite registration
  501. *
  502. * Return: None
  503. */
static void pktlog_callback_registration(uint8_t callback_type)
{
	/* Legacy path: every stream uses the full pktlog_callback handler.
	 * Only the default registration type is supported here; any other
	 * value leaves the subscribers' callbacks untouched.
	 */
	if (callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
		PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback;
	}
}
  515. #else
static void pktlog_callback_registration(uint8_t callback_type)
{
	/* wifi3 path: default registration mixes lite handlers with the
	 * offload handler; lite registration uses only lite handlers.
	 * Any other callback_type leaves the subscribers untouched.
	 */
	if (callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
		PKTLOG_RX_SUBSCRIBER.callback = lit_pktlog_callback;
		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
		PKTLOG_OFFLOAD_SUBSCRIBER.callback = pktlog_callback;
	} else if (callback_type == PKTLOG_LITE_CALLBACK_REGISTRATION) {
		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
		PKTLOG_LITE_RX_SUBSCRIBER.callback = lit_pktlog_callback;
	}
}
  527. #endif
  528. #define ONE_MEGABYTE (1024 * 1024)
/**
 * pktlog_init() - reset pktlog bookkeeping to defaults
 * @scn: HIF handle (unused directly; pl_info comes from get_pktlog_handle())
 *
 * Zeroes pl_info, initializes its spinlock and mutex, sizes the (not yet
 * allocated) log buffer from the DP config (falling back to 1 MB), and
 * wires up the WDI callbacks per the previously selected callback_type.
 */
void pktlog_init(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
	struct ath_pktlog_info *pl_info;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	uint32_t buff_size;

	if (!pl_dev || !pl_dev->pl_info) {
		qdf_print("pl_dev or pl_info is invalid");
		return;
	}

	pl_info = pl_dev->pl_info;
	OS_MEMZERO(pl_info, sizeof(*pl_info));
	PKTLOG_LOCK_INIT(pl_info);
	mutex_init(&pl_info->pktlog_mutex);

	/* Configured size is in MB; 0 from config means "use 1 MB". */
	buff_size = cdp_cfg_get(soc, cfg_dp_pktlog_buffer_size) * ONE_MEGABYTE;
	pl_info->buf_size = (buff_size ? buff_size : ONE_MEGABYTE);
	pl_info->buf = NULL;
	pl_info->log_state = 0;
	pl_info->init_saved_state = 0;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
	pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
	pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
	pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
	pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
	pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
	pl_info->pktlen = 0;
	pl_info->start_time_thruput = 0;
	pl_info->start_time_per = 0;
	pl_dev->vendor_cmd_send = false;

	pktlog_callback_registration(pl_dev->callback_type);
}
/**
 * __pktlog_enable() - enable/disable pktlog (unlocked core)
 * @scn: HIF handle, forwarded to buffer alloc and the disable path
 * @log_state: ATH_PKTLOG_* bitmap; 0 means disable
 * @ini_triggered: true if triggered by INI configuration
 * @user_triggered: non-zero if triggered by the user
 * @is_iwpriv_command: 0 when issued via the vendor command path
 *
 * Caller must hold pl_info->pktlog_mutex (see pktlog_enable()).
 * Allocates the log buffer on first enable, subscribes the WDI callbacks,
 * then asks the firmware to start logging. log_state == 0 routes to the
 * registered pktlog_disable handler instead.
 *
 * Return: 0 on success, negative errno on failure
 */
int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
		    bool ini_triggered, uint8_t user_triggered,
		    uint32_t is_iwpriv_command)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t pdev_id;
	int error;

	if (!scn) {
		qdf_print("%s: Invalid scn context", __func__);
		ASSERT(0);
		return -EINVAL;
	}
	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: Invalid pktlog context", __func__);
		ASSERT(0);
		return -EINVAL;
	}
	pdev_id = WMI_PDEV_ID_SOC;
	/* NOTE(review): pdev_id is uint8_t, so this check is always false
	 * (dead code); kept byte-identical here, flagged for cleanup.
	 */
	if (pdev_id < 0) {
		qdf_print("%s: Invalid txrx context", __func__);
		ASSERT(0);
		return -EINVAL;
	}
	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: Invalid pl_info context", __func__);
		ASSERT(0);
		return -EINVAL;
	}
	/* Any in-progress operation state blocks a new enable/disable. */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	/* is_iwpriv_command : 0 indicates its a vendor command
	 * log_state: 0 indicates pktlog disable command
	 * vendor_cmd_send flag; false means no vendor pktlog enable
	 * command was sent previously
	 */
	if (is_iwpriv_command == 0 && log_state == 0 &&
	    pl_dev->vendor_cmd_send == false) {
		/* Vendor disable with nothing previously enabled: no-op. */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog operation not in progress", __func__);
		return 0;
	}
	if (!pl_dev->tgt_pktlog_alloced) {
		/* First enable: allocate and initialize the ring buffer. */
		if (!pl_info->buf) {
			error = pktlog_alloc_buf(scn);
			if (error != 0) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buff alloc failed",
					  __func__);
				return -ENOMEM;
			}
			if (!pl_info->buf) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buf alloc failed",
					  __func__);
				ASSERT(0);
				return -ENOMEM;
			}
		}
		qdf_spin_lock_bh(&pl_info->log_lock);
		pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
		pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
		pl_info->buf->wr_offset = 0;
		pl_info->buf->rd_offset = -1;
		/* These below variables are used by per packet stats*/
		pl_info->buf->bytes_written = 0;
		pl_info->buf->msg_index = 1;
		pl_info->buf->offset = PKTLOG_READ_OFFSET;
		qdf_spin_unlock_bh(&pl_info->log_lock);
		pl_info->start_time_thruput = os_get_timestamp();
		pl_info->start_time_per = pl_info->start_time_thruput;
		pl_dev->tgt_pktlog_alloced = true;
	}
	if (log_state != 0) {
		/* WDI subscribe */
		if (!pl_dev->is_pktlog_cb_subscribed) {
			error = wdi_pktlog_subscribe(pdev_id, log_state);
			if (error) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("Unable to subscribe to the WDI %s",
					  __func__);
				return -EINVAL;
			}
		} else {
			/* Re-enable while already subscribed is an error. */
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Unable to subscribe %d to the WDI %s",
				  log_state, __func__);
			return -EINVAL;
		}
		/* WMI command to enable pktlog on the firmware */
		if (pktlog_enable_tgt(scn, log_state, ini_triggered,
				      user_triggered)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Device cannot be enabled, %s", __func__);
			return -EINVAL;
		}
		pl_dev->is_pktlog_cb_subscribed = true;
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = true;
	} else {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		pl_dev->pl_funcs->pktlog_disable(scn);
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = false;
	}
	pl_info->log_state = log_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
  676. int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
  677. bool ini_triggered, uint8_t user_triggered,
  678. uint32_t is_iwpriv_command)
  679. {
  680. struct pktlog_dev_t *pl_dev;
  681. struct ath_pktlog_info *pl_info;
  682. int err;
  683. pl_dev = get_pktlog_handle();
  684. if (!pl_dev) {
  685. qdf_print("%s: invalid pl_dev handle", __func__);
  686. return -EINVAL;
  687. }
  688. pl_info = pl_dev->pl_info;
  689. if (!pl_info) {
  690. qdf_print("%s: invalid pl_info handle", __func__);
  691. return -EINVAL;
  692. }
  693. mutex_lock(&pl_info->pktlog_mutex);
  694. err = __pktlog_enable(scn, log_state, ini_triggered,
  695. user_triggered, is_iwpriv_command);
  696. mutex_unlock(&pl_info->pktlog_mutex);
  697. return err;
  698. }
  699. static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
  700. {
  701. struct pktlog_dev_t *pl_dev;
  702. struct ath_pktlog_info *pl_info;
  703. uint8_t pdev_id = WMI_PDEV_ID_SOC;
  704. void *soc = cds_get_context(QDF_MODULE_ID_SOC);
  705. uint32_t buff_size;
  706. uint32_t max_allowed_buff_size;
  707. pl_dev = get_pktlog_handle();
  708. if (!pl_dev) {
  709. qdf_print("%s: invalid pl_dev handle", __func__);
  710. return -EINVAL;
  711. }
  712. pl_info = pl_dev->pl_info;
  713. if (!pl_info) {
  714. qdf_print("%s: invalid pl_dev handle", __func__);
  715. return -EINVAL;
  716. }
  717. if (pdev_id < 0) {
  718. qdf_print("%s: invalid pdev", __func__);
  719. return -EINVAL;
  720. }
  721. if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS) {
  722. qdf_print("%s: pktlog is not configured", __func__);
  723. return -EBUSY;
  724. }
  725. pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
  726. buff_size = cdp_cfg_get(soc, cfg_dp_pktlog_buffer_size) * ONE_MEGABYTE;
  727. max_allowed_buff_size = (buff_size ? buff_size : ONE_MEGABYTE);
  728. if (size < ONE_MEGABYTE || size > max_allowed_buff_size) {
  729. qdf_print("%s: Cannot Set Pktlog Buffer size of %d bytes.Min required is %d MB and Max allowed is %d MB.",
  730. __func__, size, (ONE_MEGABYTE / ONE_MEGABYTE),
  731. (max_allowed_buff_size / ONE_MEGABYTE));
  732. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  733. qdf_print("%s: Invalid requested buff size", __func__);
  734. return -EINVAL;
  735. }
  736. if (size == pl_info->buf_size) {
  737. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  738. qdf_print("%s: Pktlog Buff Size is already of same size.",
  739. __func__);
  740. return 0;
  741. }
  742. if (pl_info->log_state) {
  743. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  744. qdf_print("%s: Logging should be disabled before changing"
  745. "buffer size.", __func__);
  746. return -EINVAL;
  747. }
  748. qdf_spin_lock_bh(&pl_info->log_lock);
  749. if (pl_info->buf) {
  750. if (pl_dev->is_pktlog_cb_subscribed &&
  751. wdi_pktlog_unsubscribe(pdev_id, pl_info->log_state)) {
  752. pl_info->curr_pkt_state =
  753. PKTLOG_OPR_NOT_IN_PROGRESS;
  754. qdf_spin_unlock_bh(&pl_info->log_lock);
  755. qdf_print("Cannot unsubscribe pktlog from the WDI");
  756. return -EFAULT;
  757. }
  758. pktlog_release_buf(scn);
  759. pl_dev->is_pktlog_cb_subscribed = false;
  760. pl_dev->tgt_pktlog_alloced = false;
  761. }
  762. if (size != 0) {
  763. qdf_print("%s: New Pktlog Buff Size is %d", __func__, size);
  764. pl_info->buf_size = size;
  765. }
  766. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  767. qdf_spin_unlock_bh(&pl_info->log_lock);
  768. return 0;
  769. }
  770. int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
  771. {
  772. struct pktlog_dev_t *pl_dev;
  773. struct ath_pktlog_info *pl_info;
  774. int status;
  775. pl_dev = get_pktlog_handle();
  776. if (!pl_dev) {
  777. qdf_print("%s: invalid pl_dev handle", __func__);
  778. return -EINVAL;
  779. }
  780. pl_info = pl_dev->pl_info;
  781. if (!pl_info) {
  782. qdf_print("%s: invalid pl_dev handle", __func__);
  783. return -EINVAL;
  784. }
  785. mutex_lock(&pl_info->pktlog_mutex);
  786. status = __pktlog_setsize(scn, size);
  787. mutex_unlock(&pl_info->pktlog_mutex);
  788. return status;
  789. }
  790. int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
  791. {
  792. struct pktlog_dev_t *pl_dev;
  793. struct ath_pktlog_info *pl_info;
  794. uint8_t save_pktlog_state;
  795. pl_dev = get_pktlog_handle();
  796. if (!pl_dev) {
  797. qdf_print("%s: invalid pl_dev handle", __func__);
  798. return -EINVAL;
  799. }
  800. pl_info = pl_dev->pl_info;
  801. if (!pl_info) {
  802. qdf_print("%s: invalid pl_dev handle", __func__);
  803. return -EINVAL;
  804. }
  805. if (!clear_buff)
  806. return -EINVAL;
  807. if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
  808. pl_info->curr_pkt_state ==
  809. PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
  810. return -EBUSY;
  811. save_pktlog_state = pl_info->curr_pkt_state;
  812. pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
  813. if (pl_info->log_state) {
  814. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  815. qdf_print("%s: Logging should be disabled before clearing "
  816. "pktlog buffer.", __func__);
  817. return -EINVAL;
  818. }
  819. if (pl_info->buf) {
  820. if (pl_info->buf_size > 0) {
  821. qdf_debug("pktlog buffer is cleared");
  822. memset(pl_info->buf, 0, pl_info->buf_size);
  823. pl_dev->is_pktlog_cb_subscribed = false;
  824. pl_dev->tgt_pktlog_alloced = false;
  825. pl_info->buf->rd_offset = -1;
  826. } else {
  827. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  828. qdf_print("%s: pktlog buffer size is not proper. "
  829. "Existing Buf size %d", __func__,
  830. pl_info->buf_size);
  831. return -EFAULT;
  832. }
  833. } else {
  834. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  835. qdf_print("%s: pktlog buff is NULL", __func__);
  836. return -EFAULT;
  837. }
  838. if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
  839. pl_info->curr_pkt_state =
  840. PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
  841. else
  842. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  843. return 0;
  844. }
  845. void pktlog_process_fw_msg(uint8_t pdev_id, uint32_t *buff, uint32_t len)
  846. {
  847. uint32_t *pl_hdr;
  848. uint32_t log_type;
  849. struct ol_fw_data pl_fw_data;
  850. if (pdev_id == OL_TXRX_INVALID_PDEV_ID) {
  851. qdf_print("%s: txrx pdev_id is invalid", __func__);
  852. return;
  853. }
  854. pl_hdr = buff;
  855. pl_fw_data.data = pl_hdr;
  856. pl_fw_data.len = len;
  857. log_type =
  858. (*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
  859. ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
  860. if ((log_type == PKTLOG_TYPE_TX_CTRL)
  861. || (log_type == PKTLOG_TYPE_TX_STAT)
  862. || (log_type == PKTLOG_TYPE_TX_MSDU_ID)
  863. || (log_type == PKTLOG_TYPE_TX_FRM_HDR)
  864. || (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
  865. wdi_event_handler(WDI_EVENT_TX_STATUS,
  866. pdev_id, &pl_fw_data);
  867. else if (log_type == PKTLOG_TYPE_RC_FIND)
  868. wdi_event_handler(WDI_EVENT_RATE_FIND,
  869. pdev_id, &pl_fw_data);
  870. else if (log_type == PKTLOG_TYPE_RC_UPDATE)
  871. wdi_event_handler(WDI_EVENT_RATE_UPDATE,
  872. pdev_id, &pl_fw_data);
  873. else if (log_type == PKTLOG_TYPE_RX_STAT)
  874. wdi_event_handler(WDI_EVENT_RX_DESC,
  875. pdev_id, &pl_fw_data);
  876. else if (log_type == PKTLOG_TYPE_SW_EVENT)
  877. wdi_event_handler(WDI_EVENT_SW_EVENT,
  878. pdev_id, &pl_fw_data);
  879. }
  880. #if defined(QCA_WIFI_3_0_ADRASTEA)
  881. static inline int pktlog_nbuf_check_sanity(qdf_nbuf_t nbuf)
  882. {
  883. int rc = 0; /* sane */
  884. if ((!nbuf) ||
  885. (nbuf->data < nbuf->head) ||
  886. ((nbuf->data + skb_headlen(nbuf)) > skb_end_pointer(nbuf)))
  887. rc = -EINVAL;
  888. return rc;
  889. }
  890. /**
  891. * pktlog_t2h_msg_handler() - Target to host message handler
  892. * @context: pdev context
  893. * @pkt: HTC packet
  894. *
  895. * Return: None
  896. */
  897. static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
  898. {
  899. struct pktlog_dev_t *pdev = (struct pktlog_dev_t *)context;
  900. qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
  901. uint32_t *msg_word;
  902. uint32_t msg_len;
  903. /* check for sanity of the packet, have seen corrupted pkts */
  904. if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) {
  905. qdf_print("%s: packet 0x%pK corrupted? Leaking...",
  906. __func__, pktlog_t2h_msg);
  907. /* do not free; may crash! */
  908. QDF_ASSERT(0);
  909. return;
  910. }
  911. /* check for successful message reception */
  912. if (pkt->Status != QDF_STATUS_SUCCESS) {
  913. if (pkt->Status != QDF_STATUS_E_CANCELED)
  914. pdev->htc_err_cnt++;
  915. qdf_nbuf_free(pktlog_t2h_msg);
  916. return;
  917. }
  918. /* confirm alignment */
  919. qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);
  920. msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
  921. msg_len = qdf_nbuf_len(pktlog_t2h_msg);
  922. pktlog_process_fw_msg(pdev->pdev_id, msg_word, msg_len);
  923. qdf_nbuf_free(pktlog_t2h_msg);
  924. }
  925. /**
  926. * pktlog_tx_resume_handler() - resume callback
  927. * @context: pdev context
  928. *
  929. * Return: None
  930. */
  931. static void pktlog_tx_resume_handler(void *context)
  932. {
  933. qdf_print("%s: Not expected", __func__);
  934. qdf_assert(0);
  935. }
  936. /**
  937. * pktlog_h2t_send_complete() - send complete indication
  938. * @context: pdev context
  939. * @htc_pkt: HTC packet
  940. *
  941. * Return: None
  942. */
  943. static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
  944. {
  945. qdf_print("%s: Not expected", __func__);
  946. qdf_assert(0);
  947. }
  948. /**
  949. * pktlog_h2t_full() - queue full indication
  950. * @context: pdev context
  951. * @pkt: HTC packet
  952. *
  953. * Return: HTC action
  954. */
  955. static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt)
  956. {
  957. return HTC_SEND_FULL_KEEP;
  958. }
  959. /**
  960. * pktlog_htc_connect_service() - create new endpoint for packetlog
  961. * @pdev - pktlog pdev
  962. *
  963. * Return: 0 for success/failure
  964. */
  965. static int pktlog_htc_connect_service(struct pktlog_dev_t *pdev)
  966. {
  967. struct htc_service_connect_req connect;
  968. struct htc_service_connect_resp response;
  969. QDF_STATUS status;
  970. qdf_mem_zero(&connect, sizeof(connect));
  971. qdf_mem_zero(&response, sizeof(response));
  972. connect.pMetaData = NULL;
  973. connect.MetaDataLength = 0;
  974. connect.EpCallbacks.pContext = pdev;
  975. connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
  976. connect.EpCallbacks.EpTxCompleteMultiple = NULL;
  977. connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
  978. connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;
  979. /* rx buffers currently are provided by HIF, not by EpRecvRefill */
  980. connect.EpCallbacks.EpRecvRefill = NULL;
  981. connect.EpCallbacks.RecvRefillWaterMark = 1;
  982. /* N/A, fill is done by HIF */
  983. connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
  984. /*
  985. * Specify how deep to let a queue get before htc_send_pkt will
  986. * call the EpSendFull function due to excessive send queue depth.
  987. */
  988. connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;
  989. /* disable flow control for HTT data message service */
  990. connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
  991. /* connect to control service */
  992. connect.service_id = PACKET_LOG_SVC;
  993. status = htc_connect_service(pdev->htc_pdev, &connect, &response);
  994. if (status != QDF_STATUS_SUCCESS) {
  995. pdev->mt_pktlog_enabled = false;
  996. return -EIO; /* failure */
  997. }
  998. pdev->htc_endpoint = response.Endpoint;
  999. pdev->mt_pktlog_enabled = true;
  1000. return 0; /* success */
  1001. }
  1002. /**
  1003. * pktlog_htc_attach() - attach pktlog HTC service
  1004. *
  1005. * Return: 0 for success/failure
  1006. */
  1007. int pktlog_htc_attach(void)
  1008. {
  1009. struct pktlog_dev_t *pl_pdev = get_pktlog_handle();
  1010. void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);
  1011. if ((!pl_pdev) || (!htc_pdev)) {
  1012. qdf_print("Invalid pl_dev or htc_pdev handle");
  1013. return -EINVAL;
  1014. }
  1015. pl_pdev->htc_pdev = htc_pdev;
  1016. return pktlog_htc_connect_service(pl_pdev);
  1017. }
  1018. #else
  1019. int pktlog_htc_attach(void)
  1020. {
  1021. struct pktlog_dev_t *pl_dev = get_pktlog_handle();
  1022. if (!pl_dev) {
  1023. qdf_print("Invalid pl_dev handle");
  1024. return -EINVAL;
  1025. }
  1026. pl_dev->mt_pktlog_enabled = false;
  1027. return 0;
  1028. }
  1029. #endif
  1030. #endif /* REMOVE_PKT_LOG */