pktlog_ac.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188
  1. /*
  2. * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. /*
  19. *
  20. * Permission to use, copy, modify, and/or distribute this software for any
  21. * purpose with or without fee is hereby granted, provided that the above
  22. * copyright notice and this permission notice appear in all copies.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  25. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  26. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  27. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  28. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  29. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  30. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  31. */
  32. #ifndef REMOVE_PKT_LOG
  33. #include "qdf_mem.h"
  34. #include "athdefs.h"
  35. #include "pktlog_ac_i.h"
  36. #include "cds_api.h"
  37. #include "wma_types.h"
  38. #include "htc.h"
  39. #include <cdp_txrx_cmn_struct.h>
  40. #include <cdp_txrx_ctrl.h>
/*
 * WDI event subscriber descriptors. Each one is registered with the
 * datapath via cdp_wdi_event_sub() in wdi_pktlog_subscribe(); the
 * .callback members are filled in by pktlog_callback_registration().
 */
wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_T2H_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_OFFLOAD_SUBSCRIBER;

/* Architecture-dependent pktlog entry points exported through pl_dev */
struct ol_pl_arch_dep_funcs ol_pl_funcs = {
	.pktlog_init = pktlog_init,
	.pktlog_enable = pktlog_enable,
	.pktlog_setsize = pktlog_setsize,
	.pktlog_disable = pktlog_disable,       /* valid for f/w disable */
};

/* Singleton pktlog device instance handed out by pktlog_sethandle() */
struct pktlog_dev_t pl_dev = {
	.pl_funcs = &ol_pl_funcs,
};
  59. void pktlog_sethandle(struct pktlog_dev_t **pl_handle,
  60. struct hif_opaque_softc *scn)
  61. {
  62. pl_dev.scn = (ol_ath_generic_softc_handle) scn;
  63. *pl_handle = &pl_dev;
  64. }
  65. void pktlog_set_callback_regtype(
  66. enum pktlog_callback_regtype callback_type)
  67. {
  68. struct pktlog_dev_t *pl_dev = get_pktlog_handle();
  69. if (!pl_dev) {
  70. qdf_print("Invalid pl_dev");
  71. return;
  72. }
  73. pl_dev->callback_type = callback_type;
  74. }
  75. struct pktlog_dev_t *get_pktlog_handle(void)
  76. {
  77. struct cdp_pdev *pdev_txrx_handle =
  78. cds_get_context(QDF_MODULE_ID_TXRX);
  79. void *soc = cds_get_context(QDF_MODULE_ID_SOC);
  80. return cdp_get_pldev(soc, pdev_txrx_handle);
  81. }
  82. /*
  83. * Get current txrx context
  84. */
  85. void *get_txrx_context(void)
  86. {
  87. return cds_get_context(QDF_MODULE_ID_TXRX);
  88. }
  89. static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
  90. WMI_CMD_ID cmd_id, bool ini_triggered,
  91. uint8_t user_triggered)
  92. {
  93. struct scheduler_msg msg = { 0 };
  94. QDF_STATUS status;
  95. struct ath_pktlog_wmi_params *param;
  96. param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
  97. if (!param)
  98. return A_NO_MEMORY;
  99. param->cmd_id = cmd_id;
  100. param->pktlog_event = event_types;
  101. param->ini_triggered = ini_triggered;
  102. param->user_triggered = user_triggered;
  103. msg.type = WMA_PKTLOG_ENABLE_REQ;
  104. msg.bodyptr = param;
  105. msg.bodyval = 0;
  106. status = scheduler_post_message(QDF_MODULE_ID_WMA,
  107. QDF_MODULE_ID_WMA,
  108. QDF_MODULE_ID_WMA, &msg);
  109. if (status != QDF_STATUS_SUCCESS) {
  110. qdf_mem_free(param);
  111. return A_ERROR;
  112. }
  113. return A_OK;
  114. }
  115. static inline A_STATUS
  116. pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
  117. bool ini_triggered, uint8_t user_triggered)
  118. {
  119. uint32_t types = 0;
  120. if (log_state & ATH_PKTLOG_TX)
  121. types |= WMI_PKTLOG_EVENT_TX;
  122. if (log_state & ATH_PKTLOG_RX)
  123. types |= WMI_PKTLOG_EVENT_RX;
  124. if (log_state & ATH_PKTLOG_RCFIND)
  125. types |= WMI_PKTLOG_EVENT_RCF;
  126. if (log_state & ATH_PKTLOG_RCUPDATE)
  127. types |= WMI_PKTLOG_EVENT_RCU;
  128. if (log_state & ATH_PKTLOG_SW_EVENT)
  129. types |= WMI_PKTLOG_EVENT_SW;
  130. QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
  131. "%s: Pktlog events: %d", __func__, types);
  132. return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
  133. ini_triggered, user_triggered);
  134. }
  135. #ifdef HELIUMPLUS
  136. /**
  137. * wdi_pktlog_subscribe() - Subscribe pktlog callbacks
  138. * @cdp_pdev: abstract pdev handle
  139. * @log_state: Pktlog registration
  140. *
  141. * Return: zero on success, non-zero on failure
  142. */
  143. static inline A_STATUS
  144. wdi_pktlog_subscribe(struct cdp_pdev *cdp_pdev, int32_t log_state)
  145. {
  146. void *soc = cds_get_context(QDF_MODULE_ID_SOC);
  147. if (!cdp_pdev) {
  148. qdf_print("Invalid pdev in %s", __func__);
  149. return A_ERROR;
  150. }
  151. if (log_state & ATH_PKTLOG_TX) {
  152. if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_TX_SUBSCRIBER,
  153. WDI_EVENT_TX_STATUS)) {
  154. return A_ERROR;
  155. }
  156. }
  157. if (log_state & ATH_PKTLOG_RX) {
  158. if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_RX_SUBSCRIBER,
  159. WDI_EVENT_RX_DESC)) {
  160. return A_ERROR;
  161. }
  162. if (cdp_wdi_event_sub(soc, cdp_pdev,
  163. &PKTLOG_RX_REMOTE_SUBSCRIBER,
  164. WDI_EVENT_RX_DESC_REMOTE)) {
  165. return A_ERROR;
  166. }
  167. }
  168. if (log_state & ATH_PKTLOG_RCFIND) {
  169. if (cdp_wdi_event_sub(soc, cdp_pdev,
  170. &PKTLOG_RCFIND_SUBSCRIBER,
  171. WDI_EVENT_RATE_FIND)) {
  172. return A_ERROR;
  173. }
  174. }
  175. if (log_state & ATH_PKTLOG_RCUPDATE) {
  176. if (cdp_wdi_event_sub(soc, cdp_pdev,
  177. &PKTLOG_RCUPDATE_SUBSCRIBER,
  178. WDI_EVENT_RATE_UPDATE)) {
  179. return A_ERROR;
  180. }
  181. }
  182. if (log_state & ATH_PKTLOG_SW_EVENT) {
  183. if (cdp_wdi_event_sub(soc, cdp_pdev,
  184. &PKTLOG_SW_EVENT_SUBSCRIBER,
  185. WDI_EVENT_SW_EVENT)) {
  186. return A_ERROR;
  187. }
  188. }
  189. return A_OK;
  190. }
  191. #else
  192. static inline A_STATUS
  193. wdi_pktlog_subscribe(struct cdp_pdev *cdp_pdev, int32_t log_state)
  194. {
  195. void *soc = cds_get_context(QDF_MODULE_ID_SOC);
  196. if (!cdp_pdev) {
  197. qdf_print("Invalid pdev in %s", __func__);
  198. return A_ERROR;
  199. }
  200. if ((log_state & ATH_PKTLOG_TX) ||
  201. (log_state & ATH_PKTLOG_RCFIND) ||
  202. (log_state & ATH_PKTLOG_RCUPDATE) ||
  203. (log_state & ATH_PKTLOG_SW_EVENT)) {
  204. if (cdp_wdi_event_sub(soc,
  205. cdp_pdev,
  206. &PKTLOG_OFFLOAD_SUBSCRIBER,
  207. WDI_EVENT_OFFLOAD_ALL)) {
  208. return A_ERROR;
  209. }
  210. }
  211. if (log_state & ATH_PKTLOG_RX) {
  212. if (cdp_wdi_event_sub(soc, cdp_pdev,
  213. &PKTLOG_RX_SUBSCRIBER,
  214. WDI_EVENT_RX_DESC)) {
  215. return A_ERROR;
  216. }
  217. }
  218. if (log_state & ATH_PKTLOG_SW_EVENT) {
  219. if (cdp_wdi_event_sub(soc, cdp_pdev,
  220. &PKTLOG_SW_EVENT_SUBSCRIBER,
  221. WDI_EVENT_SW_EVENT)) {
  222. return A_ERROR;
  223. }
  224. }
  225. if (log_state & ATH_PKTLOG_LITE_T2H) {
  226. if (cdp_wdi_event_sub(soc, cdp_pdev,
  227. &PKTLOG_LITE_T2H_SUBSCRIBER,
  228. WDI_EVENT_LITE_T2H)) {
  229. return A_ERROR;
  230. }
  231. }
  232. if (log_state & ATH_PKTLOG_LITE_RX) {
  233. if (cdp_wdi_event_sub(soc, cdp_pdev,
  234. &PKTLOG_LITE_RX_SUBSCRIBER,
  235. WDI_EVENT_LITE_RX)) {
  236. return A_ERROR;
  237. }
  238. }
  239. return A_OK;
  240. }
  241. #endif
  242. void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data,
  243. u_int16_t peer_id, uint32_t status)
  244. {
  245. switch (event) {
  246. case WDI_EVENT_OFFLOAD_ALL:
  247. {
  248. if (process_offload_pktlog(pdev, log_data)) {
  249. qdf_print("Unable to process offload info");
  250. return;
  251. }
  252. break;
  253. }
  254. case WDI_EVENT_TX_STATUS:
  255. {
  256. /*
  257. * process TX message
  258. */
  259. if (process_tx_info(pdev, log_data)) {
  260. qdf_print("Unable to process TX info");
  261. return;
  262. }
  263. break;
  264. }
  265. case WDI_EVENT_RX_DESC:
  266. {
  267. /*
  268. * process RX message for local frames
  269. */
  270. if (process_rx_info(pdev, log_data)) {
  271. qdf_print("Unable to process RX info");
  272. return;
  273. }
  274. break;
  275. }
  276. case WDI_EVENT_RX_DESC_REMOTE:
  277. {
  278. /*
  279. * process RX message for remote frames
  280. */
  281. if (process_rx_info_remote(pdev, log_data)) {
  282. qdf_print("Unable to process RX info");
  283. return;
  284. }
  285. break;
  286. }
  287. case WDI_EVENT_RATE_FIND:
  288. {
  289. /*
  290. * process RATE_FIND message
  291. */
  292. if (process_rate_find(pdev, log_data)) {
  293. qdf_print("Unable to process RC_FIND info");
  294. return;
  295. }
  296. break;
  297. }
  298. case WDI_EVENT_RATE_UPDATE:
  299. {
  300. /*
  301. * process RATE_UPDATE message
  302. */
  303. if (process_rate_update(pdev, log_data)) {
  304. qdf_print("Unable to process RC_UPDATE");
  305. return;
  306. }
  307. break;
  308. }
  309. case WDI_EVENT_SW_EVENT:
  310. {
  311. /*
  312. * process SW EVENT message
  313. */
  314. if (process_sw_event(pdev, log_data)) {
  315. qdf_print("Unable to process SW_EVENT");
  316. return;
  317. }
  318. break;
  319. }
  320. default:
  321. break;
  322. }
  323. }
  324. void
  325. lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data,
  326. u_int16_t peer_id, uint32_t status)
  327. {
  328. switch (event) {
  329. case WDI_EVENT_RX_DESC:
  330. {
  331. if (process_rx_desc_remote(context, log_data)) {
  332. qdf_print("Unable to process RX info");
  333. return;
  334. }
  335. break;
  336. }
  337. case WDI_EVENT_LITE_T2H:
  338. {
  339. if (process_pktlog_lite(context, log_data,
  340. PKTLOG_TYPE_LITE_T2H)) {
  341. qdf_print("Unable to process lite_t2h");
  342. return;
  343. }
  344. break;
  345. }
  346. case WDI_EVENT_LITE_RX:
  347. {
  348. if (process_pktlog_lite(context, log_data,
  349. PKTLOG_TYPE_LITE_RX)) {
  350. qdf_print("Unable to process lite_rx");
  351. return;
  352. }
  353. break;
  354. }
  355. default:
  356. break;
  357. }
  358. }
  359. #ifdef HELIUMPLUS
  360. /**
  361. * wdi_pktlog_unsubscribe() - Unsubscribe pktlog callbacks
  362. * @cdp_pdev: abstract pdev handle
  363. * @log_state: Pktlog registration
  364. *
  365. * Return: zero on success, non-zero on failure
  366. */
  367. A_STATUS
  368. wdi_pktlog_unsubscribe(struct cdp_pdev *pdev, uint32_t log_state)
  369. {
  370. void *soc = cds_get_context(QDF_MODULE_ID_SOC);
  371. /* TODO: WIN implementation to get soc */
  372. if (log_state & ATH_PKTLOG_TX) {
  373. if (cdp_wdi_event_unsub(soc, pdev,
  374. &PKTLOG_TX_SUBSCRIBER,
  375. WDI_EVENT_TX_STATUS)) {
  376. return A_ERROR;
  377. }
  378. }
  379. if (log_state & ATH_PKTLOG_RX) {
  380. if (cdp_wdi_event_unsub(soc, pdev,
  381. &PKTLOG_RX_SUBSCRIBER, WDI_EVENT_RX_DESC)) {
  382. return A_ERROR;
  383. }
  384. if (cdp_wdi_event_unsub(soc, pdev,
  385. &PKTLOG_RX_REMOTE_SUBSCRIBER,
  386. WDI_EVENT_RX_DESC_REMOTE)) {
  387. return A_ERROR;
  388. }
  389. }
  390. if (log_state & ATH_PKTLOG_RCFIND) {
  391. if (cdp_wdi_event_unsub(soc, pdev,
  392. &PKTLOG_RCFIND_SUBSCRIBER,
  393. WDI_EVENT_RATE_FIND)) {
  394. return A_ERROR;
  395. }
  396. }
  397. if (log_state & ATH_PKTLOG_RCUPDATE) {
  398. if (cdp_wdi_event_unsub(soc, pdev,
  399. &PKTLOG_RCUPDATE_SUBSCRIBER,
  400. WDI_EVENT_RATE_UPDATE)) {
  401. return A_ERROR;
  402. }
  403. }
  404. if (log_state & ATH_PKTLOG_RCUPDATE) {
  405. if (cdp_wdi_event_unsub(soc, pdev,
  406. &PKTLOG_SW_EVENT_SUBSCRIBER,
  407. WDI_EVENT_SW_EVENT)) {
  408. return A_ERROR;
  409. }
  410. }
  411. return A_OK;
  412. }
  413. #else
  414. A_STATUS
  415. wdi_pktlog_unsubscribe(struct cdp_pdev *pdev, uint32_t log_state)
  416. {
  417. void *soc = cds_get_context(QDF_MODULE_ID_SOC);
  418. if ((log_state & ATH_PKTLOG_TX) ||
  419. (log_state & ATH_PKTLOG_RCFIND) ||
  420. (log_state & ATH_PKTLOG_RCUPDATE) ||
  421. (log_state & ATH_PKTLOG_SW_EVENT)) {
  422. if (cdp_wdi_event_unsub(soc,
  423. pdev,
  424. &PKTLOG_OFFLOAD_SUBSCRIBER,
  425. WDI_EVENT_OFFLOAD_ALL)) {
  426. return A_ERROR;
  427. }
  428. }
  429. if (log_state & ATH_PKTLOG_RX) {
  430. if (cdp_wdi_event_unsub(soc, pdev,
  431. &PKTLOG_RX_SUBSCRIBER,
  432. WDI_EVENT_RX_DESC)) {
  433. return A_ERROR;
  434. }
  435. }
  436. if (log_state & ATH_PKTLOG_LITE_T2H) {
  437. if (cdp_wdi_event_unsub(soc, pdev,
  438. &PKTLOG_LITE_T2H_SUBSCRIBER,
  439. WDI_EVENT_LITE_T2H)) {
  440. return A_ERROR;
  441. }
  442. }
  443. if (log_state & ATH_PKTLOG_LITE_RX) {
  444. if (cdp_wdi_event_unsub(soc, pdev,
  445. &PKTLOG_LITE_RX_SUBSCRIBER,
  446. WDI_EVENT_LITE_RX)) {
  447. return A_ERROR;
  448. }
  449. }
  450. return A_OK;
  451. }
  452. #endif
/**
 * pktlog_disable() - Disable packet logging: tell the firmware to stop
 * and detach the host-side WDI subscribers.
 * @scn: opaque HIF context (unused directly; handles come from CDS)
 *
 * Refuses to run (-EBUSY) while another pktlog operation is mid-flight,
 * then drives curr_pkt_state through IN_PROGRESS and back. If the caller
 * had a read in progress (READ_START), the final state records that
 * pktlog was disabled underneath it.
 *
 * Return: 0 on success; -EINVAL on bad handles or command failure;
 *         -EBUSY if an operation is already in progress
 */
int pktlog_disable(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;
	struct cdp_pdev *txrx_pdev = get_txrx_context();

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("Invalid pl_dev");
		return -EINVAL;
	}
	pl_info = pl_dev->pl_info;
	if (!pl_dev->pl_info) {
		qdf_print("Invalid pl_info");
		return -EINVAL;
	}
	if (!txrx_pdev) {
		qdf_print("Invalid cdp_pdev");
		return -EINVAL;
	}
	/* Reject while any other pktlog operation is still in flight */
	if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
	    pl_info->curr_pkt_state ==
	    PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
	    pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
	    PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;
	/* Remember whether a read was underway so we can flag it below */
	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	/* Firmware side first: stop event generation at the source */
	if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Failed to disable pktlog in target");
		return -EINVAL;
	}
	/* Then host side: drop the WDI subscriptions, if any were made */
	if (pl_dev->is_pktlog_cb_subscribed &&
	    wdi_pktlog_unsubscribe(txrx_pdev, pl_info->log_state)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Cannot unsubscribe pktlog from the WDI");
		return -EINVAL;
	}
	pl_dev->is_pktlog_cb_subscribed = false;
	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
  501. #ifdef HELIUMPLUS
  502. /**
  503. * pktlog_callback_registration() - Register pktlog handlers based on
  504. * on callback type
  505. * @callback_type: pktlog full or lite registration
  506. *
  507. * Return: None
  508. */
  509. static void pktlog_callback_registration(uint8_t callback_type)
  510. {
  511. if (callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
  512. PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback;
  513. PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback;
  514. PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback;
  515. PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback;
  516. PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback;
  517. PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback;
  518. }
  519. }
  520. #else
  521. static void pktlog_callback_registration(uint8_t callback_type)
  522. {
  523. if (callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
  524. PKTLOG_RX_SUBSCRIBER.callback = lit_pktlog_callback;
  525. PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
  526. PKTLOG_OFFLOAD_SUBSCRIBER.callback = pktlog_callback;
  527. } else if (callback_type == PKTLOG_LITE_CALLBACK_REGISTRATION) {
  528. PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
  529. PKTLOG_LITE_RX_SUBSCRIBER.callback = lit_pktlog_callback;
  530. }
  531. }
  532. #endif
  533. void pktlog_init(struct hif_opaque_softc *scn)
  534. {
  535. struct pktlog_dev_t *pl_dev = get_pktlog_handle();
  536. struct ath_pktlog_info *pl_info;
  537. if (!pl_dev || !pl_dev->pl_info) {
  538. qdf_print("pl_dev or pl_info is invalid");
  539. return;
  540. }
  541. pl_info = pl_dev->pl_info;
  542. OS_MEMZERO(pl_info, sizeof(*pl_info));
  543. PKTLOG_LOCK_INIT(pl_info);
  544. mutex_init(&pl_info->pktlog_mutex);
  545. pl_info->buf_size = PKTLOG_DEFAULT_BUFSIZE;
  546. pl_info->buf = NULL;
  547. pl_info->log_state = 0;
  548. pl_info->init_saved_state = 0;
  549. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  550. pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
  551. pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
  552. pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
  553. pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
  554. pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
  555. pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
  556. pl_info->pktlen = 0;
  557. pl_info->start_time_thruput = 0;
  558. pl_info->start_time_per = 0;
  559. pl_dev->vendor_cmd_send = false;
  560. pktlog_callback_registration(pl_dev->callback_type);
  561. }
/**
 * __pktlog_enable() - Core pktlog enable/disable worker (caller must
 * hold pl_info->pktlog_mutex; see pktlog_enable()).
 * @scn: opaque HIF context
 * @log_state: ATH_PKTLOG_* bitmap; 0 means disable
 * @ini_triggered: request originated from an INI setting
 * @user_triggered: request originated from a user command
 * @is_iwpriv_command: 0 for a vendor command, non-zero for iwpriv
 *
 * Allocates the host log buffer on first use, subscribes the WDI
 * callbacks, and asks the firmware to start logging. log_state == 0
 * routes to the arch-specific disable hook instead.
 *
 * Return: 0 on success; -EINVAL/-ENOMEM on failure; -EBUSY if another
 *         pktlog operation is in progress
 */
static int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
			   bool ini_triggered, uint8_t user_triggered,
			   uint32_t is_iwpriv_command)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *cdp_pdev;
	int error;

	if (!scn) {
		qdf_print("%s: Invalid scn context", __func__);
		ASSERT(0);
		return -EINVAL;
	}
	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: Invalid pktlog context", __func__);
		ASSERT(0);
		return -EINVAL;
	}
	cdp_pdev = get_txrx_context();
	if (!cdp_pdev) {
		qdf_print("%s: Invalid txrx context", __func__);
		ASSERT(0);
		return -EINVAL;
	}
	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: Invalid pl_info context", __func__);
		ASSERT(0);
		return -EINVAL;
	}
	/* States below CLEARBUFF_COMPLETE mean another op is mid-flight */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	/* is_iwpriv_command : 0 indicates its a vendor command
	 * log_state: 0 indicates pktlog disable command
	 * vendor_cmd_send flag; false means no vendor pktlog enable
	 * command was sent previously
	 */
	if (is_iwpriv_command == 0 && log_state == 0 &&
	    pl_dev->vendor_cmd_send == false) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog operation not in progress", __func__);
		return 0;
	}
	/* First enable: allocate and initialize the host log buffer */
	if (!pl_dev->tgt_pktlog_alloced) {
		if (!pl_info->buf) {
			error = pktlog_alloc_buf(scn);
			if (error != 0) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buff alloc failed",
					  __func__);
				return -ENOMEM;
			}
			/* Defensive re-check: alloc reported success but
			 * the buffer pointer is still NULL.
			 */
			if (!pl_info->buf) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buf alloc failed",
					  __func__);
				ASSERT(0);
				return -ENOMEM;
			}
		}
		qdf_spin_lock_bh(&pl_info->log_lock);
		pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
		pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
		pl_info->buf->wr_offset = 0;
		pl_info->buf->rd_offset = -1;
		/* These below variables are used by per packet stats*/
		pl_info->buf->bytes_written = 0;
		pl_info->buf->msg_index = 1;
		pl_info->buf->offset = PKTLOG_READ_OFFSET;
		qdf_spin_unlock_bh(&pl_info->log_lock);
		pl_info->start_time_thruput = os_get_timestamp();
		pl_info->start_time_per = pl_info->start_time_thruput;
		pl_dev->tgt_pktlog_alloced = true;
	}
	if (log_state != 0) {
		/* WDI subscribe */
		if (!pl_dev->is_pktlog_cb_subscribed) {
			error = wdi_pktlog_subscribe(cdp_pdev, log_state);
			if (error) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("Unable to subscribe to the WDI %s",
					  __func__);
				return -EINVAL;
			}
		} else {
			/* Already subscribed: treated as an invalid
			 * re-enable request.
			 */
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Unable to subscribe %d to the WDI %s",
				  log_state, __func__);
			return -EINVAL;
		}
		/* WMI command to enable pktlog on the firmware */
		if (pktlog_enable_tgt(scn, log_state, ini_triggered,
				      user_triggered)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Device cannot be enabled, %s", __func__);
			return -EINVAL;
		}
		pl_dev->is_pktlog_cb_subscribed = true;
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = true;
	} else {
		/* log_state == 0: route to the arch-specific disable hook */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		pl_dev->pl_funcs->pktlog_disable(scn);
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = false;
	}
	pl_info->log_state = log_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
  677. int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
  678. bool ini_triggered, uint8_t user_triggered,
  679. uint32_t is_iwpriv_command)
  680. {
  681. struct pktlog_dev_t *pl_dev;
  682. struct ath_pktlog_info *pl_info;
  683. int err;
  684. pl_dev = get_pktlog_handle();
  685. if (!pl_dev) {
  686. qdf_print("%s: invalid pl_dev handle", __func__);
  687. return -EINVAL;
  688. }
  689. pl_info = pl_dev->pl_info;
  690. if (!pl_info) {
  691. qdf_print("%s: invalid pl_info handle", __func__);
  692. return -EINVAL;
  693. }
  694. mutex_lock(&pl_info->pktlog_mutex);
  695. err = __pktlog_enable(scn, log_state, ini_triggered,
  696. user_triggered, is_iwpriv_command);
  697. mutex_unlock(&pl_info->pktlog_mutex);
  698. return err;
  699. }
/* Pktlog buffer size limits: minimum 1 MB, maximum 64 MB */
#define ONE_MEGABYTE (1024 * 1024)
#define MAX_ALLOWED_PKTLOG_SIZE (64 * ONE_MEGABYTE)

/**
 * __pktlog_setsize() - Core buffer-resize worker (caller must hold
 * pl_info->pktlog_mutex; see pktlog_setsize()).
 * @scn: opaque HIF context
 * @size: requested buffer size in bytes (1 MB..64 MB)
 *
 * Validates the requested size, refuses while logging is active, and
 * if an old buffer exists tears down the WDI subscriptions and frees
 * it before recording the new size (the new buffer is allocated lazily
 * on the next enable).
 *
 * Return: 0 on success; -EINVAL on bad size or active logging;
 *         -EBUSY if pktlog is not configured; -EFAULT on WDI failure
 */
static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *pdev;

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}
	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}
	pdev = get_txrx_context();
	if (!pdev) {
		qdf_print("%s: invalid pdev handle", __func__);
		return -EINVAL;
	}
	if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS) {
		qdf_print("%s: pktlog is not configured", __func__);
		return -EBUSY;
	}
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	/* Enforce the 1 MB..64 MB size window */
	if (size < ONE_MEGABYTE || size > MAX_ALLOWED_PKTLOG_SIZE) {
		qdf_print("%s: Cannot Set Pktlog Buffer size of %d bytes.Min required is %d MB and Max allowed is %d MB.",
			  __func__, size, (ONE_MEGABYTE / ONE_MEGABYTE),
			  (MAX_ALLOWED_PKTLOG_SIZE / ONE_MEGABYTE));
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid requested buff size", __func__);
		return -EINVAL;
	}
	if (size == pl_info->buf_size) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Pktlog Buff Size is already of same size.",
			  __func__);
		return 0;
	}
	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before changing"
			  "buffer size.", __func__);
		return -EINVAL;
	}
	qdf_spin_lock_bh(&pl_info->log_lock);
	/* Old buffer present: unsubscribe WDI and release it first */
	if (pl_info->buf) {
		if (pl_dev->is_pktlog_cb_subscribed &&
		    wdi_pktlog_unsubscribe(pdev, pl_info->log_state)) {
			pl_info->curr_pkt_state =
				PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_spin_unlock_bh(&pl_info->log_lock);
			qdf_print("Cannot unsubscribe pktlog from the WDI");
			return -EFAULT;
		}
		pktlog_release_buf(scn);
		pl_dev->is_pktlog_cb_subscribed = false;
		pl_dev->tgt_pktlog_alloced = false;
	}
	/* Record the new size; allocation happens on the next enable */
	if (size != 0) {
		qdf_print("%s: New Pktlog Buff Size is %d", __func__, size);
		pl_info->buf_size = size;
	}
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	qdf_spin_unlock_bh(&pl_info->log_lock);
	return 0;
}
  769. int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
  770. {
  771. struct pktlog_dev_t *pl_dev;
  772. struct ath_pktlog_info *pl_info;
  773. int status;
  774. pl_dev = get_pktlog_handle();
  775. if (!pl_dev) {
  776. qdf_print("%s: invalid pl_dev handle", __func__);
  777. return -EINVAL;
  778. }
  779. pl_info = pl_dev->pl_info;
  780. if (!pl_info) {
  781. qdf_print("%s: invalid pl_dev handle", __func__);
  782. return -EINVAL;
  783. }
  784. mutex_lock(&pl_info->pktlog_mutex);
  785. status = __pktlog_setsize(scn, size);
  786. mutex_unlock(&pl_info->pktlog_mutex);
  787. return status;
  788. }
/**
 * pktlog_clearbuff() - Zero out the pktlog buffer contents.
 * @scn: opaque HIF context (unused; handles come from CDS)
 * @clear_buff: must be true; false is rejected with -EINVAL
 *
 * Only allowed after a read has completed (state READ_COMPLETE or
 * later) and while logging is disabled. Resets the buffer, drops the
 * subscription/allocation flags so the next enable reinitializes, and
 * advances the operation state machine.
 *
 * Return: 0 on success; -EINVAL on bad args or active logging;
 *         -EBUSY in a conflicting state; -EFAULT on bad buffer
 */
int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}
	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}
	if (!clear_buff)
		return -EINVAL;
	/* Only legal once a read finished and clearbuff hasn't run yet */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
	    PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;
	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before clearing "
			  "pktlog buffer.", __func__);
		return -EINVAL;
	}
	if (pl_info->buf) {
		if (pl_info->buf_size > 0) {
			qdf_debug("pktlog buffer is cleared");
			memset(pl_info->buf, 0, pl_info->buf_size);
			/* Force re-subscription/re-init on the next enable */
			pl_dev->is_pktlog_cb_subscribed = false;
			pl_dev->tgt_pktlog_alloced = false;
			pl_info->buf->rd_offset = -1;
		} else {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("%s: pktlog buffer size is not proper. "
				  "Existing Buf size %d", __func__,
				  pl_info->buf_size);
			return -EFAULT;
		}
	} else {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog buff is NULL", __func__);
		return -EFAULT;
	}
	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
  844. /**
  845. * pktlog_process_fw_msg() - process packetlog message
  846. * @buff: buffer
  847. *
  848. * Return: None
  849. */
  850. void pktlog_process_fw_msg(uint32_t *buff, uint32_t len)
  851. {
  852. uint32_t *pl_hdr;
  853. uint32_t log_type;
  854. struct cdp_pdev *pdev = get_txrx_context();
  855. struct ol_fw_data pl_fw_data;
  856. if (!pdev) {
  857. qdf_print("%s: txrx_pdev is NULL", __func__);
  858. return;
  859. }
  860. pl_hdr = buff;
  861. pl_fw_data.data = pl_hdr;
  862. pl_fw_data.len = len;
  863. log_type =
  864. (*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
  865. ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
  866. if ((log_type == PKTLOG_TYPE_TX_CTRL)
  867. || (log_type == PKTLOG_TYPE_TX_STAT)
  868. || (log_type == PKTLOG_TYPE_TX_MSDU_ID)
  869. || (log_type == PKTLOG_TYPE_TX_FRM_HDR)
  870. || (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
  871. wdi_event_handler(WDI_EVENT_TX_STATUS,
  872. pdev, &pl_fw_data);
  873. else if (log_type == PKTLOG_TYPE_RC_FIND)
  874. wdi_event_handler(WDI_EVENT_RATE_FIND,
  875. pdev, &pl_fw_data);
  876. else if (log_type == PKTLOG_TYPE_RC_UPDATE)
  877. wdi_event_handler(WDI_EVENT_RATE_UPDATE,
  878. pdev, &pl_fw_data);
  879. else if (log_type == PKTLOG_TYPE_RX_STAT)
  880. wdi_event_handler(WDI_EVENT_RX_DESC,
  881. pdev, &pl_fw_data);
  882. else if (log_type == PKTLOG_TYPE_SW_EVENT)
  883. wdi_event_handler(WDI_EVENT_SW_EVENT,
  884. pdev, &pl_fw_data);
  885. }
  886. #if defined(QCA_WIFI_3_0_ADRASTEA)
  887. static inline int pktlog_nbuf_check_sanity(qdf_nbuf_t nbuf)
  888. {
  889. int rc = 0; /* sane */
  890. if ((!nbuf) ||
  891. (nbuf->data < nbuf->head) ||
  892. ((nbuf->data + skb_headlen(nbuf)) > skb_end_pointer(nbuf)))
  893. rc = -EINVAL;
  894. return rc;
  895. }
  896. /**
  897. * pktlog_t2h_msg_handler() - Target to host message handler
  898. * @context: pdev context
  899. * @pkt: HTC packet
  900. *
  901. * Return: None
  902. */
  903. static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
  904. {
  905. struct pktlog_dev_t *pdev = (struct pktlog_dev_t *)context;
  906. qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
  907. uint32_t *msg_word;
  908. uint32_t msg_len;
  909. /* check for sanity of the packet, have seen corrupted pkts */
  910. if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) {
  911. qdf_print("%s: packet 0x%pK corrupted? Leaking...",
  912. __func__, pktlog_t2h_msg);
  913. /* do not free; may crash! */
  914. QDF_ASSERT(0);
  915. return;
  916. }
  917. /* check for successful message reception */
  918. if (pkt->Status != QDF_STATUS_SUCCESS) {
  919. if (pkt->Status != QDF_STATUS_E_CANCELED)
  920. pdev->htc_err_cnt++;
  921. qdf_nbuf_free(pktlog_t2h_msg);
  922. return;
  923. }
  924. /* confirm alignment */
  925. qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);
  926. msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
  927. msg_len = qdf_nbuf_len(pktlog_t2h_msg);
  928. pktlog_process_fw_msg(msg_word, msg_len);
  929. qdf_nbuf_free(pktlog_t2h_msg);
  930. }
  931. /**
  932. * pktlog_tx_resume_handler() - resume callback
  933. * @context: pdev context
  934. *
  935. * Return: None
  936. */
  937. static void pktlog_tx_resume_handler(void *context)
  938. {
  939. qdf_print("%s: Not expected", __func__);
  940. qdf_assert(0);
  941. }
  942. /**
  943. * pktlog_h2t_send_complete() - send complete indication
  944. * @context: pdev context
  945. * @htc_pkt: HTC packet
  946. *
  947. * Return: None
  948. */
  949. static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
  950. {
  951. qdf_print("%s: Not expected", __func__);
  952. qdf_assert(0);
  953. }
  954. /**
  955. * pktlog_h2t_full() - queue full indication
  956. * @context: pdev context
  957. * @pkt: HTC packet
  958. *
  959. * Return: HTC action
  960. */
  961. static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt)
  962. {
  963. return HTC_SEND_FULL_KEEP;
  964. }
  965. /**
  966. * pktlog_htc_connect_service() - create new endpoint for packetlog
  967. * @pdev - pktlog pdev
  968. *
  969. * Return: 0 for success/failure
  970. */
  971. static int pktlog_htc_connect_service(struct pktlog_dev_t *pdev)
  972. {
  973. struct htc_service_connect_req connect;
  974. struct htc_service_connect_resp response;
  975. QDF_STATUS status;
  976. qdf_mem_zero(&connect, sizeof(connect));
  977. qdf_mem_zero(&response, sizeof(response));
  978. connect.pMetaData = NULL;
  979. connect.MetaDataLength = 0;
  980. connect.EpCallbacks.pContext = pdev;
  981. connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
  982. connect.EpCallbacks.EpTxCompleteMultiple = NULL;
  983. connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
  984. connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;
  985. /* rx buffers currently are provided by HIF, not by EpRecvRefill */
  986. connect.EpCallbacks.EpRecvRefill = NULL;
  987. connect.EpCallbacks.RecvRefillWaterMark = 1;
  988. /* N/A, fill is done by HIF */
  989. connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
  990. /*
  991. * Specify how deep to let a queue get before htc_send_pkt will
  992. * call the EpSendFull function due to excessive send queue depth.
  993. */
  994. connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;
  995. /* disable flow control for HTT data message service */
  996. connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
  997. /* connect to control service */
  998. connect.service_id = PACKET_LOG_SVC;
  999. status = htc_connect_service(pdev->htc_pdev, &connect, &response);
  1000. if (status != QDF_STATUS_SUCCESS) {
  1001. pdev->mt_pktlog_enabled = false;
  1002. return -EIO; /* failure */
  1003. }
  1004. pdev->htc_endpoint = response.Endpoint;
  1005. pdev->mt_pktlog_enabled = true;
  1006. return 0; /* success */
  1007. }
  1008. /**
  1009. * pktlog_htc_attach() - attach pktlog HTC service
  1010. *
  1011. * Return: 0 for success/failure
  1012. */
  1013. int pktlog_htc_attach(void)
  1014. {
  1015. struct pktlog_dev_t *pl_pdev = get_pktlog_handle();
  1016. void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);
  1017. if ((!pl_pdev) || (!htc_pdev)) {
  1018. qdf_print("Invalid pl_dev or htc_pdev handle");
  1019. return -EINVAL;
  1020. }
  1021. pl_pdev->htc_pdev = htc_pdev;
  1022. return pktlog_htc_connect_service(pl_pdev);
  1023. }
  1024. #else
  1025. int pktlog_htc_attach(void)
  1026. {
  1027. struct pktlog_dev_t *pl_dev = get_pktlog_handle();
  1028. if (!pl_dev) {
  1029. qdf_print("Invalid pl_dev handle");
  1030. return -EINVAL;
  1031. }
  1032. pl_dev->mt_pktlog_enabled = false;
  1033. return 0;
  1034. }
  1035. #endif
  1036. #endif /* REMOVE_PKT_LOG */