pktlog_ac.c

/*
 * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef REMOVE_PKT_LOG
#include "qdf_mem.h"
#include "athdefs.h"
#include "pktlog_ac_i.h"
#include "cds_api.h"
#include "wma_types.h"
#include "htc.h"
#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_ctrl.h>

wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_T2H_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_OFFLOAD_SUBSCRIBER;

struct ol_pl_arch_dep_funcs ol_pl_funcs = {
	.pktlog_init = pktlog_init,
	.pktlog_enable = pktlog_enable,
	.pktlog_setsize = pktlog_setsize,
	.pktlog_disable = pktlog_disable,       /* valid for f/w disable */
};

struct pktlog_dev_t pl_dev = {
	.pl_funcs = &ol_pl_funcs,
};
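
/**
 * pktlog_sethandle() - store the HIF context and expose the pktlog device
 * @pl_handle: out parameter; set to point at the static pktlog device
 * @scn: HIF opaque context remembered in the pktlog device
 *
 * Return: None
 */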
void pktlog_sethandle(struct pktlog_dev_t **pl_handle,
		      struct hif_opaque_softc *scn)
{
	pl_dev.scn = (ol_ath_generic_softc_handle) scn;
	*pl_handle = &pl_dev;
}
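
/**
 * pktlog_set_callback_regtype() - select the callback registration scheme
 * @callback_type: PKTLOG_DEFAULT_CALLBACK_REGISTRATION or
 *	PKTLOG_LITE_CALLBACK_REGISTRATION; consumed later by pktlog_init()
 *
 * Return: None
 */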
void pktlog_set_callback_regtype(
		enum pktlog_callback_regtype callback_type)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("Invalid pl_dev");
		return;
	}

	pl_dev->callback_type = callback_type;
}

#ifdef CONFIG_MCL
struct pktlog_dev_t *get_pktlog_handle(void)
{
	struct cdp_pdev *pdev_txrx_handle =
				cds_get_context(QDF_MODULE_ID_TXRX);
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	return cdp_get_pldev(soc, pdev_txrx_handle);
}

/*
 * Get current txrx context
 */
void *get_txrx_context(void)
{
	return cds_get_context(QDF_MODULE_ID_TXRX);
}
#else
/* TODO: Need to use WIN implementation to return pktlog_dev handle */
static inline struct pktlog_dev_t *get_pktlog_handle(void)
{
	return NULL;
}

static inline void *get_txrx_context(void)
{
	return NULL;
}
#endif
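
/**
 * pktlog_wma_post_msg() - post a pktlog enable/disable request to WMA
 * @event_types: WMI_PKTLOG_EVENT bitmap to enable
 * @cmd_id: WMI_PDEV_PKTLOG_ENABLE_CMDID or WMI_PDEV_PKTLOG_DISABLE_CMDID
 * @ini_triggered: true if the request came from an INI setting
 * @user_triggered: non-zero if the request came from user space
 *
 * Return: A_OK on success, A_NO_MEMORY or A_ERROR on failure
 */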
static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
				    WMI_CMD_ID cmd_id, bool ini_triggered,
				    uint8_t user_triggered)
{
	struct scheduler_msg msg = { 0 };
	QDF_STATUS status;
	struct ath_pktlog_wmi_params *param;

	param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
	if (!param)
		return A_NO_MEMORY;

	param->cmd_id = cmd_id;
	param->pktlog_event = event_types;
	param->ini_triggered = ini_triggered;
	param->user_triggered = user_triggered;

	msg.type = WMA_PKTLOG_ENABLE_REQ;
	msg.bodyptr = param;
	msg.bodyval = 0;

	status = scheduler_post_message(QDF_MODULE_ID_WMA,
					QDF_MODULE_ID_WMA,
					QDF_MODULE_ID_WMA, &msg);
	if (status != QDF_STATUS_SUCCESS) {
		qdf_mem_free(param);
		return A_ERROR;
	}

	return A_OK;
}
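
/**
 * pktlog_enable_tgt() - map log_state bits to WMI events and enable them
 * @_scn: HIF opaque context (unused here; kept for the call signature)
 * @log_state: ATH_PKTLOG_* bitmap requested by the caller
 * @ini_triggered: true if triggered by an INI setting
 * @user_triggered: non-zero if triggered from user space
 *
 * Return: A_OK on success, A_NO_MEMORY or A_ERROR on failure
 */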
static inline A_STATUS
pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
		  bool ini_triggered, uint8_t user_triggered)
{
	uint32_t types = 0;

	if (log_state & ATH_PKTLOG_TX)
		types |= WMI_PKTLOG_EVENT_TX;

	if (log_state & ATH_PKTLOG_RX)
		types |= WMI_PKTLOG_EVENT_RX;

	if (log_state & ATH_PKTLOG_RCFIND)
		types |= WMI_PKTLOG_EVENT_RCF;

	if (log_state & ATH_PKTLOG_RCUPDATE)
		types |= WMI_PKTLOG_EVENT_RCU;

	if (log_state & ATH_PKTLOG_SW_EVENT)
		types |= WMI_PKTLOG_EVENT_SW;

	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "%s: Pktlog events: %d", __func__, types);

	return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
				   ini_triggered, user_triggered);
}

#ifdef HELIUMPLUS
/**
 * wdi_pktlog_subscribe() - Subscribe pktlog callbacks
 * @cdp_pdev: abstract pdev handle
 * @log_state: Pktlog registration
 *
 * Return: zero on success, non-zero on failure
 */
static inline A_STATUS
wdi_pktlog_subscribe(struct cdp_pdev *cdp_pdev, int32_t log_state)
{
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (!cdp_pdev) {
		qdf_print("Invalid pdev in %s", __func__);
		return A_ERROR;
	}

	if (log_state & ATH_PKTLOG_TX) {
		if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_TX_SUBSCRIBER,
				      WDI_EVENT_TX_STATUS)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_RX) {
		if (cdp_wdi_event_sub(soc, cdp_pdev, &PKTLOG_RX_SUBSCRIBER,
				      WDI_EVENT_RX_DESC)) {
			return A_ERROR;
		}
		if (cdp_wdi_event_sub(soc, cdp_pdev,
				      &PKTLOG_RX_REMOTE_SUBSCRIBER,
				      WDI_EVENT_RX_DESC_REMOTE)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_RCFIND) {
		if (cdp_wdi_event_sub(soc, cdp_pdev,
				      &PKTLOG_RCFIND_SUBSCRIBER,
				      WDI_EVENT_RATE_FIND)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_RCUPDATE) {
		if (cdp_wdi_event_sub(soc, cdp_pdev,
				      &PKTLOG_RCUPDATE_SUBSCRIBER,
				      WDI_EVENT_RATE_UPDATE)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_SW_EVENT) {
		if (cdp_wdi_event_sub(soc, cdp_pdev,
				      &PKTLOG_SW_EVENT_SUBSCRIBER,
				      WDI_EVENT_SW_EVENT)) {
			return A_ERROR;
		}
	}

	return A_OK;
}
#else
static inline A_STATUS
wdi_pktlog_subscribe(struct cdp_pdev *cdp_pdev, int32_t log_state)
{
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if (!cdp_pdev) {
		qdf_print("Invalid pdev in %s", __func__);
		return A_ERROR;
	}

	if ((log_state & ATH_PKTLOG_TX) ||
	    (log_state & ATH_PKTLOG_RCFIND) ||
	    (log_state & ATH_PKTLOG_RCUPDATE) ||
	    (log_state & ATH_PKTLOG_RX)) {
		if (cdp_wdi_event_sub(soc,
				      cdp_pdev,
				      &PKTLOG_OFFLOAD_SUBSCRIBER,
				      WDI_EVENT_OFFLOAD_ALL)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_RX) {
		if (cdp_wdi_event_sub(soc, cdp_pdev,
				      &PKTLOG_RX_SUBSCRIBER,
				      WDI_EVENT_RX_DESC)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_SW_EVENT) {
		if (cdp_wdi_event_sub(soc, cdp_pdev,
				      &PKTLOG_SW_EVENT_SUBSCRIBER,
				      WDI_EVENT_SW_EVENT)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_LITE_T2H) {
		if (cdp_wdi_event_sub(soc, cdp_pdev,
				      &PKTLOG_LITE_T2H_SUBSCRIBER,
				      WDI_EVENT_LITE_T2H)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_LITE_RX) {
		if (cdp_wdi_event_sub(soc, cdp_pdev,
				      &PKTLOG_LITE_RX_SUBSCRIBER,
				      WDI_EVENT_LITE_RX)) {
			return A_ERROR;
		}
	}

	return A_OK;
}
#endif
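
/**
 * pktlog_callback() - WDI event dispatcher for full pktlog
 * @pdev: pdev handle passed through to the process_* helpers
 * @event: WDI_EVENT that fired
 * @log_data: event payload
 * @peer_id: peer id (unused)
 * @status: event status (unused)
 *
 * Return: None
 */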
void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data,
		     u_int16_t peer_id, uint32_t status)
{
	switch (event) {
	case WDI_EVENT_OFFLOAD_ALL:
	{
		if (process_offload_pktlog(pdev, log_data)) {
			qdf_print("Unable to process offload info");
			return;
		}
		break;
	}
	case WDI_EVENT_TX_STATUS:
	{
		/*
		 * process TX message
		 */
		if (process_tx_info(pdev, log_data)) {
			qdf_print("Unable to process TX info");
			return;
		}
		break;
	}
	case WDI_EVENT_RX_DESC:
	{
		/*
		 * process RX message for local frames
		 */
		if (process_rx_info(pdev, log_data)) {
			qdf_print("Unable to process RX info");
			return;
		}
		break;
	}
	case WDI_EVENT_RX_DESC_REMOTE:
	{
		/*
		 * process RX message for remote frames
		 */
		if (process_rx_info_remote(pdev, log_data)) {
			qdf_print("Unable to process RX info");
			return;
		}
		break;
	}
	case WDI_EVENT_RATE_FIND:
	{
		/*
		 * process RATE_FIND message
		 */
		if (process_rate_find(pdev, log_data)) {
			qdf_print("Unable to process RC_FIND info");
			return;
		}
		break;
	}
	case WDI_EVENT_RATE_UPDATE:
	{
		/*
		 * process RATE_UPDATE message
		 */
		if (process_rate_update(pdev, log_data)) {
			qdf_print("Unable to process RC_UPDATE");
			return;
		}
		break;
	}
	case WDI_EVENT_SW_EVENT:
	{
		/*
		 * process SW EVENT message
		 */
		if (process_sw_event(pdev, log_data)) {
			qdf_print("Unable to process SW_EVENT");
			return;
		}
		break;
	}
	default:
		break;
	}
}
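
/**
 * lit_pktlog_callback() - WDI event dispatcher for pktlog-lite
 * @context: pdev handle passed through to the process_* helpers
 * @event: WDI_EVENT that fired
 * @log_data: event payload
 * @peer_id: peer id (unused)
 * @status: event status (unused)
 *
 * Return: None
 */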
void
lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data,
		    u_int16_t peer_id, uint32_t status)
{
	switch (event) {
	case WDI_EVENT_RX_DESC:
	{
		if (process_rx_desc_remote(context, log_data)) {
			qdf_print("Unable to process RX info");
			return;
		}
		break;
	}
	case WDI_EVENT_LITE_T2H:
	{
		if (process_pktlog_lite(context, log_data,
					PKTLOG_TYPE_LITE_T2H)) {
			qdf_print("Unable to process lite_t2h");
			return;
		}
		break;
	}
	case WDI_EVENT_LITE_RX:
	{
		if (process_pktlog_lite(context, log_data,
					PKTLOG_TYPE_LITE_RX)) {
			qdf_print("Unable to process lite_rx");
			return;
		}
		break;
	}
	default:
		break;
	}
}

#ifdef HELIUMPLUS
/**
 * wdi_pktlog_unsubscribe() - Unsubscribe pktlog callbacks
 * @pdev: abstract pdev handle
 * @log_state: Pktlog registration
 *
 * Return: zero on success, non-zero on failure
 */
A_STATUS
wdi_pktlog_unsubscribe(struct cdp_pdev *pdev, uint32_t log_state)
{
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	/* TODO: WIN implementation to get soc */

	if (log_state & ATH_PKTLOG_TX) {
		if (cdp_wdi_event_unsub(soc, pdev,
					&PKTLOG_TX_SUBSCRIBER,
					WDI_EVENT_TX_STATUS)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_RX) {
		if (cdp_wdi_event_unsub(soc, pdev,
					&PKTLOG_RX_SUBSCRIBER,
					WDI_EVENT_RX_DESC)) {
			return A_ERROR;
		}
		if (cdp_wdi_event_unsub(soc, pdev,
					&PKTLOG_RX_REMOTE_SUBSCRIBER,
					WDI_EVENT_RX_DESC_REMOTE)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_RCFIND) {
		if (cdp_wdi_event_unsub(soc, pdev,
					&PKTLOG_RCFIND_SUBSCRIBER,
					WDI_EVENT_RATE_FIND)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_RCUPDATE) {
		if (cdp_wdi_event_unsub(soc, pdev,
					&PKTLOG_RCUPDATE_SUBSCRIBER,
					WDI_EVENT_RATE_UPDATE)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_SW_EVENT) {
		if (cdp_wdi_event_unsub(soc, pdev,
					&PKTLOG_SW_EVENT_SUBSCRIBER,
					WDI_EVENT_SW_EVENT)) {
			return A_ERROR;
		}
	}

	return A_OK;
}
#else
A_STATUS
wdi_pktlog_unsubscribe(struct cdp_pdev *pdev, uint32_t log_state)
{
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);

	if ((log_state & ATH_PKTLOG_TX) ||
	    (log_state & ATH_PKTLOG_RCFIND) ||
	    (log_state & ATH_PKTLOG_RCUPDATE) ||
	    (log_state & ATH_PKTLOG_RX)) {
		if (cdp_wdi_event_unsub(soc,
					pdev,
					&PKTLOG_OFFLOAD_SUBSCRIBER,
					WDI_EVENT_OFFLOAD_ALL)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_RX) {
		if (cdp_wdi_event_unsub(soc, pdev,
					&PKTLOG_RX_SUBSCRIBER,
					WDI_EVENT_RX_DESC)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_LITE_T2H) {
		if (cdp_wdi_event_unsub(soc, pdev,
					&PKTLOG_LITE_T2H_SUBSCRIBER,
					WDI_EVENT_LITE_T2H)) {
			return A_ERROR;
		}
	}
	if (log_state & ATH_PKTLOG_LITE_RX) {
		if (cdp_wdi_event_unsub(soc, pdev,
					&PKTLOG_LITE_RX_SUBSCRIBER,
					WDI_EVENT_LITE_RX)) {
			return A_ERROR;
		}
	}

	return A_OK;
}
#endif
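
/**
 * pktlog_disable() - disable pktlog in the target and tear down callbacks
 * @scn: HIF opaque context
 *
 * Sends WMI_PDEV_PKTLOG_DISABLE_CMDID to the firmware and, if WDI
 * callbacks are currently subscribed, unsubscribes them.
 *
 * Return: 0 on success, -EINVAL or -EBUSY on failure
 */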
int pktlog_disable(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;
	struct cdp_pdev *txrx_pdev = get_txrx_context();

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("Invalid pl_dev");
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_dev->pl_info) {
		qdf_print("Invalid pl_info");
		return -EINVAL;
	}

	if (!txrx_pdev) {
		qdf_print("Invalid cdp_pdev");
		return -EINVAL;
	}

	if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
	    pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Failed to disable pktlog in target");
		return -EINVAL;
	}

	if (pl_dev->is_pktlog_cb_subscribed &&
	    wdi_pktlog_unsubscribe(txrx_pdev, pl_info->log_state)) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Cannot unsubscribe pktlog from the WDI");
		return -EINVAL;
	}
	pl_dev->is_pktlog_cb_subscribed = false;

	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;

	return 0;
}
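
/**
 * pktlog_init() - reset pktlog state and wire up subscriber callbacks
 * @scn: HIF opaque context (unused; state is reached via the pktlog handle)
 *
 * Zeroes the ath_pktlog_info block, initializes its locks, applies the
 * default thresholds, and assigns the WDI callbacks that match the
 * registration type selected via pktlog_set_callback_regtype().
 *
 * Return: None
 */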
void pktlog_init(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
	struct ath_pktlog_info *pl_info;

	if (pl_dev == NULL || pl_dev->pl_info == NULL) {
		qdf_print("pl_dev or pl_info is invalid");
		return;
	}

	pl_info = pl_dev->pl_info;

	OS_MEMZERO(pl_info, sizeof(*pl_info));
	PKTLOG_LOCK_INIT(pl_info);
	mutex_init(&pl_info->pktlog_mutex);

	pl_info->buf_size = PKTLOG_DEFAULT_BUFSIZE;
	pl_info->buf = NULL;
	pl_info->log_state = 0;
	pl_info->init_saved_state = 0;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
	pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
	pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
	pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
	pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
	pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
	pl_info->pktlen = 0;
	pl_info->start_time_thruput = 0;
	pl_info->start_time_per = 0;
	pl_dev->vendor_cmd_send = false;

	if (pl_dev->callback_type == PKTLOG_DEFAULT_CALLBACK_REGISTRATION) {
		PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback;
		PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback;
	} else if (pl_dev->callback_type == PKTLOG_LITE_CALLBACK_REGISTRATION) {
		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
		PKTLOG_LITE_RX_SUBSCRIBER.callback = lit_pktlog_callback;
		PKTLOG_OFFLOAD_SUBSCRIBER.callback = pktlog_callback;
	}
}
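
/**
 * __pktlog_enable() - enable or disable pktlog (unlocked core)
 * @scn: HIF opaque context
 * @log_state: ATH_PKTLOG_* bitmap; 0 means disable
 * @ini_triggered: true if triggered by an INI setting
 * @user_triggered: non-zero if triggered from user space
 * @is_iwpriv_command: 0 for a vendor command, non-zero for iwpriv
 *
 * Allocates the log buffer on first use, subscribes the WDI callbacks,
 * and enables the requested event types in the firmware. Callers must
 * hold pl_info->pktlog_mutex; use pktlog_enable() instead.
 *
 * Return: 0 on success, -errno on failure
 */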
static int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
			   bool ini_triggered, uint8_t user_triggered,
			   uint32_t is_iwpriv_command)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *cdp_pdev;
	int error;

	if (!scn) {
		qdf_print("%s: Invalid scn context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: Invalid pktlog context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	cdp_pdev = get_txrx_context();
	if (!cdp_pdev) {
		qdf_print("%s: Invalid txrx context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: Invalid pl_info context", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	/* is_iwpriv_command: 0 indicates it is a vendor command
	 * log_state: 0 indicates a pktlog disable command
	 * vendor_cmd_send flag: false means no vendor pktlog enable
	 * command was sent previously
	 */
	if (is_iwpriv_command == 0 && log_state == 0 &&
	    pl_dev->vendor_cmd_send == false) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog operation not in progress", __func__);
		return 0;
	}

	if (!pl_dev->tgt_pktlog_alloced) {
		if (pl_info->buf == NULL) {
			error = pktlog_alloc_buf(scn);
			if (error != 0) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buff alloc failed",
					  __func__);
				return -ENOMEM;
			}

			if (!pl_info->buf) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("%s: pktlog buf alloc failed",
					  __func__);
				ASSERT(0);
				return -ENOMEM;
			}
		}

		qdf_spin_lock_bh(&pl_info->log_lock);
		pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
		pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
		pl_info->buf->wr_offset = 0;
		pl_info->buf->rd_offset = -1;
		/* These below variables are used by per packet stats */
		pl_info->buf->bytes_written = 0;
		pl_info->buf->msg_index = 1;
		pl_info->buf->offset = PKTLOG_READ_OFFSET;
		qdf_spin_unlock_bh(&pl_info->log_lock);

		pl_info->start_time_thruput = os_get_timestamp();
		pl_info->start_time_per = pl_info->start_time_thruput;

		pl_dev->tgt_pktlog_alloced = true;
	}

	if (log_state != 0) {
		/* WDI subscribe */
		if (!pl_dev->is_pktlog_cb_subscribed) {
			error = wdi_pktlog_subscribe(cdp_pdev, log_state);
			if (error) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("Unable to subscribe to the WDI %s",
					  __func__);
				return -EINVAL;
			}
		} else {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Unable to subscribe %d to the WDI %s",
				  log_state, __func__);
			return -EINVAL;
		}
		/* WMI command to enable pktlog on the firmware */
		if (pktlog_enable_tgt(scn, log_state, ini_triggered,
				      user_triggered)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Device cannot be enabled, %s", __func__);
			return -EINVAL;
		}
		pl_dev->is_pktlog_cb_subscribed = true;

		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = true;
	} else {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		pl_dev->pl_funcs->pktlog_disable(scn);
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = false;
	}

	pl_info->log_state = log_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
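
/**
 * pktlog_enable() - locked wrapper around __pktlog_enable()
 * @scn: HIF opaque context
 * @log_state: ATH_PKTLOG_* bitmap; 0 means disable
 * @ini_triggered: true if triggered by an INI setting
 * @user_triggered: non-zero if triggered from user space
 * @is_iwpriv_command: 0 for a vendor command, non-zero for iwpriv
 *
 * A hypothetical caller (illustration only; flag values assumed) could
 * enable TX and RX logging from an iwpriv path with:
 *
 *	pktlog_enable(scn, ATH_PKTLOG_TX | ATH_PKTLOG_RX, false, 1, 1);
 *
 * Return: 0 on success, -errno on failure
 */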
int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
		  bool ini_triggered, uint8_t user_triggered,
		  uint32_t is_iwpriv_command)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	int err;

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: invalid pl_info handle", __func__);
		return -EINVAL;
	}

	mutex_lock(&pl_info->pktlog_mutex);
	err = __pktlog_enable(scn, log_state, ini_triggered,
			      user_triggered, is_iwpriv_command);
	mutex_unlock(&pl_info->pktlog_mutex);
	return err;
}

#define ONE_MEGABYTE (1024 * 1024)
#define MAX_ALLOWED_PKTLOG_SIZE (16 * ONE_MEGABYTE)
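
/**
 * __pktlog_setsize() - resize the pktlog buffer (unlocked core)
 * @scn: HIF opaque context
 * @size: requested buffer size in bytes, between ONE_MEGABYTE and
 *	MAX_ALLOWED_PKTLOG_SIZE
 *
 * Logging must be disabled first; any existing buffer is released and
 * the new size takes effect on the next enable. Callers must hold
 * pl_info->pktlog_mutex; use pktlog_setsize() instead.
 *
 * Return: 0 on success, -errno on failure
 */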
static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct cdp_pdev *pdev;

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: invalid pl_info handle", __func__);
		return -EINVAL;
	}

	pdev = get_txrx_context();
	if (!pdev) {
		qdf_print("%s: invalid pdev handle", __func__);
		return -EINVAL;
	}

	if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS) {
		qdf_print("%s: pktlog is not configured", __func__);
		return -EBUSY;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (size < ONE_MEGABYTE || size > MAX_ALLOWED_PKTLOG_SIZE) {
		qdf_print("%s: Cannot set pktlog buffer size of %d bytes. Min required is %d MB and max allowed is %d MB.",
			  __func__, size, (ONE_MEGABYTE / ONE_MEGABYTE),
			  (MAX_ALLOWED_PKTLOG_SIZE / ONE_MEGABYTE));
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		return -EINVAL;
	}

	if (size == pl_info->buf_size) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Pktlog buffer is already of the same size.",
			  __func__);
		return 0;
	}

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before changing buffer size.",
			  __func__);
		return -EINVAL;
	}

	qdf_spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf != NULL) {
		if (pl_dev->is_pktlog_cb_subscribed &&
		    wdi_pktlog_unsubscribe(pdev, pl_info->log_state)) {
			pl_info->curr_pkt_state =
				PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_spin_unlock_bh(&pl_info->log_lock);
			qdf_print("Cannot unsubscribe pktlog from the WDI");
			return -EFAULT;
		}
		pktlog_release_buf(scn);
		pl_dev->is_pktlog_cb_subscribed = false;
		pl_dev->tgt_pktlog_alloced = false;
	}

	if (size != 0) {
		qdf_print("%s: New pktlog buffer size is %d", __func__, size);
		pl_info->buf_size = size;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	qdf_spin_unlock_bh(&pl_info->log_lock);
	return 0;
}
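
/**
 * pktlog_setsize() - locked wrapper around __pktlog_setsize()
 * @scn: HIF opaque context
 * @size: requested buffer size in bytes
 *
 * Return: 0 on success, -errno on failure
 */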
int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	int status;

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: invalid pl_info handle", __func__);
		return -EINVAL;
	}

	mutex_lock(&pl_info->pktlog_mutex);
	status = __pktlog_setsize(scn, size);
	mutex_unlock(&pl_info->pktlog_mutex);
	return status;
}
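
/**
 * pktlog_clearbuff() - zero the pktlog buffer contents
 * @scn: HIF opaque context
 * @clear_buff: must be true; a false request is rejected
 *
 * Logging must be disabled and a read must have completed before the
 * buffer can be cleared.
 *
 * Return: 0 on success, -errno on failure
 */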
int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("%s: invalid pl_info handle", __func__);
		return -EINVAL;
	}

	if (!clear_buff)
		return -EINVAL;

	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
				PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before clearing pktlog buffer.",
			  __func__);
		return -EINVAL;
	}

	if (pl_info->buf != NULL) {
		if (pl_info->buf_size > 0) {
			qdf_print("%s: pktlog buffer is cleared.", __func__);
			memset(pl_info->buf, 0, pl_info->buf_size);
			pl_dev->is_pktlog_cb_subscribed = false;
			pl_dev->tgt_pktlog_alloced = false;
			pl_info->buf->rd_offset = -1;
		} else {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("%s: pktlog buffer size is not proper. Existing buf size %d",
				  __func__, pl_info->buf_size);
			return -EFAULT;
		}
	} else {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog buff is NULL", __func__);
		return -EFAULT;
	}

	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;

	return 0;
}

/**
 * pktlog_process_fw_msg() - process a packet log message from firmware
 * @buff: buffer containing the pktlog header and payload
 * @len: length of the buffer in bytes
 *
 * Return: None
 */
void pktlog_process_fw_msg(uint32_t *buff, uint32_t len)
{
	uint32_t *pl_hdr;
	uint32_t log_type;
	struct cdp_pdev *pdev = get_txrx_context();
	struct ol_fw_data pl_fw_data;

	if (!pdev) {
		qdf_print("%s: txrx_pdev is NULL", __func__);
		return;
	}

	pl_hdr = buff;
	pl_fw_data.data = pl_hdr;
	pl_fw_data.len = len;

	log_type =
		(*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
		ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;

	if ((log_type == PKTLOG_TYPE_TX_CTRL)
	    || (log_type == PKTLOG_TYPE_TX_STAT)
	    || (log_type == PKTLOG_TYPE_TX_MSDU_ID)
	    || (log_type == PKTLOG_TYPE_TX_FRM_HDR)
	    || (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
		wdi_event_handler(WDI_EVENT_TX_STATUS,
				  pdev, &pl_fw_data);
	else if (log_type == PKTLOG_TYPE_RC_FIND)
		wdi_event_handler(WDI_EVENT_RATE_FIND,
				  pdev, &pl_fw_data);
	else if (log_type == PKTLOG_TYPE_RC_UPDATE)
		wdi_event_handler(WDI_EVENT_RATE_UPDATE,
				  pdev, &pl_fw_data);
	else if (log_type == PKTLOG_TYPE_RX_STAT)
		wdi_event_handler(WDI_EVENT_RX_DESC,
				  pdev, &pl_fw_data);
	else if (log_type == PKTLOG_TYPE_SW_EVENT)
		wdi_event_handler(WDI_EVENT_SW_EVENT,
				  pdev, &pl_fw_data);
}

#if defined(QCA_WIFI_3_0_ADRASTEA)
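/**
 * pktlog_nbuf_check_sanity() - check an nbuf for pointer corruption
 * @nbuf: network buffer received from HTC
 *
 * Return: 0 if the buffer looks sane, -EINVAL if it appears corrupted
 */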
static inline int pktlog_nbuf_check_sanity(qdf_nbuf_t nbuf)
{
	int rc = 0; /* sane */

	if ((!nbuf) ||
	    (nbuf->data < nbuf->head) ||
	    ((nbuf->data + skb_headlen(nbuf)) > skb_end_pointer(nbuf)))
		rc = -EINVAL;

	return rc;
}

/**
 * pktlog_t2h_msg_handler() - Target to host message handler
 * @context: pdev context
 * @pkt: HTC packet
 *
 * Return: None
 */
static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct pktlog_dev_t *pdev = (struct pktlog_dev_t *)context;
	qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	uint32_t *msg_word;
	uint32_t msg_len;

	/* check for sanity of the packet; corrupted pkts have been seen */
	if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) {
		qdf_print("%s: packet 0x%pK corrupted? Leaking...",
			  __func__, pktlog_t2h_msg);
		/* do not free; may crash! */
		QDF_ASSERT(0);
		return;
	}

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			pdev->htc_err_cnt++;
		qdf_nbuf_free(pktlog_t2h_msg);
		return;
	}

	/* confirm alignment */
	qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);

	msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
	msg_len = qdf_nbuf_len(pktlog_t2h_msg);
	pktlog_process_fw_msg(msg_word, msg_len);

	qdf_nbuf_free(pktlog_t2h_msg);
}

/**
 * pktlog_tx_resume_handler() - resume callback
 * @context: pdev context
 *
 * Return: None
 */
static void pktlog_tx_resume_handler(void *context)
{
	qdf_print("%s: Not expected", __func__);
	qdf_assert(0);
}

/**
 * pktlog_h2t_send_complete() - send complete indication
 * @context: pdev context
 * @htc_pkt: HTC packet
 *
 * Return: None
 */
static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	qdf_print("%s: Not expected", __func__);
	qdf_assert(0);
}

/**
 * pktlog_h2t_full() - queue full indication
 * @context: pdev context
 * @pkt: HTC packet
 *
 * Return: HTC action
 */
static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt)
{
	return HTC_SEND_FULL_KEEP;
}

/**
 * pktlog_htc_connect_service() - create a new HTC endpoint for packet log
 * @pdev: pktlog pdev
 *
 * Return: 0 on success, -EIO on failure
 */
static int pktlog_htc_connect_service(struct pktlog_dev_t *pdev)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	QDF_STATUS status;

	qdf_mem_set(&connect, sizeof(connect), 0);
	qdf_mem_set(&response, sizeof(response), 0);

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = pdev;
	connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
	connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;
	connect.EpCallbacks.RecvRefillWaterMark = 1;
	/* N/A, fill is done by HIF */

	connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to control service */
	connect.service_id = PACKET_LOG_SVC;

	status = htc_connect_service(pdev->htc_pdev, &connect, &response);

	if (status != QDF_STATUS_SUCCESS) {
		pdev->mt_pktlog_enabled = false;
		return -EIO;    /* failure */
	}

	pdev->htc_endpoint = response.Endpoint;
	pdev->mt_pktlog_enabled = true;

	return 0;               /* success */
}

/**
 * pktlog_htc_attach() - attach the pktlog HTC service
 *
 * Return: 0 on success, negative errno on failure
 */
int pktlog_htc_attach(void)
{
	struct pktlog_dev_t *pl_pdev = get_pktlog_handle();
	void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);

	if ((!pl_pdev) || (!htc_pdev)) {
		qdf_print("Invalid pl_dev or htc_pdev handle");
		return -EINVAL;
	}

	pl_pdev->htc_pdev = htc_pdev;
	return pktlog_htc_connect_service(pl_pdev);
}
#else
int pktlog_htc_attach(void)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		qdf_print("Invalid pl_dev handle");
		return -EINVAL;
	}

	pl_dev->mt_pktlog_enabled = false;
	return 0;
}
#endif
#endif /* REMOVE_PKT_LOG */