/* pktlog_ac.c */
/*
 * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */
/*
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
  40. #ifndef REMOVE_PKT_LOG
  41. #include "qdf_mem.h"
  42. #include "athdefs.h"
  43. #include "pktlog_ac_i.h"
  44. #include "cds_api.h"
  45. #include "wma_types.h"
  46. #include "htc.h"
  47. wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
  48. wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
  49. wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
  50. wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
  51. wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
  52. wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;
  53. struct ol_pl_arch_dep_funcs ol_pl_funcs = {
  54. .pktlog_init = pktlog_init,
  55. .pktlog_enable = pktlog_enable,
  56. .pktlog_setsize = pktlog_setsize,
  57. .pktlog_disable = pktlog_disable, /* valid for f/w disable */
  58. };
  59. struct ol_pktlog_dev_t ol_pl_dev = {
  60. .pl_funcs = &ol_pl_funcs,
  61. };
  62. void ol_pl_sethandle(ol_pktlog_dev_handle *pl_handle,
  63. struct hif_opaque_softc *scn)
  64. {
  65. ol_pl_dev.scn = (ol_ath_generic_softc_handle) scn;
  66. *pl_handle = &ol_pl_dev;
  67. }
  68. static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
  69. WMI_CMD_ID cmd_id, bool ini_triggered,
  70. uint8_t user_triggered)
  71. {
  72. struct scheduler_msg msg = { 0 };
  73. QDF_STATUS status;
  74. struct ath_pktlog_wmi_params *param;
  75. param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
  76. if (!param)
  77. return A_NO_MEMORY;
  78. param->cmd_id = cmd_id;
  79. param->pktlog_event = event_types;
  80. param->ini_triggered = ini_triggered;
  81. param->user_triggered = user_triggered;
  82. msg.type = WMA_PKTLOG_ENABLE_REQ;
  83. msg.bodyptr = param;
  84. msg.bodyval = 0;
  85. status = scheduler_post_msg(QDF_MODULE_ID_WMA, &msg);
  86. if (status != QDF_STATUS_SUCCESS) {
  87. qdf_mem_free(param);
  88. return A_ERROR;
  89. }
  90. return A_OK;
  91. }
  92. static inline A_STATUS
  93. pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
  94. bool ini_triggered, uint8_t user_triggered)
  95. {
  96. uint32_t types = 0;
  97. if (log_state & ATH_PKTLOG_TX)
  98. types |= WMI_PKTLOG_EVENT_TX;
  99. if (log_state & ATH_PKTLOG_RX)
  100. types |= WMI_PKTLOG_EVENT_RX;
  101. if (log_state & ATH_PKTLOG_RCFIND)
  102. types |= WMI_PKTLOG_EVENT_RCF;
  103. if (log_state & ATH_PKTLOG_RCUPDATE)
  104. types |= WMI_PKTLOG_EVENT_RCU;
  105. if (log_state & ATH_PKTLOG_SW_EVENT)
  106. types |= WMI_PKTLOG_EVENT_SW;
  107. return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
  108. ini_triggered, user_triggered);
  109. }
  110. static inline A_STATUS
  111. wdi_pktlog_subscribe(struct ol_txrx_pdev_t *txrx_pdev, int32_t log_state)
  112. {
  113. if (!txrx_pdev) {
  114. printk("Invalid pdev in %s\n", __func__);
  115. return A_ERROR;
  116. }
  117. if (log_state & ATH_PKTLOG_TX) {
  118. if (wdi_event_sub(txrx_pdev,
  119. &PKTLOG_TX_SUBSCRIBER, WDI_EVENT_TX_STATUS)) {
  120. return A_ERROR;
  121. }
  122. }
  123. if (log_state & ATH_PKTLOG_RX) {
  124. if (wdi_event_sub(txrx_pdev,
  125. &PKTLOG_RX_SUBSCRIBER, WDI_EVENT_RX_DESC)) {
  126. return A_ERROR;
  127. }
  128. if (wdi_event_sub(txrx_pdev,
  129. &PKTLOG_RX_REMOTE_SUBSCRIBER,
  130. WDI_EVENT_RX_DESC_REMOTE)) {
  131. return A_ERROR;
  132. }
  133. }
  134. if (log_state & ATH_PKTLOG_RCFIND) {
  135. if (wdi_event_sub(txrx_pdev,
  136. &PKTLOG_RCFIND_SUBSCRIBER,
  137. WDI_EVENT_RATE_FIND)) {
  138. return A_ERROR;
  139. }
  140. }
  141. if (log_state & ATH_PKTLOG_RCUPDATE) {
  142. if (wdi_event_sub(txrx_pdev,
  143. &PKTLOG_RCUPDATE_SUBSCRIBER,
  144. WDI_EVENT_RATE_UPDATE)) {
  145. return A_ERROR;
  146. }
  147. }
  148. if (log_state & ATH_PKTLOG_SW_EVENT) {
  149. if (wdi_event_sub(txrx_pdev,
  150. &PKTLOG_SW_EVENT_SUBSCRIBER,
  151. WDI_EVENT_SW_EVENT)) {
  152. return A_ERROR;
  153. }
  154. }
  155. return A_OK;
  156. }
  157. void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data)
  158. {
  159. switch (event) {
  160. case WDI_EVENT_TX_STATUS:
  161. {
  162. /*
  163. * process TX message
  164. */
  165. if (process_tx_info(pdev, log_data)) {
  166. printk("Unable to process TX info\n");
  167. return;
  168. }
  169. break;
  170. }
  171. case WDI_EVENT_RX_DESC:
  172. {
  173. /*
  174. * process RX message for local frames
  175. */
  176. if (process_rx_info(pdev, log_data)) {
  177. printk("Unable to process RX info\n");
  178. return;
  179. }
  180. break;
  181. }
  182. case WDI_EVENT_RX_DESC_REMOTE:
  183. {
  184. /*
  185. * process RX message for remote frames
  186. */
  187. if (process_rx_info_remote(pdev, log_data)) {
  188. printk("Unable to process RX info\n");
  189. return;
  190. }
  191. break;
  192. }
  193. case WDI_EVENT_RATE_FIND:
  194. {
  195. /*
  196. * process RATE_FIND message
  197. */
  198. if (process_rate_find(pdev, log_data)) {
  199. printk("Unable to process RC_FIND info\n");
  200. return;
  201. }
  202. break;
  203. }
  204. case WDI_EVENT_RATE_UPDATE:
  205. {
  206. /*
  207. * process RATE_UPDATE message
  208. */
  209. if (process_rate_update(pdev, log_data)) {
  210. printk("Unable to process RC_UPDATE\n");
  211. return;
  212. }
  213. break;
  214. }
  215. case WDI_EVENT_SW_EVENT:
  216. {
  217. /*
  218. * process SW EVENT message
  219. */
  220. if (process_sw_event(pdev, log_data)) {
  221. printk("Unable to process SW_EVENT\n");
  222. return;
  223. }
  224. break;
  225. }
  226. default:
  227. break;
  228. }
  229. }
  230. A_STATUS
  231. wdi_pktlog_unsubscribe(struct ol_txrx_pdev_t *txrx_pdev, uint32_t log_state)
  232. {
  233. if (log_state & ATH_PKTLOG_TX) {
  234. if (wdi_event_unsub(txrx_pdev,
  235. &PKTLOG_TX_SUBSCRIBER,
  236. WDI_EVENT_TX_STATUS)) {
  237. return A_ERROR;
  238. }
  239. }
  240. if (log_state & ATH_PKTLOG_RX) {
  241. if (wdi_event_unsub(txrx_pdev,
  242. &PKTLOG_RX_SUBSCRIBER, WDI_EVENT_RX_DESC)) {
  243. return A_ERROR;
  244. }
  245. if (wdi_event_unsub(txrx_pdev,
  246. &PKTLOG_RX_REMOTE_SUBSCRIBER,
  247. WDI_EVENT_RX_DESC_REMOTE)) {
  248. return A_ERROR;
  249. }
  250. }
  251. if (log_state & ATH_PKTLOG_RCFIND) {
  252. if (wdi_event_unsub(txrx_pdev,
  253. &PKTLOG_RCFIND_SUBSCRIBER,
  254. WDI_EVENT_RATE_FIND)) {
  255. return A_ERROR;
  256. }
  257. }
  258. if (log_state & ATH_PKTLOG_RCUPDATE) {
  259. if (wdi_event_unsub(txrx_pdev,
  260. &PKTLOG_RCUPDATE_SUBSCRIBER,
  261. WDI_EVENT_RATE_UPDATE)) {
  262. return A_ERROR;
  263. }
  264. }
  265. if (log_state & ATH_PKTLOG_RCUPDATE) {
  266. if (wdi_event_unsub(txrx_pdev,
  267. &PKTLOG_SW_EVENT_SUBSCRIBER,
  268. WDI_EVENT_SW_EVENT)) {
  269. return A_ERROR;
  270. }
  271. }
  272. return A_OK;
  273. }
  274. int pktlog_disable(struct hif_opaque_softc *scn)
  275. {
  276. struct ol_txrx_pdev_t *txrx_pdev =
  277. cds_get_context(QDF_MODULE_ID_TXRX);
  278. struct ol_pktlog_dev_t *pl_dev;
  279. struct ath_pktlog_info *pl_info;
  280. uint8_t save_pktlog_state;
  281. if (txrx_pdev == NULL ||
  282. txrx_pdev->pl_dev == NULL ||
  283. txrx_pdev->pl_dev->pl_info == NULL)
  284. return -EFAULT;
  285. pl_dev = txrx_pdev->pl_dev;
  286. pl_info = pl_dev->pl_info;
  287. if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
  288. pl_info->curr_pkt_state ==
  289. PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
  290. pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
  291. pl_info->curr_pkt_state ==
  292. PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
  293. return -EBUSY;
  294. save_pktlog_state = pl_info->curr_pkt_state;
  295. pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
  296. if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
  297. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  298. printk("Failed to disable pktlog in target\n");
  299. return -EINVAL;
  300. }
  301. if (pl_dev->is_pktlog_cb_subscribed &&
  302. wdi_pktlog_unsubscribe(txrx_pdev, pl_info->log_state)) {
  303. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  304. printk("Cannot unsubscribe pktlog from the WDI\n");
  305. return -EINVAL;
  306. }
  307. pl_dev->is_pktlog_cb_subscribed = false;
  308. pl_dev->is_pktlog_cb_subscribed = false;
  309. if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
  310. pl_info->curr_pkt_state =
  311. PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
  312. else
  313. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  314. return 0;
  315. }
  316. void pktlog_init(struct hif_opaque_softc *scn)
  317. {
  318. struct ath_pktlog_info *pl_info;
  319. ol_txrx_pdev_handle pdev_txrx_handle;
  320. pdev_txrx_handle = cds_get_context(QDF_MODULE_ID_TXRX);
  321. if (pdev_txrx_handle == NULL ||
  322. pdev_txrx_handle->pl_dev == NULL ||
  323. pdev_txrx_handle->pl_dev->pl_info == NULL)
  324. return;
  325. pl_info = pdev_txrx_handle->pl_dev->pl_info;
  326. OS_MEMZERO(pl_info, sizeof(*pl_info));
  327. PKTLOG_LOCK_INIT(pl_info);
  328. mutex_init(&pl_info->pktlog_mutex);
  329. pl_info->buf_size = PKTLOG_DEFAULT_BUFSIZE;
  330. pl_info->buf = NULL;
  331. pl_info->log_state = 0;
  332. pl_info->init_saved_state = 0;
  333. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  334. pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
  335. pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
  336. pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
  337. pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
  338. pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
  339. pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
  340. pl_info->pktlen = 0;
  341. pl_info->start_time_thruput = 0;
  342. pl_info->start_time_per = 0;
  343. pdev_txrx_handle->pl_dev->vendor_cmd_send = false;
  344. PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback;
  345. PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback;
  346. PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback;
  347. PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback;
  348. PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback;
  349. PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback;
  350. }
  351. static int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
  352. bool ini_triggered, uint8_t user_triggered,
  353. uint32_t is_iwpriv_command)
  354. {
  355. struct ol_pktlog_dev_t *pl_dev;
  356. struct ath_pktlog_info *pl_info;
  357. struct ol_txrx_pdev_t *txrx_pdev;
  358. int error;
  359. if (!scn) {
  360. printk("%s: Invalid scn context\n", __func__);
  361. ASSERT(0);
  362. return -EINVAL;
  363. }
  364. txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
  365. if (!txrx_pdev) {
  366. printk("%s: Invalid txrx_pdev context\n", __func__);
  367. ASSERT(0);
  368. return -EINVAL;
  369. }
  370. pl_dev = txrx_pdev->pl_dev;
  371. if (!pl_dev) {
  372. printk("%s: Invalid pktlog context\n", __func__);
  373. ASSERT(0);
  374. return -EINVAL;
  375. }
  376. pl_info = pl_dev->pl_info;
  377. if (!pl_info)
  378. return 0;
  379. if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
  380. return -EBUSY;
  381. pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
  382. /* is_iwpriv_command : 0 indicates its a vendor command
  383. * log_state: 0 indicates pktlog disable command
  384. * vendor_cmd_send flag; false means no vendor pktlog enable
  385. * command was sent previously
  386. */
  387. if (is_iwpriv_command == 0 && log_state == 0 &&
  388. pl_dev->vendor_cmd_send == false) {
  389. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  390. return 0;
  391. }
  392. if (!pl_dev->tgt_pktlog_alloced) {
  393. if (pl_info->buf == NULL) {
  394. error = pktlog_alloc_buf(scn);
  395. if (error != 0) {
  396. pl_info->curr_pkt_state =
  397. PKTLOG_OPR_NOT_IN_PROGRESS;
  398. return error;
  399. }
  400. if (!pl_info->buf) {
  401. pl_info->curr_pkt_state =
  402. PKTLOG_OPR_NOT_IN_PROGRESS;
  403. printk("%s: pktlog buf alloc failed\n",
  404. __func__);
  405. ASSERT(0);
  406. return -ENOMEM;
  407. }
  408. }
  409. spin_lock_bh(&pl_info->log_lock);
  410. pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
  411. pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
  412. pl_info->buf->wr_offset = 0;
  413. pl_info->buf->rd_offset = -1;
  414. /* These below variables are used by per packet stats*/
  415. pl_info->buf->bytes_written = 0;
  416. pl_info->buf->msg_index = 1;
  417. pl_info->buf->offset = PKTLOG_READ_OFFSET;
  418. spin_unlock_bh(&pl_info->log_lock);
  419. pl_info->start_time_thruput = os_get_timestamp();
  420. pl_info->start_time_per = pl_info->start_time_thruput;
  421. pl_dev->tgt_pktlog_alloced = true;
  422. }
  423. if (log_state != 0) {
  424. /* WDI subscribe */
  425. if ((!pl_dev->is_pktlog_cb_subscribed) &&
  426. wdi_pktlog_subscribe(txrx_pdev, log_state)) {
  427. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  428. printk("Unable to subscribe to the WDI %s\n", __func__);
  429. return -EINVAL;
  430. }
  431. pl_dev->is_pktlog_cb_subscribed = true;
  432. /* WMI command to enable pktlog on the firmware */
  433. if (pktlog_enable_tgt(scn, log_state, ini_triggered,
  434. user_triggered)) {
  435. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  436. printk("Device cannot be enabled, %s\n", __func__);
  437. return -EINVAL;
  438. }
  439. if (is_iwpriv_command == 0)
  440. pl_dev->vendor_cmd_send = true;
  441. } else {
  442. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  443. pl_dev->pl_funcs->pktlog_disable(scn);
  444. if (is_iwpriv_command == 0)
  445. pl_dev->vendor_cmd_send = false;
  446. }
  447. pl_info->log_state = log_state;
  448. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  449. return 0;
  450. }
  451. int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
  452. bool ini_triggered, uint8_t user_triggered,
  453. uint32_t is_iwpriv_command)
  454. {
  455. struct ol_pktlog_dev_t *pl_dev;
  456. struct ath_pktlog_info *pl_info;
  457. struct ol_txrx_pdev_t *txrx_pdev;
  458. int error;
  459. if (!scn) {
  460. printk("%s: Invalid scn context\n", __func__);
  461. ASSERT(0);
  462. return -EINVAL;
  463. }
  464. txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
  465. if (!txrx_pdev) {
  466. printk("%s: Invalid txrx_pdev context\n", __func__);
  467. ASSERT(0);
  468. return -EINVAL;
  469. }
  470. pl_dev = txrx_pdev->pl_dev;
  471. if (!pl_dev) {
  472. printk("%s: Invalid pktlog context\n", __func__);
  473. ASSERT(0);
  474. return -EINVAL;
  475. }
  476. pl_info = pl_dev->pl_info;
  477. if (!pl_info)
  478. return 0;
  479. mutex_lock(&pl_info->pktlog_mutex);
  480. error = __pktlog_enable(scn, log_state, ini_triggered,
  481. user_triggered, is_iwpriv_command);
  482. mutex_unlock(&pl_info->pktlog_mutex);
  483. return error;
  484. }
  485. static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
  486. {
  487. ol_txrx_pdev_handle pdev_txrx_handle =
  488. cds_get_context(QDF_MODULE_ID_TXRX);
  489. struct ol_pktlog_dev_t *pl_dev;
  490. struct ath_pktlog_info *pl_info;
  491. if (pdev_txrx_handle == NULL ||
  492. pdev_txrx_handle->pl_dev == NULL ||
  493. pdev_txrx_handle->pl_dev->pl_info == NULL)
  494. return -EFAULT;
  495. pl_dev = pdev_txrx_handle->pl_dev;
  496. pl_info = pl_dev->pl_info;
  497. if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS)
  498. return -EBUSY;
  499. pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
  500. if (size < 0) {
  501. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  502. return -EINVAL;
  503. }
  504. if (size == pl_info->buf_size) {
  505. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  506. qdf_print("%s: Pktlog Buff Size is already of same size.",
  507. __func__);
  508. return 0;
  509. }
  510. if (pl_info->log_state) {
  511. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  512. qdf_print("%s: Logging should be disabled before changing"
  513. "buffer size.", __func__);
  514. return -EINVAL;
  515. }
  516. spin_lock_bh(&pl_info->log_lock);
  517. if (pl_info->buf != NULL) {
  518. if (pl_dev->is_pktlog_cb_subscribed &&
  519. wdi_pktlog_unsubscribe(pdev_txrx_handle,
  520. pl_info->log_state)) {
  521. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  522. printk("Cannot unsubscribe pktlog from the WDI\n");
  523. spin_unlock_bh(&pl_info->log_lock);
  524. return -EFAULT;
  525. }
  526. pktlog_release_buf(pdev_txrx_handle);
  527. pl_dev->is_pktlog_cb_subscribed = false;
  528. pl_dev->tgt_pktlog_alloced = false;
  529. }
  530. if (size != 0) {
  531. qdf_print("%s: New Pktlog Buff Size is %d\n", __func__, size);
  532. pl_info->buf_size = size;
  533. }
  534. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  535. spin_unlock_bh(&pl_info->log_lock);
  536. return 0;
  537. }
  538. int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
  539. {
  540. int status;
  541. ol_txrx_pdev_handle pdev_txrx_handle =
  542. cds_get_context(QDF_MODULE_ID_TXRX);
  543. struct ol_pktlog_dev_t *pl_dev;
  544. struct ath_pktlog_info *pl_info;
  545. if (pdev_txrx_handle == NULL ||
  546. pdev_txrx_handle->pl_dev == NULL ||
  547. pdev_txrx_handle->pl_dev->pl_info == NULL)
  548. return -EFAULT;
  549. pl_dev = pdev_txrx_handle->pl_dev;
  550. pl_info = pl_dev->pl_info;
  551. mutex_lock(&pl_info->pktlog_mutex);
  552. status = __pktlog_setsize(scn, size);
  553. mutex_unlock(&pl_info->pktlog_mutex);
  554. return status;
  555. }
  556. int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
  557. {
  558. ol_txrx_pdev_handle pdev_txrx_handle =
  559. cds_get_context(QDF_MODULE_ID_TXRX);
  560. struct ol_pktlog_dev_t *pl_dev;
  561. struct ath_pktlog_info *pl_info;
  562. uint8_t save_pktlog_state;
  563. if (pdev_txrx_handle == NULL ||
  564. pdev_txrx_handle->pl_dev == NULL ||
  565. pdev_txrx_handle->pl_dev->pl_info == NULL)
  566. return -EFAULT;
  567. pl_dev = pdev_txrx_handle->pl_dev;
  568. pl_info = pl_dev->pl_info;
  569. if (!clear_buff)
  570. return -EINVAL;
  571. if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
  572. pl_info->curr_pkt_state ==
  573. PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
  574. return -EBUSY;
  575. save_pktlog_state = pl_info->curr_pkt_state;
  576. pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
  577. if (pl_info->log_state) {
  578. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  579. qdf_print("%s: Logging should be disabled before clearing "
  580. "pktlog buffer.", __func__);
  581. return -EINVAL;
  582. }
  583. if (pl_info->buf != NULL) {
  584. if (pl_info->buf_size > 0) {
  585. qdf_print("%s: pktlog buffer is cleared.", __func__);
  586. memset(pl_info->buf, 0, pl_info->buf_size);
  587. pl_dev->is_pktlog_cb_subscribed = false;
  588. pl_dev->tgt_pktlog_alloced = false;
  589. pl_info->buf->rd_offset = -1;
  590. } else {
  591. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  592. qdf_print("%s: pktlog buffer size is not proper. "
  593. "Existing Buf size %d", __func__,
  594. pl_info->buf_size);
  595. return -EFAULT;
  596. }
  597. } else {
  598. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  599. qdf_print("%s: pktlog buff is NULL", __func__);
  600. return -EFAULT;
  601. }
  602. if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
  603. pl_info->curr_pkt_state =
  604. PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
  605. else
  606. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  607. return 0;
  608. }
  609. /**
  610. * pktlog_process_fw_msg() - process packetlog message
  611. * @buff: buffer
  612. *
  613. * Return: None
  614. */
  615. void pktlog_process_fw_msg(uint32_t *buff)
  616. {
  617. uint32_t *pl_hdr;
  618. uint32_t log_type;
  619. struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
  620. if (!txrx_pdev) {
  621. qdf_print("%s: txrx_pdev is NULL", __func__);
  622. return;
  623. }
  624. pl_hdr = buff;
  625. log_type =
  626. (*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
  627. ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
  628. if ((log_type == PKTLOG_TYPE_TX_CTRL)
  629. || (log_type == PKTLOG_TYPE_TX_STAT)
  630. || (log_type == PKTLOG_TYPE_TX_MSDU_ID)
  631. || (log_type == PKTLOG_TYPE_TX_FRM_HDR)
  632. || (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
  633. wdi_event_handler(WDI_EVENT_TX_STATUS,
  634. txrx_pdev, pl_hdr);
  635. else if (log_type == PKTLOG_TYPE_RC_FIND)
  636. wdi_event_handler(WDI_EVENT_RATE_FIND,
  637. txrx_pdev, pl_hdr);
  638. else if (log_type == PKTLOG_TYPE_RC_UPDATE)
  639. wdi_event_handler(WDI_EVENT_RATE_UPDATE,
  640. txrx_pdev, pl_hdr);
  641. else if (log_type == PKTLOG_TYPE_RX_STAT)
  642. wdi_event_handler(WDI_EVENT_RX_DESC,
  643. txrx_pdev, pl_hdr);
  644. else if (log_type == PKTLOG_TYPE_SW_EVENT)
  645. wdi_event_handler(WDI_EVENT_SW_EVENT,
  646. txrx_pdev, pl_hdr);
  647. }
  648. #if defined(QCA_WIFI_3_0_ADRASTEA)
  649. /**
  650. * pktlog_t2h_msg_handler() - Target to host message handler
  651. * @context: pdev context
  652. * @pkt: HTC packet
  653. *
  654. * Return: None
  655. */
  656. static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
  657. {
  658. struct ol_pktlog_dev_t *pdev = (struct ol_pktlog_dev_t *)context;
  659. qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
  660. uint32_t *msg_word;
  661. /* check for successful message reception */
  662. if (pkt->Status != QDF_STATUS_SUCCESS) {
  663. if (pkt->Status != QDF_STATUS_E_CANCELED)
  664. pdev->htc_err_cnt++;
  665. qdf_nbuf_free(pktlog_t2h_msg);
  666. return;
  667. }
  668. /* confirm alignment */
  669. qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);
  670. msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
  671. pktlog_process_fw_msg(msg_word);
  672. qdf_nbuf_free(pktlog_t2h_msg);
  673. }
  674. /**
  675. * pktlog_tx_resume_handler() - resume callback
  676. * @context: pdev context
  677. *
  678. * Return: None
  679. */
  680. static void pktlog_tx_resume_handler(void *context)
  681. {
  682. qdf_print("%s: Not expected", __func__);
  683. qdf_assert(0);
  684. }
  685. /**
  686. * pktlog_h2t_send_complete() - send complete indication
  687. * @context: pdev context
  688. * @htc_pkt: HTC packet
  689. *
  690. * Return: None
  691. */
  692. static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
  693. {
  694. qdf_print("%s: Not expected", __func__);
  695. qdf_assert(0);
  696. }
  697. /**
  698. * pktlog_h2t_full() - queue full indication
  699. * @context: pdev context
  700. * @pkt: HTC packet
  701. *
  702. * Return: HTC action
  703. */
  704. static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt)
  705. {
  706. return HTC_SEND_FULL_KEEP;
  707. }
  708. /**
  709. * pktlog_htc_connect_service() - create new endpoint for packetlog
  710. * @pdev - pktlog pdev
  711. *
  712. * Return: 0 for success/failure
  713. */
  714. static int pktlog_htc_connect_service(struct ol_pktlog_dev_t *pdev)
  715. {
  716. struct htc_service_connect_req connect;
  717. struct htc_service_connect_resp response;
  718. QDF_STATUS status;
  719. qdf_mem_set(&connect, sizeof(connect), 0);
  720. qdf_mem_set(&response, sizeof(response), 0);
  721. connect.pMetaData = NULL;
  722. connect.MetaDataLength = 0;
  723. connect.EpCallbacks.pContext = pdev;
  724. connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
  725. connect.EpCallbacks.EpTxCompleteMultiple = NULL;
  726. connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
  727. connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;
  728. /* rx buffers currently are provided by HIF, not by EpRecvRefill */
  729. connect.EpCallbacks.EpRecvRefill = NULL;
  730. connect.EpCallbacks.RecvRefillWaterMark = 1;
  731. /* N/A, fill is done by HIF */
  732. connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
  733. /*
  734. * Specify how deep to let a queue get before htc_send_pkt will
  735. * call the EpSendFull function due to excessive send queue depth.
  736. */
  737. connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;
  738. /* disable flow control for HTT data message service */
  739. connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
  740. /* connect to control service */
  741. connect.service_id = PACKET_LOG_SVC;
  742. status = htc_connect_service(pdev->htc_pdev, &connect, &response);
  743. if (status != QDF_STATUS_SUCCESS) {
  744. pdev->mt_pktlog_enabled = false;
  745. return -EIO; /* failure */
  746. }
  747. pdev->htc_endpoint = response.Endpoint;
  748. pdev->mt_pktlog_enabled = true;
  749. return 0; /* success */
  750. }
  751. /**
  752. * pktlog_htc_attach() - attach pktlog HTC service
  753. *
  754. * Return: 0 for success/failure
  755. */
  756. int pktlog_htc_attach(void)
  757. {
  758. struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
  759. struct ol_pktlog_dev_t *pdev = NULL;
  760. void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);
  761. if ((!txrx_pdev) || (!txrx_pdev->pl_dev) || (!htc_pdev))
  762. return -EINVAL;
  763. pdev = txrx_pdev->pl_dev;
  764. pdev->htc_pdev = htc_pdev;
  765. return pktlog_htc_connect_service(pdev);
  766. }
  767. #else
  768. int pktlog_htc_attach(void)
  769. {
  770. struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
  771. struct ol_pktlog_dev_t *pdev = NULL;
  772. if (!txrx_pdev)
  773. return -EINVAL;
  774. pdev = txrx_pdev->pl_dev;
  775. pdev->mt_pktlog_enabled = false;
  776. return 0;
  777. }
  778. #endif
  779. #endif /* REMOVE_PKT_LOG */