pktlog_ac.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830
  1. /*
  2. * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
  3. *
  4. * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  5. *
  6. *
  7. * Permission to use, copy, modify, and/or distribute this software for
  8. * any purpose with or without fee is hereby granted, provided that the
  9. * above copyright notice and this permission notice appear in all
  10. * copies.
  11. *
  12. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  13. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  14. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  15. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  16. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  17. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  18. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  19. * PERFORMANCE OF THIS SOFTWARE.
  20. */
  21. /*
  22. * This file was originally distributed by Qualcomm Atheros, Inc.
  23. * under proprietary terms before Copyright ownership was assigned
  24. * to the Linux Foundation.
  25. */
  26. /*
  27. *
  28. * Permission to use, copy, modify, and/or distribute this software for any
  29. * purpose with or without fee is hereby granted, provided that the above
  30. * copyright notice and this permission notice appear in all copies.
  31. *
  32. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  33. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  34. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  35. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  36. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  37. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  38. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  39. */
  40. #ifndef REMOVE_PKT_LOG
  41. #include "qdf_mem.h"
  42. #include "athdefs.h"
  43. #include "pktlog_ac_i.h"
  44. #include "cds_api.h"
  45. #include "wma_types.h"
  46. #include "htc.h"
/* WDI event subscriber objects, one per logged event class.  Their
 * callback members are all wired to pktlog_callback() in pktlog_init(). */
wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;

/* Architecture-dependent packet-log entry points exported via ol_pl_dev */
struct ol_pl_arch_dep_funcs ol_pl_funcs = {
	.pktlog_init = pktlog_init,
	.pktlog_enable = pktlog_enable,
	.pktlog_setsize = pktlog_setsize,
	.pktlog_disable = pktlog_disable, /* valid for f/w disable */
};

/* Singleton packet-log device; handed to callers by ol_pl_sethandle() */
struct ol_pktlog_dev_t ol_pl_dev = {
	.pl_funcs = &ol_pl_funcs,
};
  62. void ol_pl_sethandle(ol_pktlog_dev_handle *pl_handle,
  63. struct hif_opaque_softc *scn)
  64. {
  65. ol_pl_dev.scn = (ol_ath_generic_softc_handle) scn;
  66. *pl_handle = &ol_pl_dev;
  67. }
  68. static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
  69. WMI_CMD_ID cmd_id, bool ini_triggered,
  70. uint8_t user_triggered)
  71. {
  72. struct scheduler_msg msg = { 0 };
  73. QDF_STATUS status;
  74. struct ath_pktlog_wmi_params *param;
  75. param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
  76. if (!param)
  77. return A_NO_MEMORY;
  78. param->cmd_id = cmd_id;
  79. param->pktlog_event = event_types;
  80. param->ini_triggered = ini_triggered;
  81. param->user_triggered = user_triggered;
  82. msg.type = WMA_PKTLOG_ENABLE_REQ;
  83. msg.bodyptr = param;
  84. msg.bodyval = 0;
  85. status = scheduler_post_msg(QDF_MODULE_ID_WMA, &msg);
  86. if (status != QDF_STATUS_SUCCESS) {
  87. qdf_mem_free(param);
  88. return A_ERROR;
  89. }
  90. return A_OK;
  91. }
  92. static inline A_STATUS
  93. pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
  94. bool ini_triggered, uint8_t user_triggered)
  95. {
  96. uint32_t types = 0;
  97. if (log_state & ATH_PKTLOG_TX)
  98. types |= WMI_PKTLOG_EVENT_TX;
  99. if (log_state & ATH_PKTLOG_RX)
  100. types |= WMI_PKTLOG_EVENT_RX;
  101. if (log_state & ATH_PKTLOG_RCFIND)
  102. types |= WMI_PKTLOG_EVENT_RCF;
  103. if (log_state & ATH_PKTLOG_RCUPDATE)
  104. types |= WMI_PKTLOG_EVENT_RCU;
  105. if (log_state & ATH_PKTLOG_SW_EVENT)
  106. types |= WMI_PKTLOG_EVENT_SW;
  107. return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
  108. ini_triggered, user_triggered);
  109. }
  110. static inline A_STATUS
  111. wdi_pktlog_subscribe(struct ol_txrx_pdev_t *txrx_pdev, int32_t log_state)
  112. {
  113. if (!txrx_pdev) {
  114. printk("Invalid pdev in %s\n", __func__);
  115. return A_ERROR;
  116. }
  117. if (log_state & ATH_PKTLOG_TX) {
  118. if (wdi_event_sub(txrx_pdev,
  119. &PKTLOG_TX_SUBSCRIBER, WDI_EVENT_TX_STATUS)) {
  120. return A_ERROR;
  121. }
  122. }
  123. if (log_state & ATH_PKTLOG_RX) {
  124. if (wdi_event_sub(txrx_pdev,
  125. &PKTLOG_RX_SUBSCRIBER, WDI_EVENT_RX_DESC)) {
  126. return A_ERROR;
  127. }
  128. if (wdi_event_sub(txrx_pdev,
  129. &PKTLOG_RX_REMOTE_SUBSCRIBER,
  130. WDI_EVENT_RX_DESC_REMOTE)) {
  131. return A_ERROR;
  132. }
  133. }
  134. if (log_state & ATH_PKTLOG_RCFIND) {
  135. if (wdi_event_sub(txrx_pdev,
  136. &PKTLOG_RCFIND_SUBSCRIBER,
  137. WDI_EVENT_RATE_FIND)) {
  138. return A_ERROR;
  139. }
  140. }
  141. if (log_state & ATH_PKTLOG_RCUPDATE) {
  142. if (wdi_event_sub(txrx_pdev,
  143. &PKTLOG_RCUPDATE_SUBSCRIBER,
  144. WDI_EVENT_RATE_UPDATE)) {
  145. return A_ERROR;
  146. }
  147. }
  148. if (log_state & ATH_PKTLOG_SW_EVENT) {
  149. if (wdi_event_sub(txrx_pdev,
  150. &PKTLOG_SW_EVENT_SUBSCRIBER,
  151. WDI_EVENT_SW_EVENT)) {
  152. return A_ERROR;
  153. }
  154. }
  155. return A_OK;
  156. }
  157. void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data)
  158. {
  159. switch (event) {
  160. case WDI_EVENT_TX_STATUS:
  161. {
  162. /*
  163. * process TX message
  164. */
  165. if (process_tx_info(pdev, log_data)) {
  166. printk("Unable to process TX info\n");
  167. return;
  168. }
  169. break;
  170. }
  171. case WDI_EVENT_RX_DESC:
  172. {
  173. /*
  174. * process RX message for local frames
  175. */
  176. if (process_rx_info(pdev, log_data)) {
  177. printk("Unable to process RX info\n");
  178. return;
  179. }
  180. break;
  181. }
  182. case WDI_EVENT_RX_DESC_REMOTE:
  183. {
  184. /*
  185. * process RX message for remote frames
  186. */
  187. if (process_rx_info_remote(pdev, log_data)) {
  188. printk("Unable to process RX info\n");
  189. return;
  190. }
  191. break;
  192. }
  193. case WDI_EVENT_RATE_FIND:
  194. {
  195. /*
  196. * process RATE_FIND message
  197. */
  198. if (process_rate_find(pdev, log_data)) {
  199. printk("Unable to process RC_FIND info\n");
  200. return;
  201. }
  202. break;
  203. }
  204. case WDI_EVENT_RATE_UPDATE:
  205. {
  206. /*
  207. * process RATE_UPDATE message
  208. */
  209. if (process_rate_update(pdev, log_data)) {
  210. printk("Unable to process RC_UPDATE\n");
  211. return;
  212. }
  213. break;
  214. }
  215. case WDI_EVENT_SW_EVENT:
  216. {
  217. /*
  218. * process SW EVENT message
  219. */
  220. if (process_sw_event(pdev, log_data)) {
  221. printk("Unable to process SW_EVENT\n");
  222. return;
  223. }
  224. break;
  225. }
  226. default:
  227. break;
  228. }
  229. }
  230. A_STATUS
  231. wdi_pktlog_unsubscribe(struct ol_txrx_pdev_t *txrx_pdev, uint32_t log_state)
  232. {
  233. if (log_state & ATH_PKTLOG_TX) {
  234. if (wdi_event_unsub(txrx_pdev,
  235. &PKTLOG_TX_SUBSCRIBER,
  236. WDI_EVENT_TX_STATUS)) {
  237. return A_ERROR;
  238. }
  239. }
  240. if (log_state & ATH_PKTLOG_RX) {
  241. if (wdi_event_unsub(txrx_pdev,
  242. &PKTLOG_RX_SUBSCRIBER, WDI_EVENT_RX_DESC)) {
  243. return A_ERROR;
  244. }
  245. if (wdi_event_unsub(txrx_pdev,
  246. &PKTLOG_RX_REMOTE_SUBSCRIBER,
  247. WDI_EVENT_RX_DESC_REMOTE)) {
  248. return A_ERROR;
  249. }
  250. }
  251. if (log_state & ATH_PKTLOG_RCFIND) {
  252. if (wdi_event_unsub(txrx_pdev,
  253. &PKTLOG_RCFIND_SUBSCRIBER,
  254. WDI_EVENT_RATE_FIND)) {
  255. return A_ERROR;
  256. }
  257. }
  258. if (log_state & ATH_PKTLOG_RCUPDATE) {
  259. if (wdi_event_unsub(txrx_pdev,
  260. &PKTLOG_RCUPDATE_SUBSCRIBER,
  261. WDI_EVENT_RATE_UPDATE)) {
  262. return A_ERROR;
  263. }
  264. }
  265. if (log_state & ATH_PKTLOG_RCUPDATE) {
  266. if (wdi_event_unsub(txrx_pdev,
  267. &PKTLOG_SW_EVENT_SUBSCRIBER,
  268. WDI_EVENT_SW_EVENT)) {
  269. return A_ERROR;
  270. }
  271. }
  272. return A_OK;
  273. }
  274. int pktlog_disable(struct hif_opaque_softc *scn)
  275. {
  276. struct ol_txrx_pdev_t *txrx_pdev =
  277. cds_get_context(QDF_MODULE_ID_TXRX);
  278. struct ol_pktlog_dev_t *pl_dev;
  279. struct ath_pktlog_info *pl_info;
  280. uint8_t save_pktlog_state;
  281. if (txrx_pdev == NULL ||
  282. txrx_pdev->pl_dev == NULL ||
  283. txrx_pdev->pl_dev->pl_info == NULL)
  284. return -EFAULT;
  285. pl_dev = txrx_pdev->pl_dev;
  286. pl_info = pl_dev->pl_info;
  287. if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
  288. pl_info->curr_pkt_state ==
  289. PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
  290. pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
  291. pl_info->curr_pkt_state ==
  292. PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
  293. return -EBUSY;
  294. save_pktlog_state = pl_info->curr_pkt_state;
  295. pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
  296. if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
  297. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  298. printk("Failed to disable pktlog in target\n");
  299. return -EINVAL;
  300. }
  301. if (pl_dev->is_pktlog_cb_subscribed &&
  302. wdi_pktlog_unsubscribe(txrx_pdev, pl_info->log_state)) {
  303. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  304. printk("Cannot unsubscribe pktlog from the WDI\n");
  305. return -EINVAL;
  306. }
  307. pl_dev->is_pktlog_cb_subscribed = false;
  308. pl_dev->is_pktlog_cb_subscribed = false;
  309. if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
  310. pl_info->curr_pkt_state =
  311. PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
  312. else
  313. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  314. return 0;
  315. }
  316. void pktlog_init(struct hif_opaque_softc *scn)
  317. {
  318. struct ath_pktlog_info *pl_info;
  319. ol_txrx_pdev_handle pdev_txrx_handle;
  320. pdev_txrx_handle = cds_get_context(QDF_MODULE_ID_TXRX);
  321. if (pdev_txrx_handle == NULL ||
  322. pdev_txrx_handle->pl_dev == NULL ||
  323. pdev_txrx_handle->pl_dev->pl_info == NULL)
  324. return;
  325. pl_info = pdev_txrx_handle->pl_dev->pl_info;
  326. OS_MEMZERO(pl_info, sizeof(*pl_info));
  327. PKTLOG_LOCK_INIT(pl_info);
  328. pl_info->buf_size = PKTLOG_DEFAULT_BUFSIZE;
  329. pl_info->buf = NULL;
  330. pl_info->log_state = 0;
  331. pl_info->init_saved_state = 0;
  332. pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
  333. pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
  334. pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
  335. pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
  336. pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
  337. pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
  338. pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
  339. pl_info->pktlen = 0;
  340. pl_info->start_time_thruput = 0;
  341. pl_info->start_time_per = 0;
  342. pdev_txrx_handle->pl_dev->vendor_cmd_send = false;
  343. PKTLOG_TX_SUBSCRIBER.callback = pktlog_callback;
  344. PKTLOG_RX_SUBSCRIBER.callback = pktlog_callback;
  345. PKTLOG_RX_REMOTE_SUBSCRIBER.callback = pktlog_callback;
  346. PKTLOG_RCFIND_SUBSCRIBER.callback = pktlog_callback;
  347. PKTLOG_RCUPDATE_SUBSCRIBER.callback = pktlog_callback;
  348. PKTLOG_SW_EVENT_SUBSCRIBER.callback = pktlog_callback;
  349. }
/**
 * pktlog_enable() - enable or disable packet logging
 * @scn: HIF opaque handle
 * @log_state: ATH_PKTLOG_* bitmap; 0 means disable
 * @ini_triggered: true if the request comes from INI configuration
 * @user_triggered: non-zero if the request comes from user space
 * @is_iwpriv_command: non-zero for iwpriv, 0 for a vendor command
 *
 * Allocates the host log buffer on first enable, subscribes the WDI
 * callbacks, and posts the WMI enable command; with log_state == 0 it
 * delegates to the pktlog_disable entry point instead.
 *
 * Return: 0 on success, negative errno on failure.
 */
int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
		  bool ini_triggered, uint8_t user_triggered,
		  uint32_t is_iwpriv_command)
{
	struct ol_pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct ol_txrx_pdev_t *txrx_pdev;
	int error;

	if (!scn) {
		printk("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}
	txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
	if (!txrx_pdev) {
		printk("%s: Invalid txrx_pdev context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}
	pl_dev = txrx_pdev->pl_dev;
	if (!pl_dev) {
		printk("%s: Invalid pktlog context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}
	pl_info = pl_dev->pl_info;
	if (!pl_info)
		return 0;
	/* Busy while a read/clear operation owns the state machine.
	 * NOTE(review): assumes the PKTLOG_OPR_* values are ordered so that
	 * all in-progress states compare below CLEARBUFF_COMPLETE — confirm
	 * against the enum definition. */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	/* is_iwpriv_command : 0 indicates its a vendor command
	 * log_state: 0 indicates pktlog disable command
	 * vendor_cmd_send flag; false means no vendor pktlog enable
	 * command was sent previously
	 */
	if (is_iwpriv_command == 0 && log_state == 0 &&
	    pl_dev->vendor_cmd_send == false) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		return 0;
	}
	/* First enable since init/resize: (re)allocate and stamp the buffer */
	if (!pl_dev->tgt_pktlog_alloced) {
		if (pl_info->buf == NULL) {
			error = pktlog_alloc_buf(scn);
			if (error != 0) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				return error;
			}
			if (!pl_info->buf) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				printk("%s: pktlog buf alloc failed\n",
				       __func__);
				ASSERT(0);
				return -ENOMEM;
			}
		}
		spin_lock_bh(&pl_info->log_lock);
		pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
		pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
		pl_info->buf->wr_offset = 0;
		pl_info->buf->rd_offset = -1;
		/* These below variables are used by per packet stats*/
		pl_info->buf->bytes_written = 0;
		pl_info->buf->msg_index = 1;
		pl_info->buf->offset = PKTLOG_READ_OFFSET;
		spin_unlock_bh(&pl_info->log_lock);
		pl_info->start_time_thruput = os_get_timestamp();
		pl_info->start_time_per = pl_info->start_time_thruput;
		pl_dev->tgt_pktlog_alloced = true;
	}
	if (log_state != 0) {
		/* WDI subscribe */
		if ((!pl_dev->is_pktlog_cb_subscribed) &&
		    wdi_pktlog_subscribe(txrx_pdev, log_state)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			printk("Unable to subscribe to the WDI %s\n", __func__);
			return -EINVAL;
		}
		pl_dev->is_pktlog_cb_subscribed = true;
		/* WMI command to enable pktlog on the firmware */
		if (pktlog_enable_tgt(scn, log_state, ini_triggered,
				      user_triggered)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			printk("Device cannot be enabled, %s\n", __func__);
			return -EINVAL;
		}
		/* Remember that a vendor (non-iwpriv) enable was issued */
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = true;
	} else {
		/* log_state == 0: route through the registered disable hook */
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		pl_dev->pl_funcs->pktlog_disable(scn);
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = false;
	}
	pl_info->log_state = log_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
/**
 * pktlog_setsize() - change the host packet-log buffer size
 * @scn: HIF opaque handle (unused; the pdev is fetched from CDS)
 * @size: requested buffer size in bytes; 0 keeps the current size
 *
 * Logging must be disabled first.  Any existing buffer is released and
 * its WDI subscription dropped, so the next enable re-allocates at the
 * new size.
 *
 * Return: 0 on success, negative errno on failure.
 */
int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
{
	ol_txrx_pdev_handle pdev_txrx_handle =
		cds_get_context(QDF_MODULE_ID_TXRX);
	struct ol_pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;

	if (pdev_txrx_handle == NULL ||
	    pdev_txrx_handle->pl_dev == NULL ||
	    pdev_txrx_handle->pl_dev->pl_info == NULL)
		return -EFAULT;
	pl_dev = pdev_txrx_handle->pl_dev;
	pl_info = pl_dev->pl_info;
	/* Refuse while a read/clear operation is in flight */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS)
		return -EBUSY;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	if (size < 0) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		return -EINVAL;
	}
	/* No-op resize: nothing to release or re-allocate */
	if (size == pl_info->buf_size) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Pktlog Buff Size is already of same size.",
			  __func__);
		return 0;
	}
	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before changing"
			  "buffer size.", __func__);
		return -EINVAL;
	}
	spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf != NULL) {
		/* Drop the WDI hook before freeing the buffer it writes into */
		if (pl_dev->is_pktlog_cb_subscribed &&
		    wdi_pktlog_unsubscribe(pdev_txrx_handle,
					   pl_info->log_state)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			printk("Cannot unsubscribe pktlog from the WDI\n");
			spin_unlock_bh(&pl_info->log_lock);
			return -EFAULT;
		}
		pktlog_release_buf(pdev_txrx_handle);
		pl_dev->is_pktlog_cb_subscribed = false;
		pl_dev->tgt_pktlog_alloced = false;
	}
	if (size != 0) {
		qdf_print("%s: New Pktlog Buff Size is %d\n", __func__, size);
		pl_info->buf_size = size;
	}
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	spin_unlock_bh(&pl_info->log_lock);
	return 0;
}
/**
 * pktlog_clearbuff() - zero the contents of the packet-log buffer
 * @scn: HIF opaque handle (unused; the pdev is fetched from CDS)
 * @clear_buff: must be true; false is rejected with -EINVAL
 *
 * Only valid after a read completed and while logging is disabled.
 * On success the state advances to CLEARBUFF_COMPLETE when the buffer
 * was cleared right after a finished read.
 *
 * Return: 0 on success, negative errno on failure.
 */
int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
{
	ol_txrx_pdev_handle pdev_txrx_handle =
		cds_get_context(QDF_MODULE_ID_TXRX);
	struct ol_pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;

	if (pdev_txrx_handle == NULL ||
	    pdev_txrx_handle->pl_dev == NULL ||
	    pdev_txrx_handle->pl_dev->pl_info == NULL)
		return -EFAULT;
	pl_dev = pdev_txrx_handle->pl_dev;
	pl_info = pl_dev->pl_info;
	if (!clear_buff)
		return -EINVAL;
	/* Only allowed once a read has completed, and not twice in a row */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
	    PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;
	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Logging should be disabled before clearing "
			  "pktlog buffer.", __func__);
		return -EINVAL;
	}
	if (pl_info->buf != NULL) {
		if (pl_info->buf_size > 0) {
			qdf_print("%s: pktlog buffer is cleared.", __func__);
			/* Zeroes the header too; rd_offset is restored below */
			memset(pl_info->buf, 0, pl_info->buf_size);
			pl_dev->is_pktlog_cb_subscribed = false;
			pl_dev->tgt_pktlog_alloced = false;
			pl_info->buf->rd_offset = -1;
		} else {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("%s: pktlog buffer size is not proper. "
				  "Existing Buf size %d", __func__,
				  pl_info->buf_size);
			return -EFAULT;
		}
	} else {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: pktlog buff is NULL", __func__);
		return -EFAULT;
	}
	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
  556. /**
  557. * pktlog_process_fw_msg() - process packetlog message
  558. * @buff: buffer
  559. *
  560. * Return: None
  561. */
  562. void pktlog_process_fw_msg(uint32_t *buff)
  563. {
  564. uint32_t *pl_hdr;
  565. uint32_t log_type;
  566. struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
  567. if (!txrx_pdev) {
  568. qdf_print("%s: txrx_pdev is NULL", __func__);
  569. return;
  570. }
  571. pl_hdr = buff;
  572. log_type =
  573. (*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
  574. ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
  575. if ((log_type == PKTLOG_TYPE_TX_CTRL)
  576. || (log_type == PKTLOG_TYPE_TX_STAT)
  577. || (log_type == PKTLOG_TYPE_TX_MSDU_ID)
  578. || (log_type == PKTLOG_TYPE_TX_FRM_HDR)
  579. || (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
  580. wdi_event_handler(WDI_EVENT_TX_STATUS,
  581. txrx_pdev, pl_hdr);
  582. else if (log_type == PKTLOG_TYPE_RC_FIND)
  583. wdi_event_handler(WDI_EVENT_RATE_FIND,
  584. txrx_pdev, pl_hdr);
  585. else if (log_type == PKTLOG_TYPE_RC_UPDATE)
  586. wdi_event_handler(WDI_EVENT_RATE_UPDATE,
  587. txrx_pdev, pl_hdr);
  588. else if (log_type == PKTLOG_TYPE_RX_STAT)
  589. wdi_event_handler(WDI_EVENT_RX_DESC,
  590. txrx_pdev, pl_hdr);
  591. else if (log_type == PKTLOG_TYPE_SW_EVENT)
  592. wdi_event_handler(WDI_EVENT_SW_EVENT,
  593. txrx_pdev, pl_hdr);
  594. }
  595. #if defined(QCA_WIFI_3_0_ADRASTEA)
  596. /**
  597. * pktlog_t2h_msg_handler() - Target to host message handler
  598. * @context: pdev context
  599. * @pkt: HTC packet
  600. *
  601. * Return: None
  602. */
  603. static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
  604. {
  605. struct ol_pktlog_dev_t *pdev = (struct ol_pktlog_dev_t *)context;
  606. qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
  607. uint32_t *msg_word;
  608. /* check for successful message reception */
  609. if (pkt->Status != A_OK) {
  610. if (pkt->Status != A_ECANCELED)
  611. pdev->htc_err_cnt++;
  612. qdf_nbuf_free(pktlog_t2h_msg);
  613. return;
  614. }
  615. /* confirm alignment */
  616. qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);
  617. msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
  618. pktlog_process_fw_msg(msg_word);
  619. qdf_nbuf_free(pktlog_t2h_msg);
  620. }
  621. /**
  622. * pktlog_tx_resume_handler() - resume callback
  623. * @context: pdev context
  624. *
  625. * Return: None
  626. */
  627. static void pktlog_tx_resume_handler(void *context)
  628. {
  629. qdf_print("%s: Not expected", __func__);
  630. qdf_assert(0);
  631. }
  632. /**
  633. * pktlog_h2t_send_complete() - send complete indication
  634. * @context: pdev context
  635. * @htc_pkt: HTC packet
  636. *
  637. * Return: None
  638. */
  639. static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
  640. {
  641. qdf_print("%s: Not expected", __func__);
  642. qdf_assert(0);
  643. }
  644. /**
  645. * pktlog_h2t_full() - queue full indication
  646. * @context: pdev context
  647. * @pkt: HTC packet
  648. *
  649. * Return: HTC action
  650. */
  651. static HTC_SEND_FULL_ACTION pktlog_h2t_full(void *context, HTC_PACKET *pkt)
  652. {
  653. return HTC_SEND_FULL_KEEP;
  654. }
/**
 * pktlog_htc_connect_service() - open the HTC endpoint for packet log
 * @pdev: pktlog pdev; htc_pdev must already be set by the caller
 *
 * Builds the HTC connect request for PACKET_LOG_SVC (credit flow control
 * disabled, RX buffers provided by HIF) and records the resulting
 * endpoint in @pdev.  Sets pdev->mt_pktlog_enabled to reflect the
 * outcome.
 *
 * Return: 0 on success, -EIO on connect failure.
 */
static int pktlog_htc_connect_service(struct ol_pktlog_dev_t *pdev)
{
	HTC_SERVICE_CONNECT_REQ connect;
	HTC_SERVICE_CONNECT_RESP response;
	A_STATUS status;

	qdf_mem_set(&connect, sizeof(connect), 0);
	qdf_mem_set(&response, sizeof(response), 0);
	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = pdev;
	connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
	connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;
	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;
	connect.EpCallbacks.RecvRefillWaterMark = 1;
	/* N/A, fill is done by HIF */
	connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;
	/* disable flow control for HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
	/* connect to control service */
	connect.service_id = PACKET_LOG_SVC;
	status = htc_connect_service(pdev->htc_pdev, &connect, &response);
	if (status != A_OK) {
		pdev->mt_pktlog_enabled = false;
		return -EIO; /* failure */
	}
	pdev->htc_endpoint = response.Endpoint;
	pdev->mt_pktlog_enabled = true;
	return 0; /* success */
}
  698. /**
  699. * pktlog_htc_attach() - attach pktlog HTC service
  700. *
  701. * Return: 0 for success/failure
  702. */
  703. int pktlog_htc_attach(void)
  704. {
  705. struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
  706. struct ol_pktlog_dev_t *pdev = NULL;
  707. void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);
  708. if ((!txrx_pdev) || (!txrx_pdev->pl_dev) || (!htc_pdev))
  709. return -EINVAL;
  710. pdev = txrx_pdev->pl_dev;
  711. pdev->htc_pdev = htc_pdev;
  712. return pktlog_htc_connect_service(pdev);
  713. }
  714. #else
  715. int pktlog_htc_attach(void)
  716. {
  717. struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
  718. struct ol_pktlog_dev_t *pdev = NULL;
  719. if (!txrx_pdev)
  720. return -EINVAL;
  721. pdev = txrx_pdev->pl_dev;
  722. pdev->mt_pktlog_enabled = false;
  723. return 0;
  724. }
  725. #endif
  726. #endif /* REMOVE_PKT_LOG */