linux_ac.c

/*
 * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#ifndef REMOVE_PKT_LOG

#ifndef EXPORT_SYMTAB
#define EXPORT_SYMTAB
#endif

#ifndef __KERNEL__
#define __KERNEL__
#endif

/*
 * Linux specific implementation of Pktlogs for 802.11ac
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <pktlog_ac_i.h>
#include <pktlog_ac_fmt.h>
#include "i_host_diag_core_log.h"
#include "host_diag_core_log.h"
#include "ani_global.h"

#define PKTLOG_TAG              "ATH_PKTLOG"
#define PKTLOG_DEVNAME_SIZE     32
#define MAX_WLANDEV             1

#ifdef MULTI_IF_NAME
#define PKTLOG_PROC_DIR         "ath_pktlog" MULTI_IF_NAME
#else
#define PKTLOG_PROC_DIR         "ath_pktlog"
#endif

/* Permissions for creating proc entries */
#define PKTLOG_PROC_PERM        0444
#define PKTLOG_PROCSYS_DIR_PERM 0555
#define PKTLOG_PROCSYS_PERM     0644

#ifndef __MOD_INC_USE_COUNT
#define PKTLOG_MOD_INC_USE_COUNT	do {				\
	if (!try_module_get(THIS_MODULE)) {				\
		printk(KERN_WARNING "try_module_get failed\n");		\
	} } while (0)

#define PKTLOG_MOD_DEC_USE_COUNT        module_put(THIS_MODULE)
#else
#define PKTLOG_MOD_INC_USE_COUNT        MOD_INC_USE_COUNT
#define PKTLOG_MOD_DEC_USE_COUNT        MOD_DEC_USE_COUNT
#endif
static struct ath_pktlog_info *g_pktlog_info;

static struct proc_dir_entry *g_pktlog_pde;

static DEFINE_MUTEX(proc_mutex);

static int pktlog_attach(struct hif_opaque_softc *scn);
static void pktlog_detach(struct hif_opaque_softc *scn);
static int pktlog_open(struct inode *i, struct file *f);
static int pktlog_release(struct inode *i, struct file *f);
static ssize_t pktlog_read(struct file *file, char *buf, size_t nbytes,
			   loff_t *ppos);

static struct file_operations pktlog_fops = {
	open:    pktlog_open,
	release: pktlog_release,
	read:    pktlog_read,
};

void pktlog_disable_adapter_logging(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();

	if (pl_dev)
		pl_dev->pl_info->log_state = 0;
}
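
/*
 * Buffer allocation scheme (descriptive note): pktlog_alloc_buf() below
 * over-allocates by two pages with vmalloc(), rounds the returned address
 * up to a page boundary, and marks every backing page reserved via
 * SetPageReserved().  pktlog_release_buf() undoes this in the same order:
 * ClearPageReserved() on each page, then vfree().
 */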
int pktlog_alloc_buf(struct hif_opaque_softc *scn)
{
	uint32_t page_cnt;
	unsigned long vaddr;
	struct page *vpg;
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct ath_pktlog_buf *buffer;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		printk(PKTLOG_TAG
		       "%s: Unable to allocate buffer pdev_txrx_handle or pdev_txrx_handle->pl_dev is null\n",
		       __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	page_cnt = (sizeof(*(pl_info->buf)) + pl_info->buf_size) / PAGE_SIZE;

	spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf != NULL) {
		printk(PKTLOG_TAG "Buffer is already in use\n");
		spin_unlock_bh(&pl_info->log_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&pl_info->log_lock);

	buffer = vmalloc((page_cnt + 2) * PAGE_SIZE);
	if (buffer == NULL) {
		printk(PKTLOG_TAG
		       "%s: Unable to allocate buffer "
		       "(%d pages)\n", __func__, page_cnt);
		return -ENOMEM;
	}

	buffer = (struct ath_pktlog_buf *)
		 (((unsigned long)(buffer) + PAGE_SIZE - 1)
		  & PAGE_MASK);

	for (vaddr = (unsigned long)(buffer);
	     vaddr < ((unsigned long)(buffer) + (page_cnt * PAGE_SIZE));
	     vaddr += PAGE_SIZE) {
		vpg = vmalloc_to_page((const void *)vaddr);
		SetPageReserved(vpg);
	}

	spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf != NULL)
		pktlog_release_buf(scn);

	pl_info->buf = buffer;
	spin_unlock_bh(&pl_info->log_lock);
	return 0;
}
void pktlog_release_buf(struct hif_opaque_softc *scn)
{
	unsigned long page_cnt;
	unsigned long vaddr;
	struct page *vpg;
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return;
	}

	if (!pl_dev->pl_info) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return;
	}

	pl_info = pl_dev->pl_info;

	page_cnt = ((sizeof(*(pl_info->buf)) + pl_info->buf_size) /
		    PAGE_SIZE) + 1;

	for (vaddr = (unsigned long)(pl_info->buf);
	     vaddr < (unsigned long)(pl_info->buf) + (page_cnt * PAGE_SIZE);
	     vaddr += PAGE_SIZE) {
		vpg = vmalloc_to_page((const void *)vaddr);
		ClearPageReserved(vpg);
	}

	vfree(pl_info->buf);
	pl_info->buf = NULL;
}
static void pktlog_cleanup(struct ath_pktlog_info *pl_info)
{
	pl_info->log_state = 0;
	PKTLOG_LOCK_DESTROY(pl_info);
	mutex_destroy(&pl_info->pktlog_mutex);
}
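
/*
 * Note: qdf_sysctl_decl() and QDF_SYSCTL_PROC_DOINTVEC() are QDF wrapper
 * macros defined outside this file; they are assumed to hide the
 * kernel-version-specific proc_handler signature and forward to
 * proc_dointvec().  The handlers below point ctl->data at a local
 * variable so each read/write goes through the pktlog callbacks instead
 * of touching a shared integer directly.
 */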
/* sysctl procfs handler to enable pktlog */
static int
qdf_sysctl_decl(ath_sysctl_pktlog_enable, ctl, write, filp, buffer, lenp, ppos)
{
	int ret, enable;
	ol_ath_generic_softc_handle scn;
	struct pktlog_dev_t *pl_dev;

	mutex_lock(&proc_mutex);
	scn = (ol_ath_generic_softc_handle) ctl->extra1;

	if (!scn) {
		mutex_unlock(&proc_mutex);
		printk("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		mutex_unlock(&proc_mutex);
		printk("%s: Invalid pktlog context\n", __func__);
		ASSERT(0);
		return -ENODEV;
	}

	ctl->data = &enable;
	ctl->maxlen = sizeof(enable);

	if (write) {
		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
					       lenp, ppos);
		if (ret == 0) {
			ret = pl_dev->pl_funcs->pktlog_enable(
					(struct hif_opaque_softc *)scn, enable,
					cds_is_packet_log_enabled(), 0, 1);
		} else {
			QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG,
				  "Line:%d %s:proc_dointvec failed reason %d",
				  __LINE__, __func__, ret);
		}
	} else {
		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
					       lenp, ppos);
		if (ret)
			QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG,
				  "Line:%d %s:proc_dointvec failed reason %d",
				  __LINE__, __func__, ret);
	}

	ctl->data = NULL;
	ctl->maxlen = 0;
	mutex_unlock(&proc_mutex);

	return ret;
}
static int get_pktlog_bufsize(struct pktlog_dev_t *pl_dev)
{
	return pl_dev->pl_info->buf_size;
}

/* sysctl procfs handler to set/get pktlog size */
static int
qdf_sysctl_decl(ath_sysctl_pktlog_size, ctl, write, filp, buffer, lenp, ppos)
{
	int ret, size;
	ol_ath_generic_softc_handle scn;
	struct pktlog_dev_t *pl_dev;

	mutex_lock(&proc_mutex);
	scn = (ol_ath_generic_softc_handle) ctl->extra1;

	if (!scn) {
		mutex_unlock(&proc_mutex);
		printk("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		mutex_unlock(&proc_mutex);
		printk("%s: Invalid pktlog handle\n", __func__);
		ASSERT(0);
		return -ENODEV;
	}

	ctl->data = &size;
	ctl->maxlen = sizeof(size);

	if (write) {
		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
					       lenp, ppos);
		if (ret == 0)
			ret = pl_dev->pl_funcs->pktlog_setsize(
					(struct hif_opaque_softc *)scn, size);
	} else {
		size = get_pktlog_bufsize(pl_dev);
		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
					       lenp, ppos);
	}

	ctl->data = NULL;
	ctl->maxlen = 0;
	mutex_unlock(&proc_mutex);

	return ret;
}
/* Register sysctl table */
static int pktlog_sysctl_register(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
	struct ath_pktlog_info_lnx *pl_info_lnx;
	char *proc_name;

	if (pl_dev) {
		pl_info_lnx = PL_INFO_LNX(pl_dev->pl_info);
		proc_name = pl_dev->name;
	} else {
		pl_info_lnx = PL_INFO_LNX(g_pktlog_info);
		proc_name = PKTLOG_PROC_SYSTEM;
	}

	/*
	 * Setup the sysctl table for creating the following sysctl entries:
	 * /proc/sys/PKTLOG_PROC_DIR/<adapter>/enable for enabling/disabling
	 * pktlog
	 * /proc/sys/PKTLOG_PROC_DIR/<adapter>/size for changing the buffer size
	 */
	memset(pl_info_lnx->sysctls, 0, sizeof(pl_info_lnx->sysctls));
	pl_info_lnx->sysctls[0].procname = PKTLOG_PROC_DIR;
	pl_info_lnx->sysctls[0].mode = PKTLOG_PROCSYS_DIR_PERM;
	pl_info_lnx->sysctls[0].child = &pl_info_lnx->sysctls[2];
	/* [1] is NULL terminator */
	pl_info_lnx->sysctls[2].procname = proc_name;
	pl_info_lnx->sysctls[2].mode = PKTLOG_PROCSYS_DIR_PERM;
	pl_info_lnx->sysctls[2].child = &pl_info_lnx->sysctls[4];
	/* [3] is NULL terminator */
	pl_info_lnx->sysctls[4].procname = "enable";
	pl_info_lnx->sysctls[4].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[4].proc_handler = ath_sysctl_pktlog_enable;
	pl_info_lnx->sysctls[4].extra1 = scn;
	pl_info_lnx->sysctls[5].procname = "size";
	pl_info_lnx->sysctls[5].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[5].proc_handler = ath_sysctl_pktlog_size;
	pl_info_lnx->sysctls[5].extra1 = scn;
	pl_info_lnx->sysctls[6].procname = "options";
	pl_info_lnx->sysctls[6].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[6].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[6].data = &pl_info_lnx->info.options;
	pl_info_lnx->sysctls[6].maxlen = sizeof(pl_info_lnx->info.options);
	pl_info_lnx->sysctls[7].procname = "sack_thr";
	pl_info_lnx->sysctls[7].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[7].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[7].data = &pl_info_lnx->info.sack_thr;
	pl_info_lnx->sysctls[7].maxlen = sizeof(pl_info_lnx->info.sack_thr);
	pl_info_lnx->sysctls[8].procname = "tail_length";
	pl_info_lnx->sysctls[8].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[8].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[8].data = &pl_info_lnx->info.tail_length;
	pl_info_lnx->sysctls[8].maxlen = sizeof(pl_info_lnx->info.tail_length);
	pl_info_lnx->sysctls[9].procname = "thruput_thresh";
	pl_info_lnx->sysctls[9].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[9].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[9].data = &pl_info_lnx->info.thruput_thresh;
	pl_info_lnx->sysctls[9].maxlen =
		sizeof(pl_info_lnx->info.thruput_thresh);
	pl_info_lnx->sysctls[10].procname = "phyerr_thresh";
	pl_info_lnx->sysctls[10].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[10].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[10].data = &pl_info_lnx->info.phyerr_thresh;
	pl_info_lnx->sysctls[10].maxlen =
		sizeof(pl_info_lnx->info.phyerr_thresh);
	pl_info_lnx->sysctls[11].procname = "per_thresh";
	pl_info_lnx->sysctls[11].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[11].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[11].data = &pl_info_lnx->info.per_thresh;
	pl_info_lnx->sysctls[11].maxlen = sizeof(pl_info_lnx->info.per_thresh);
	pl_info_lnx->sysctls[12].procname = "trigger_interval";
	pl_info_lnx->sysctls[12].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[12].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[12].data = &pl_info_lnx->info.trigger_interval;
	pl_info_lnx->sysctls[12].maxlen =
		sizeof(pl_info_lnx->info.trigger_interval);
	/* [13] is NULL terminator */

	/* and register everything */
	/* register_sysctl_table changed from 2.6.21 onwards */
	pl_info_lnx->sysctl_header =
		register_sysctl_table(pl_info_lnx->sysctls);

	if (!pl_info_lnx->sysctl_header) {
		printk("%s: failed to register sysctls!\n", proc_name);
		return -EINVAL;
	}

	return 0;
}
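
/*
 * Usage sketch (hypothetical adapter name "wlan", assuming MULTI_IF_NAME
 * is not defined so PKTLOG_PROC_DIR expands to "ath_pktlog"):
 *
 *   echo 1 > /proc/sys/ath_pktlog/wlan/enable    # start logging
 *   echo 0 > /proc/sys/ath_pktlog/wlan/enable    # stop logging
 *   cat /proc/ath_pktlog/wlan > pktlog.dat       # dump the log buffer
 *
 * The read path (pktlog_open/pktlog_read/pktlog_release below) disables
 * logging for the duration of the dump and restores the previous state
 * on release.
 */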
/*
 * Initialize logging for system or adapter
 * Parameter scn should be NULL for system wide logging
 */
static int pktlog_attach(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info_lnx *pl_info_lnx;
	char *proc_name;
	struct proc_dir_entry *proc_entry;

	/* Allocate pktlog dev for later use */
	pl_dev = get_pktlog_handle();

	if (pl_dev != NULL) {
		pl_info_lnx = kmalloc(sizeof(*pl_info_lnx), GFP_KERNEL);
		if (pl_info_lnx == NULL) {
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
				  "%s: Allocation failed for pl_info\n",
				  __func__);
			goto attach_fail1;
		}

		pl_dev->pl_info = &pl_info_lnx->info;
		pl_dev->name = WLANDEV_BASENAME;
		proc_name = pl_dev->name;

		if (!pl_dev->pl_funcs)
			pl_dev->pl_funcs = &ol_pl_funcs;

		/*
		 * Valid for both direct attach and offload architecture
		 */
		pl_dev->pl_funcs->pktlog_init(scn);
	} else {
		return -EINVAL;
	}

	/*
	 * initialize log info
	 * might be good to move to pktlog_init
	 */
	/* pl_dev->tgt_pktlog_alloced = false; */
	pl_info_lnx->proc_entry = NULL;
	pl_info_lnx->sysctl_header = NULL;

	proc_entry = proc_create_data(proc_name, PKTLOG_PROC_PERM,
				      g_pktlog_pde, &pktlog_fops,
				      &pl_info_lnx->info);

	if (proc_entry == NULL) {
		printk(PKTLOG_TAG "%s: create_proc_entry failed for %s\n",
		       __func__, proc_name);
		goto attach_fail1;
	}

	pl_info_lnx->proc_entry = proc_entry;

	if (pktlog_sysctl_register(scn)) {
		printk(PKTLOG_TAG "%s: sysctl register failed for %s\n",
		       __func__, proc_name);
		goto attach_fail2;
	}

	return 0;

attach_fail2:
	remove_proc_entry(proc_name, g_pktlog_pde);

attach_fail1:
	if (pl_dev)
		kfree(pl_dev->pl_info);

	return -EINVAL;
}
static void pktlog_sysctl_unregister(struct pktlog_dev_t *pl_dev)
{
	struct ath_pktlog_info_lnx *pl_info_lnx;

	if (!pl_dev) {
		printk("%s: Invalid pktlog context\n", __func__);
		ASSERT(0);
		return;
	}

	pl_info_lnx = (pl_dev) ? PL_INFO_LNX(pl_dev->pl_info) :
		      PL_INFO_LNX(g_pktlog_info);

	if (pl_info_lnx->sysctl_header) {
		unregister_sysctl_table(pl_info_lnx->sysctl_header);
		pl_info_lnx->sysctl_header = NULL;
	}
}
static void pktlog_detach(struct hif_opaque_softc *scn)
{
	struct ath_pktlog_info *pl_info;
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		printk("%s: Invalid pktlog context\n", __func__);
		ASSERT(0);
		return;
	}

	pl_info = pl_dev->pl_info;
	remove_proc_entry(WLANDEV_BASENAME, g_pktlog_pde);
	pktlog_sysctl_unregister(pl_dev);

	spin_lock_bh(&pl_info->log_lock);

	if (pl_info->buf) {
		pktlog_release_buf(scn);
		pl_dev->tgt_pktlog_alloced = false;
	}
	spin_unlock_bh(&pl_info->log_lock);
	pktlog_cleanup(pl_info);

	if (pl_dev) {
		kfree(pl_info);
		pl_dev->pl_info = NULL;
	}
}
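
/*
 * Open/release protocol for the /proc read interface: __pktlog_open()
 * saves the current log_state, disables logging via the registered
 * pktlog_disable callback and marks the operation in progress;
 * __pktlog_release() clears the log buffer, restores the saved
 * log_state and re-enables logging.  Both are wrapped with
 * cds_ssr_protect()/cds_ssr_unprotect() to guard against subsystem
 * restart while the proc file is in use.
 */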
static int __pktlog_open(struct inode *i, struct file *f)
{
	struct hif_opaque_softc *scn;
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	int ret = 0;

	PKTLOG_MOD_INC_USE_COUNT;
	pl_info = (struct ath_pktlog_info *)
		  PDE_DATA(f->f_path.dentry->d_inode);
	if (!pl_info) {
		pr_err("%s: pl_info NULL", __func__);
		return -EINVAL;
	}

	if (pl_info->curr_pkt_state != PKTLOG_OPR_NOT_IN_PROGRESS) {
		pr_info("%s: plinfo state (%d) != PKTLOG_OPR_NOT_IN_PROGRESS",
			__func__, pl_info->curr_pkt_state);
		return -EBUSY;
	}

	if (cds_is_module_state_transitioning()) {
		pr_info("%s: module transition in progress", __func__);
		return -EAGAIN;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_START;
	scn = cds_get_context(QDF_MODULE_ID_HIF);
	if (!scn) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid pktlog handle\n", __func__);
		ASSERT(0);
		return -ENODEV;
	}

	pl_info->init_saved_state = pl_info->log_state;
	if (!pl_info->log_state) {
		/* Pktlog is already disabled.
		 * Proceed to read directly.
		 */
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
		return ret;
	}

	/* Disable pktlog internally. */
	ret = pl_dev->pl_funcs->pktlog_disable(scn);
	pl_info->log_state = 0;
	pl_info->curr_pkt_state =
		PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
	return ret;
}

static int pktlog_open(struct inode *i, struct file *f)
{
	int ret;

	cds_ssr_protect(__func__);
	ret = __pktlog_open(i, f);
	cds_ssr_unprotect(__func__);
	return ret;
}
static int __pktlog_release(struct inode *i, struct file *f)
{
	struct hif_opaque_softc *scn;
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	int ret = 0;

	PKTLOG_MOD_DEC_USE_COUNT;
	pl_info = (struct ath_pktlog_info *)
		  PDE_DATA(f->f_path.dentry->d_inode);
	if (!pl_info)
		return -EINVAL;

	if (cds_is_module_state_transitioning()) {
		pr_info("%s: module transition in progress", __func__);
		return -EAGAIN;
	}

	scn = cds_get_context(QDF_MODULE_ID_HIF);
	if (!scn) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid pktlog handle\n", __func__);
		ASSERT(0);
		return -ENODEV;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE;

	/* Clear pktlog buffer. */
	pktlog_clearbuff(scn, true);
	pl_info->log_state = pl_info->init_saved_state;
	pl_info->init_saved_state = 0;

	/* Enable pktlog again */
	ret = pl_dev->pl_funcs->pktlog_enable(
			(struct hif_opaque_softc *)scn, pl_info->log_state,
			cds_is_packet_log_enabled(), 0, 1);
	if (ret != 0)
		pr_warn("%s: pktlog cannot be enabled. ret value %d\n",
			__func__, ret);

	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return ret;
}

static int pktlog_release(struct inode *i, struct file *f)
{
	int ret;

	cds_ssr_protect(__func__);
	ret = __pktlog_release(i, f);
	cds_ssr_unprotect(__func__);
	return ret;
}
#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
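
/*
 * Note on the two readers below: the log area is a circular buffer of
 * variable-length records, each preceded by a struct ath_pktlog_hdr.
 * When the buffer has wrapped, the readers first locate a "fold offset"
 * (the point at which the written data wraps back to offset 0) and then
 * copy the data out in two pieces, rd_offset..fold_offset followed by
 * the wrapped portion up to end_offset, so the caller sees a contiguous
 * stream.
 */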
/**
 * pktlog_read_proc_entry() - This function is used to read data from the
 * proc entry into the readers buffer
 * @buf:           Readers buffer
 * @nbytes:        Number of bytes to read
 * @ppos:          Offset within the drivers buffer
 * @pl_info:       Packet log information pointer
 * @read_complete: Boolean value indicating whether the read is complete
 *
 * This function is used to read data from the proc entry into the readers
 * buffer. Its functionality is similar to 'pktlog_read', which instead uses
 * copy_to_user() to fill a user-space buffer.
 *
 * Return: Number of bytes read from the buffer
 *
 */
ssize_t
pktlog_read_proc_entry(char *buf, size_t nbytes, loff_t *ppos,
		       struct ath_pktlog_info *pl_info, bool *read_complete)
{
	size_t bufhdr_size;
	size_t count = 0, ret_val = 0;
	int rem_len;
	int start_offset, end_offset;
	int fold_offset, ppos_data, cur_rd_offset, cur_wr_offset;
	struct ath_pktlog_buf *log_buf;

	spin_lock_bh(&pl_info->log_lock);
	log_buf = pl_info->buf;

	*read_complete = false;

	if (log_buf == NULL) {
		*read_complete = true;
		spin_unlock_bh(&pl_info->log_lock);
		return 0;
	}

	if (*ppos == 0 && pl_info->log_state) {
		pl_info->saved_state = pl_info->log_state;
		pl_info->log_state = 0;
	}

	bufhdr_size = sizeof(log_buf->bufhdr);

	/* copy valid log entries from the circular buffer into the
	 * reader's buffer
	 */
	rem_len = nbytes;
	count = 0;

	if (*ppos < bufhdr_size) {
		count = MIN((bufhdr_size - *ppos), rem_len);
		qdf_mem_copy(buf, ((char *)&log_buf->bufhdr) + *ppos,
			     count);
		rem_len -= count;
		ret_val += count;
	}

	start_offset = log_buf->rd_offset;
	cur_wr_offset = log_buf->wr_offset;

	if ((rem_len == 0) || (start_offset < 0))
		goto rd_done;

	fold_offset = -1;
	cur_rd_offset = start_offset;

	/* Find the last offset and fold-offset if the buffer is folded */
	do {
		struct ath_pktlog_hdr *log_hdr;
		int log_data_offset;

		log_hdr = (struct ath_pktlog_hdr *) (log_buf->log_data +
						     cur_rd_offset);

		log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr);

		if ((fold_offset == -1)
		    && ((pl_info->buf_size - log_data_offset)
			<= log_hdr->size))
			fold_offset = log_data_offset - 1;

		PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size);

		if ((fold_offset == -1) && (cur_rd_offset == 0)
		    && (cur_rd_offset != cur_wr_offset))
			fold_offset = log_data_offset + log_hdr->size - 1;

		end_offset = log_data_offset + log_hdr->size - 1;
	} while (cur_rd_offset != cur_wr_offset);

	ppos_data = *ppos + ret_val - bufhdr_size + start_offset;

	if (fold_offset == -1) {
		if (ppos_data > end_offset)
			goto rd_done;

		count = MIN(rem_len, (end_offset - ppos_data + 1));
		qdf_mem_copy(buf + ret_val,
			     log_buf->log_data + ppos_data,
			     count);
		ret_val += count;
		rem_len -= count;
	} else {
		if (ppos_data <= fold_offset) {
			count = MIN(rem_len, (fold_offset - ppos_data + 1));
			qdf_mem_copy(buf + ret_val,
				     log_buf->log_data + ppos_data,
				     count);
			ret_val += count;
			rem_len -= count;
		}

		if (rem_len == 0)
			goto rd_done;

		ppos_data =
			*ppos + ret_val - (bufhdr_size +
					   (fold_offset - start_offset + 1));

		if (ppos_data <= end_offset) {
			count = MIN(rem_len, (end_offset - ppos_data + 1));
			qdf_mem_copy(buf + ret_val,
				     log_buf->log_data + ppos_data,
				     count);
			ret_val += count;
			rem_len -= count;
		}
	}

rd_done:
	if ((ret_val < nbytes) && pl_info->saved_state) {
		pl_info->log_state = pl_info->saved_state;
		pl_info->saved_state = 0;
	}

	*ppos += ret_val;

	if (ret_val == 0) {
		/* The write pointer might have been updated during the read.
		 * If some data was written in the meantime, do not reset the
		 * pointers; we can continue reading from the current offset.
		 */
		if (cur_wr_offset != log_buf->wr_offset) {
			*read_complete = false;
		} else {
			pl_info->buf->rd_offset = -1;
			pl_info->buf->wr_offset = 0;
			pl_info->buf->bytes_written = 0;
			pl_info->buf->offset = PKTLOG_READ_OFFSET;
			*read_complete = true;
		}
	}
	spin_unlock_bh(&pl_info->log_lock);
	return ret_val;
}
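
/*
 * __pktlog_read() mirrors pktlog_read_proc_entry() above, but copies
 * into a user-space buffer.  The log spinlock is dropped around each
 * copy_to_user() call (which may sleep on a page fault) and re-acquired
 * afterwards.
 */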
static ssize_t
__pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos)
{
	size_t bufhdr_size;
	size_t count = 0, ret_val = 0;
	int rem_len;
	int start_offset, end_offset;
	int fold_offset, ppos_data, cur_rd_offset;
	struct ath_pktlog_info *pl_info;
	struct ath_pktlog_buf *log_buf;

	if (cds_is_module_state_transitioning()) {
		pr_info("%s: module transition in progress", __func__);
		return -EAGAIN;
	}

	pl_info = (struct ath_pktlog_info *)
		  PDE_DATA(file->f_path.dentry->d_inode);
	if (!pl_info)
		return 0;

	spin_lock_bh(&pl_info->log_lock);
	log_buf = pl_info->buf;

	if (log_buf == NULL) {
		spin_unlock_bh(&pl_info->log_lock);
		return 0;
	}

	if (pl_info->log_state) {
		/* Reading is not allowed while logging is in progress.
		 * Before reading (e.g. via cat), send the pktlog disable
		 * command first.
		 */
		spin_unlock_bh(&pl_info->log_lock);
		return -EINVAL;
	}

	if (*ppos == 0 && pl_info->log_state) {
		pl_info->saved_state = pl_info->log_state;
		pl_info->log_state = 0;
	}

	bufhdr_size = sizeof(log_buf->bufhdr);

	/* copy valid log entries from circular buffer into user space */
	rem_len = nbytes;
	count = 0;

	if (*ppos < bufhdr_size) {
		count = QDF_MIN((bufhdr_size - *ppos), rem_len);
		spin_unlock_bh(&pl_info->log_lock);
		if (copy_to_user(buf, ((char *)&log_buf->bufhdr) + *ppos,
				 count)) {
			return -EFAULT;
		}
		rem_len -= count;
		ret_val += count;
		spin_lock_bh(&pl_info->log_lock);
	}

	start_offset = log_buf->rd_offset;

	if ((rem_len == 0) || (start_offset < 0))
		goto rd_done;

	fold_offset = -1;
	cur_rd_offset = start_offset;

	/* Find the last offset and fold-offset if the buffer is folded */
	do {
		struct ath_pktlog_hdr *log_hdr;
		int log_data_offset;

		log_hdr = (struct ath_pktlog_hdr *)(log_buf->log_data +
						    cur_rd_offset);

		log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr);

		if ((fold_offset == -1)
		    && ((pl_info->buf_size - log_data_offset)
			<= log_hdr->size))
			fold_offset = log_data_offset - 1;

		PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size);

		if ((fold_offset == -1) && (cur_rd_offset == 0)
		    && (cur_rd_offset != log_buf->wr_offset))
			fold_offset = log_data_offset + log_hdr->size - 1;

		end_offset = log_data_offset + log_hdr->size - 1;
	} while (cur_rd_offset != log_buf->wr_offset);

	ppos_data = *ppos + ret_val - bufhdr_size + start_offset;

	if (fold_offset == -1) {
		if (ppos_data > end_offset)
			goto rd_done;

		count = QDF_MIN(rem_len, (end_offset - ppos_data + 1));
		spin_unlock_bh(&pl_info->log_lock);
		if (copy_to_user(buf + ret_val,
				 log_buf->log_data + ppos_data, count)) {
			return -EFAULT;
		}
		ret_val += count;
		rem_len -= count;
		spin_lock_bh(&pl_info->log_lock);
	} else {
		if (ppos_data <= fold_offset) {
			count = QDF_MIN(rem_len, (fold_offset - ppos_data + 1));
			spin_unlock_bh(&pl_info->log_lock);
			if (copy_to_user(buf + ret_val,
					 log_buf->log_data + ppos_data,
					 count)) {
				return -EFAULT;
			}
			ret_val += count;
			rem_len -= count;
			spin_lock_bh(&pl_info->log_lock);
		}

		if (rem_len == 0)
			goto rd_done;

		ppos_data =
			*ppos + ret_val - (bufhdr_size +
					   (fold_offset - start_offset + 1));

		if (ppos_data <= end_offset) {
			count = QDF_MIN(rem_len, (end_offset - ppos_data + 1));
			spin_unlock_bh(&pl_info->log_lock);
			if (copy_to_user(buf + ret_val,
					 log_buf->log_data + ppos_data,
					 count)) {
				return -EFAULT;
			}
			ret_val += count;
			rem_len -= count;
			spin_lock_bh(&pl_info->log_lock);
		}
	}

rd_done:
	if ((ret_val < nbytes) && pl_info->saved_state) {
		pl_info->log_state = pl_info->saved_state;
		pl_info->saved_state = 0;
	}

	*ppos += ret_val;
	spin_unlock_bh(&pl_info->log_lock);
	return ret_val;
}
static ssize_t
pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos)
{
	size_t ret;
	struct ath_pktlog_info *pl_info;

	pl_info = (struct ath_pktlog_info *)
		  PDE_DATA(file->f_path.dentry->d_inode);
	if (!pl_info)
		return 0;

	cds_ssr_protect(__func__);
	mutex_lock(&pl_info->pktlog_mutex);
	ret = __pktlog_read(file, buf, nbytes, ppos);
	mutex_unlock(&pl_info->pktlog_mutex);
	cds_ssr_unprotect(__func__);
	return ret;
}
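
/*
 * Module-level init/exit: pktlogmod_init() creates the PKTLOG_PROC_DIR
 * directory under /proc and attaches the per-adapter proc and sysctl
 * entries; pktlogmod_exit() tears them down again.  The context pointer
 * is passed through to pktlog_attach()/pktlog_detach() as the
 * hif_opaque_softc handle.
 */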
int pktlogmod_init(void *context)
{
	int ret;

	/* create the proc directory entry */
	g_pktlog_pde = proc_mkdir(PKTLOG_PROC_DIR, NULL);

	if (g_pktlog_pde == NULL) {
		printk(PKTLOG_TAG "%s: proc_mkdir failed\n", __func__);
		return -EPERM;
	}

	/* Attach packet log */
	ret = pktlog_attach((struct hif_opaque_softc *)context);

	/* If packet log init failed */
	if (ret)
		goto attach_fail;

	return ret;

attach_fail:
	remove_proc_entry(PKTLOG_PROC_DIR, NULL);
	g_pktlog_pde = NULL;
	return ret;
}
void pktlogmod_exit(void *context)
{
	if (g_pktlog_pde == NULL)
		return;

	pktlog_detach((struct hif_opaque_softc *)context);

	/*
	 * pdev kill needs to be implemented
	 */
	remove_proc_entry(PKTLOG_PROC_DIR, NULL);
}
#endif