linux_ac.c

/*
 * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#ifndef REMOVE_PKT_LOG

#ifndef EXPORT_SYMTAB
#define EXPORT_SYMTAB
#endif

#ifndef __KERNEL__
#define __KERNEL__
#endif

/*
 * Linux specific implementation of Pktlogs for 802.11ac
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <pktlog_ac_i.h>
#include <pktlog_ac_fmt.h>
#include "i_host_diag_core_log.h"
#include "host_diag_core_log.h"
#include "ani_global.h"

#define PKTLOG_TAG              "ATH_PKTLOG"
#define PKTLOG_DEVNAME_SIZE     32
#define MAX_WLANDEV             1

#ifdef MULTI_IF_NAME
#define PKTLOG_PROC_DIR         "ath_pktlog" MULTI_IF_NAME
#else
#define PKTLOG_PROC_DIR         "ath_pktlog"
#endif

/* Permissions for creating proc entries */
#define PKTLOG_PROC_PERM        0444
#define PKTLOG_PROCSYS_DIR_PERM 0555
#define PKTLOG_PROCSYS_PERM     0644

#ifndef __MOD_INC_USE_COUNT
#define PKTLOG_MOD_INC_USE_COUNT	do {				\
		if (!try_module_get(THIS_MODULE)) {			\
			printk(KERN_WARNING "try_module_get failed\n");	\
		} } while (0)

#define PKTLOG_MOD_DEC_USE_COUNT        module_put(THIS_MODULE)
#else
#define PKTLOG_MOD_INC_USE_COUNT        MOD_INC_USE_COUNT
#define PKTLOG_MOD_DEC_USE_COUNT        MOD_DEC_USE_COUNT
#endif

static struct ath_pktlog_info *g_pktlog_info;
static struct proc_dir_entry *g_pktlog_pde;
static DEFINE_MUTEX(proc_mutex);

static int pktlog_attach(struct hif_opaque_softc *scn);
static void pktlog_detach(struct hif_opaque_softc *scn);
static int pktlog_open(struct inode *i, struct file *f);
static int pktlog_release(struct inode *i, struct file *f);
static ssize_t pktlog_read(struct file *file, char *buf, size_t nbytes,
			   loff_t *ppos);

static struct file_operations pktlog_fops = {
	open:    pktlog_open,
	release: pktlog_release,
	read:    pktlog_read,
};
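
/* Stop adapter logging: clear log_state so no further pktlog entries are
 * recorded. The log buffer itself is left untouched.
 */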
void pktlog_disable_adapter_logging(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();

	if (pl_dev)
		pl_dev->pl_info->log_state = 0;
}
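
/*
 * pktlog_alloc_buf() - allocate the packet log ring buffer
 *
 * vmalloc()s a region large enough for the ath_pktlog_buf header plus
 * pl_info->buf_size bytes, rounds the start address up to a page boundary,
 * marks every page reserved, and then publishes the buffer pointer under
 * pl_info->log_lock (freeing any buffer that appeared in the meantime).
 */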
int pktlog_alloc_buf(struct hif_opaque_softc *scn)
{
	uint32_t page_cnt;
	unsigned long vaddr;
	struct page *vpg;
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	struct ath_pktlog_buf *buffer;

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		printk(PKTLOG_TAG
		       "%s: Unable to allocate buffer pdev_txrx_handle or pdev_txrx_handle->pl_dev is null\n",
		       __func__);
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;

	page_cnt = (sizeof(*(pl_info->buf)) + pl_info->buf_size) / PAGE_SIZE;

	spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf != NULL) {
		printk(PKTLOG_TAG "Buffer is already in use\n");
		spin_unlock_bh(&pl_info->log_lock);
		return -EINVAL;
	}
	spin_unlock_bh(&pl_info->log_lock);

	buffer = vmalloc((page_cnt + 2) * PAGE_SIZE);
	if (buffer == NULL) {
		printk(PKTLOG_TAG
		       "%s: Unable to allocate buffer (%d pages)\n",
		       __func__, page_cnt);
		return -ENOMEM;
	}

	buffer = (struct ath_pktlog_buf *)
		 (((unsigned long)(buffer) + PAGE_SIZE - 1) & PAGE_MASK);

	for (vaddr = (unsigned long)(buffer);
	     vaddr < ((unsigned long)(buffer) + (page_cnt * PAGE_SIZE));
	     vaddr += PAGE_SIZE) {
		vpg = vmalloc_to_page((const void *)vaddr);
		SetPageReserved(vpg);
	}

	spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf != NULL)
		pktlog_release_buf(scn);

	pl_info->buf = buffer;
	spin_unlock_bh(&pl_info->log_lock);
	return 0;
}
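
/*
 * pktlog_release_buf() - free the packet log ring buffer
 *
 * Clears the reserved flag on every page of the buffer allocated by
 * pktlog_alloc_buf() and returns the memory with vfree().
 */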
void pktlog_release_buf(struct hif_opaque_softc *scn)
{
	unsigned long page_cnt;
	unsigned long vaddr;
	struct page *vpg;
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return;
	}

	if (!pl_dev->pl_info) {
		qdf_print("%s: invalid pl_dev handle", __func__);
		return;
	}

	pl_info = pl_dev->pl_info;

	page_cnt = ((sizeof(*(pl_info->buf)) + pl_info->buf_size) /
		    PAGE_SIZE) + 1;

	for (vaddr = (unsigned long)(pl_info->buf);
	     vaddr < (unsigned long)(pl_info->buf) + (page_cnt * PAGE_SIZE);
	     vaddr += PAGE_SIZE) {
		vpg = vmalloc_to_page((const void *)vaddr);
		ClearPageReserved(vpg);
	}

	vfree(pl_info->buf);
	pl_info->buf = NULL;
}
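
/* Reset the logging state and tear down the locks owned by pl_info */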
static void pktlog_cleanup(struct ath_pktlog_info *pl_info)
{
	pl_info->log_state = 0;
	PKTLOG_LOCK_DESTROY(pl_info);
	mutex_destroy(&pl_info->pktlog_mutex);
}

/* sysctl procfs handler to enable pktlog */
static int
qdf_sysctl_decl(ath_sysctl_pktlog_enable, ctl, write, filp, buffer, lenp, ppos)
{
	int ret, enable;
	ol_ath_generic_softc_handle scn;
	struct pktlog_dev_t *pl_dev;

	mutex_lock(&proc_mutex);
	scn = (ol_ath_generic_softc_handle) ctl->extra1;

	if (!scn) {
		mutex_unlock(&proc_mutex);
		printk("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		mutex_unlock(&proc_mutex);
		printk("%s: Invalid pktlog context\n", __func__);
		ASSERT(0);
		return -ENODEV;
	}

	ctl->data = &enable;
	ctl->maxlen = sizeof(enable);

	if (write) {
		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
					       lenp, ppos);
		if (ret == 0)
			ret = pl_dev->pl_funcs->pktlog_enable(
					(struct hif_opaque_softc *)scn, enable,
					cds_is_packet_log_enabled(), 0, 1);
		else
			QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG,
				  "Line:%d %s:proc_dointvec failed reason %d",
				  __LINE__, __func__, ret);
	} else {
		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
					       lenp, ppos);
		if (ret)
			QDF_TRACE(QDF_MODULE_ID_SYS, QDF_TRACE_LEVEL_DEBUG,
				  "Line:%d %s:proc_dointvec failed reason %d",
				  __LINE__, __func__, ret);
	}

	ctl->data = NULL;
	ctl->maxlen = 0;
	mutex_unlock(&proc_mutex);

	return ret;
}

static int get_pktlog_bufsize(struct pktlog_dev_t *pl_dev)
{
	return pl_dev->pl_info->buf_size;
}

/* sysctl procfs handler to set/get pktlog size */
static int
qdf_sysctl_decl(ath_sysctl_pktlog_size, ctl, write, filp, buffer, lenp, ppos)
{
	int ret, size;
	ol_ath_generic_softc_handle scn;
	struct pktlog_dev_t *pl_dev;

	mutex_lock(&proc_mutex);
	scn = (ol_ath_generic_softc_handle) ctl->extra1;

	if (!scn) {
		mutex_unlock(&proc_mutex);
		printk("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		mutex_unlock(&proc_mutex);
		printk("%s: Invalid pktlog handle\n", __func__);
		ASSERT(0);
		return -ENODEV;
	}

	ctl->data = &size;
	ctl->maxlen = sizeof(size);

	if (write) {
		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
					       lenp, ppos);
		if (ret == 0)
			ret = pl_dev->pl_funcs->pktlog_setsize(
					(struct hif_opaque_softc *)scn, size);
	} else {
		size = get_pktlog_bufsize(pl_dev);
		ret = QDF_SYSCTL_PROC_DOINTVEC(ctl, write, filp, buffer,
					       lenp, ppos);
	}

	ctl->data = NULL;
	ctl->maxlen = 0;
	mutex_unlock(&proc_mutex);

	return ret;
}

/* Register sysctl table */
static int pktlog_sysctl_register(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
	struct ath_pktlog_info_lnx *pl_info_lnx;
	char *proc_name;

	if (pl_dev) {
		pl_info_lnx = PL_INFO_LNX(pl_dev->pl_info);
		proc_name = pl_dev->name;
	} else {
		pl_info_lnx = PL_INFO_LNX(g_pktlog_info);
		proc_name = PKTLOG_PROC_SYSTEM;
	}

	/*
	 * Setup the sysctl table for creating the following sysctl entries:
	 * /proc/sys/PKTLOG_PROC_DIR/<adapter>/enable for enabling/disabling
	 * pktlog
	 * /proc/sys/PKTLOG_PROC_DIR/<adapter>/size for changing the buffer size
	 */
	memset(pl_info_lnx->sysctls, 0, sizeof(pl_info_lnx->sysctls));
	pl_info_lnx->sysctls[0].procname = PKTLOG_PROC_DIR;
	pl_info_lnx->sysctls[0].mode = PKTLOG_PROCSYS_DIR_PERM;
	pl_info_lnx->sysctls[0].child = &pl_info_lnx->sysctls[2];
	/* [1] is NULL terminator */
	pl_info_lnx->sysctls[2].procname = proc_name;
	pl_info_lnx->sysctls[2].mode = PKTLOG_PROCSYS_DIR_PERM;
	pl_info_lnx->sysctls[2].child = &pl_info_lnx->sysctls[4];
	/* [3] is NULL terminator */
	pl_info_lnx->sysctls[4].procname = "enable";
	pl_info_lnx->sysctls[4].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[4].proc_handler = ath_sysctl_pktlog_enable;
	pl_info_lnx->sysctls[4].extra1 = scn;
	pl_info_lnx->sysctls[5].procname = "size";
	pl_info_lnx->sysctls[5].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[5].proc_handler = ath_sysctl_pktlog_size;
	pl_info_lnx->sysctls[5].extra1 = scn;
	pl_info_lnx->sysctls[6].procname = "options";
	pl_info_lnx->sysctls[6].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[6].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[6].data = &pl_info_lnx->info.options;
	pl_info_lnx->sysctls[6].maxlen = sizeof(pl_info_lnx->info.options);
	pl_info_lnx->sysctls[7].procname = "sack_thr";
	pl_info_lnx->sysctls[7].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[7].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[7].data = &pl_info_lnx->info.sack_thr;
	pl_info_lnx->sysctls[7].maxlen = sizeof(pl_info_lnx->info.sack_thr);
	pl_info_lnx->sysctls[8].procname = "tail_length";
	pl_info_lnx->sysctls[8].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[8].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[8].data = &pl_info_lnx->info.tail_length;
	pl_info_lnx->sysctls[8].maxlen = sizeof(pl_info_lnx->info.tail_length);
	pl_info_lnx->sysctls[9].procname = "thruput_thresh";
	pl_info_lnx->sysctls[9].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[9].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[9].data = &pl_info_lnx->info.thruput_thresh;
	pl_info_lnx->sysctls[9].maxlen =
		sizeof(pl_info_lnx->info.thruput_thresh);
	pl_info_lnx->sysctls[10].procname = "phyerr_thresh";
	pl_info_lnx->sysctls[10].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[10].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[10].data = &pl_info_lnx->info.phyerr_thresh;
	pl_info_lnx->sysctls[10].maxlen =
		sizeof(pl_info_lnx->info.phyerr_thresh);
	pl_info_lnx->sysctls[11].procname = "per_thresh";
	pl_info_lnx->sysctls[11].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[11].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[11].data = &pl_info_lnx->info.per_thresh;
	pl_info_lnx->sysctls[11].maxlen = sizeof(pl_info_lnx->info.per_thresh);
	pl_info_lnx->sysctls[12].procname = "trigger_interval";
	pl_info_lnx->sysctls[12].mode = PKTLOG_PROCSYS_PERM;
	pl_info_lnx->sysctls[12].proc_handler = proc_dointvec;
	pl_info_lnx->sysctls[12].data = &pl_info_lnx->info.trigger_interval;
	pl_info_lnx->sysctls[12].maxlen =
		sizeof(pl_info_lnx->info.trigger_interval);
	/* [13] is NULL terminator */

	/* and register everything */
	/* register_sysctl_table changed from 2.6.21 onwards */
	pl_info_lnx->sysctl_header =
		register_sysctl_table(pl_info_lnx->sysctls);

	if (!pl_info_lnx->sysctl_header) {
		printk("%s: failed to register sysctls!\n", proc_name);
		return -EINVAL;
	}

	return 0;
}

/*
 * Initialize logging for system or adapter
 * Parameter scn should be NULL for system wide logging
 */
static int pktlog_attach(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info_lnx *pl_info_lnx;
	char *proc_name;
	struct proc_dir_entry *proc_entry;

	/* Allocate pktlog dev for later use */
	pl_dev = get_pktlog_handle();

	if (pl_dev != NULL) {
		pl_info_lnx = kmalloc(sizeof(*pl_info_lnx), GFP_KERNEL);
		if (pl_info_lnx == NULL) {
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
				  "%s: Allocation failed for pl_info\n",
				  __func__);
			goto attach_fail1;
		}

		pl_dev->pl_info = &pl_info_lnx->info;
		pl_dev->name = WLANDEV_BASENAME;
		proc_name = pl_dev->name;

		if (!pl_dev->pl_funcs)
			pl_dev->pl_funcs = &ol_pl_funcs;

		/*
		 * Valid for both direct attach and offload architecture
		 */
		pl_dev->pl_funcs->pktlog_init(scn);
	} else {
		return -EINVAL;
	}

	/*
	 * initialize log info
	 * might be good to move to pktlog_init
	 */
	/* pl_dev->tgt_pktlog_alloced = false; */
	pl_info_lnx->proc_entry = NULL;
	pl_info_lnx->sysctl_header = NULL;

	proc_entry = proc_create_data(proc_name, PKTLOG_PROC_PERM,
				      g_pktlog_pde, &pktlog_fops,
				      &pl_info_lnx->info);

	if (proc_entry == NULL) {
		printk(PKTLOG_TAG "%s: create_proc_entry failed for %s\n",
		       __func__, proc_name);
		goto attach_fail1;
	}

	pl_info_lnx->proc_entry = proc_entry;

	if (pktlog_sysctl_register(scn)) {
		printk(PKTLOG_TAG "%s: sysctl register failed for %s\n",
		       __func__, proc_name);
		goto attach_fail2;
	}

	return 0;

attach_fail2:
	remove_proc_entry(proc_name, g_pktlog_pde);

attach_fail1:
	if (pl_dev)
		kfree(pl_dev->pl_info);

	return -EINVAL;
}
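
/* Unregister the sysctl table installed by pktlog_sysctl_register() */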
static void pktlog_sysctl_unregister(struct pktlog_dev_t *pl_dev)
{
	struct ath_pktlog_info_lnx *pl_info_lnx;

	if (!pl_dev) {
		printk("%s: Invalid pktlog context\n", __func__);
		ASSERT(0);
		return;
	}

	pl_info_lnx = (pl_dev) ? PL_INFO_LNX(pl_dev->pl_info) :
		      PL_INFO_LNX(g_pktlog_info);

	if (pl_info_lnx->sysctl_header) {
		unregister_sysctl_table(pl_info_lnx->sysctl_header);
		pl_info_lnx->sysctl_header = NULL;
	}
}
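
/*
 * Undo pktlog_attach(): remove the proc entry and sysctl table, release
 * the log buffer if one was allocated, and free pl_info.
 */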
static void pktlog_detach(struct hif_opaque_softc *scn)
{
	struct ath_pktlog_info *pl_info;
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();

	if (!pl_dev) {
		printk("%s: Invalid pktlog context\n", __func__);
		ASSERT(0);
		return;
	}

	pl_info = pl_dev->pl_info;
	remove_proc_entry(WLANDEV_BASENAME, g_pktlog_pde);
	pktlog_sysctl_unregister(pl_dev);

	spin_lock_bh(&pl_info->log_lock);

	if (pl_info->buf) {
		pktlog_release_buf(scn);
		pl_dev->tgt_pktlog_alloced = false;
	}
	spin_unlock_bh(&pl_info->log_lock);
	pktlog_cleanup(pl_info);

	if (pl_dev) {
		kfree(pl_info);
		pl_dev->pl_info = NULL;
	}
}
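
/*
 * open() handler for the proc entry (called through the SSR-protected
 * pktlog_open() wrapper below). Saves the current logging state and
 * disables pktlog so the buffer can be read out consistently.
 */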
static int __pktlog_open(struct inode *i, struct file *f)
{
	struct hif_opaque_softc *scn;
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	int ret = 0;

	PKTLOG_MOD_INC_USE_COUNT;
	pl_info = (struct ath_pktlog_info *)
		  PDE_DATA(f->f_path.dentry->d_inode);
	if (!pl_info) {
		pr_err("%s: pl_info NULL", __func__);
		return -EINVAL;
	}

	if (pl_info->curr_pkt_state != PKTLOG_OPR_NOT_IN_PROGRESS) {
		pr_info("%s: plinfo state (%d) != PKTLOG_OPR_NOT_IN_PROGRESS",
			__func__, pl_info->curr_pkt_state);
		return -EBUSY;
	}

	if (cds_is_module_state_transitioning()) {
		pr_info("%s: module transition in progress", __func__);
		return -EAGAIN;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_START;

	scn = cds_get_context(QDF_MODULE_ID_HIF);
	if (!scn) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid pktlog handle\n", __func__);
		ASSERT(0);
		return -ENODEV;
	}

	pl_info->init_saved_state = pl_info->log_state;
	if (!pl_info->log_state) {
		/* Pktlog is already disabled.
		 * Proceed to read directly.
		 */
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
		return ret;
	}
	/* Disable the pktlog internally. */
	ret = pl_dev->pl_funcs->pktlog_disable(scn);
	pl_info->log_state = 0;
	pl_info->curr_pkt_state =
		PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
	return ret;
}

static int pktlog_open(struct inode *i, struct file *f)
{
	int ret;

	cds_ssr_protect(__func__);
	ret = __pktlog_open(i, f);
	cds_ssr_unprotect(__func__);
	return ret;
}
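
/*
 * release() handler counterpart of __pktlog_open(): clears the log buffer
 * and restores the logging state that was saved at open time.
 */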
static int __pktlog_release(struct inode *i, struct file *f)
{
	struct hif_opaque_softc *scn;
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	int ret = 0;

	PKTLOG_MOD_DEC_USE_COUNT;
	pl_info = (struct ath_pktlog_info *)
		  PDE_DATA(f->f_path.dentry->d_inode);
	if (!pl_info)
		return -EINVAL;

	if (cds_is_module_state_transitioning()) {
		pr_info("%s: module transition in progress", __func__);
		return -EAGAIN;
	}

	scn = cds_get_context(QDF_MODULE_ID_HIF);
	if (!scn) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid scn context\n", __func__);
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("%s: Invalid pktlog handle\n", __func__);
		ASSERT(0);
		return -ENODEV;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE;
	/* Clear pktlog buffer. */
	pktlog_clearbuff(scn, true);
	pl_info->log_state = pl_info->init_saved_state;
	pl_info->init_saved_state = 0;

	/* Enable pktlog again */
	ret = pl_dev->pl_funcs->pktlog_enable(
			(struct hif_opaque_softc *)scn, pl_info->log_state,
			cds_is_packet_log_enabled(), 0, 1);
	if (ret != 0)
		pr_warn("%s: pktlog cannot be enabled. ret value %d\n",
			__func__, ret);

	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return ret;
}

static int pktlog_release(struct inode *i, struct file *f)
{
	int ret;

	cds_ssr_protect(__func__);
	ret = __pktlog_release(i, f);
	cds_ssr_unprotect(__func__);
	return ret;
}

#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif

/**
 * pktlog_read_proc_entry() - read data from the proc entry into the
 * reader's buffer
 * @buf: Reader's buffer
 * @nbytes: Number of bytes to read
 * @ppos: Offset within the driver's buffer
 * @pl_info: Packet log information pointer
 * @read_complete: Boolean indicating whether the read is complete
 *
 * This function reads data from the proc entry into the reader's buffer.
 * It is similar to pktlog_read(), except that pktlog_read() copies the
 * data to a user-space buffer with copy_to_user().
 *
 * Return: Number of bytes read from the buffer
 *
 */
ssize_t
pktlog_read_proc_entry(char *buf, size_t nbytes, loff_t *ppos,
		       struct ath_pktlog_info *pl_info, bool *read_complete)
{
	size_t bufhdr_size;
	size_t count = 0, ret_val = 0;
	int rem_len;
	int start_offset, end_offset;
	int fold_offset, ppos_data, cur_rd_offset, cur_wr_offset;
	struct ath_pktlog_buf *log_buf;

	spin_lock_bh(&pl_info->log_lock);
	log_buf = pl_info->buf;

	*read_complete = false;

	if (log_buf == NULL) {
		*read_complete = true;
		spin_unlock_bh(&pl_info->log_lock);
		return 0;
	}

	if (*ppos == 0 && pl_info->log_state) {
		pl_info->saved_state = pl_info->log_state;
		pl_info->log_state = 0;
	}

	bufhdr_size = sizeof(log_buf->bufhdr);

	/* copy valid log entries from circular buffer into user space */
	rem_len = nbytes;
	count = 0;

	if (*ppos < bufhdr_size) {
		count = MIN((bufhdr_size - *ppos), rem_len);
		qdf_mem_copy(buf, ((char *)&log_buf->bufhdr) + *ppos,
			     count);
		rem_len -= count;
		ret_val += count;
	}

	start_offset = log_buf->rd_offset;
	cur_wr_offset = log_buf->wr_offset;

	if ((rem_len == 0) || (start_offset < 0))
		goto rd_done;

	fold_offset = -1;
	cur_rd_offset = start_offset;

	/* Find the last offset and fold-offset if the buffer is folded */
	do {
		struct ath_pktlog_hdr *log_hdr;
		int log_data_offset;

		log_hdr = (struct ath_pktlog_hdr *) (log_buf->log_data +
						     cur_rd_offset);

		log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr);

		if ((fold_offset == -1)
		    && ((pl_info->buf_size - log_data_offset)
			<= log_hdr->size))
			fold_offset = log_data_offset - 1;

		PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size);

		if ((fold_offset == -1) && (cur_rd_offset == 0)
		    && (cur_rd_offset != cur_wr_offset))
			fold_offset = log_data_offset + log_hdr->size - 1;

		end_offset = log_data_offset + log_hdr->size - 1;
	} while (cur_rd_offset != cur_wr_offset);

	ppos_data = *ppos + ret_val - bufhdr_size + start_offset;

	if (fold_offset == -1) {
		if (ppos_data > end_offset)
			goto rd_done;

		count = MIN(rem_len, (end_offset - ppos_data + 1));
		qdf_mem_copy(buf + ret_val,
			     log_buf->log_data + ppos_data,
			     count);
		ret_val += count;
		rem_len -= count;
	} else {
		if (ppos_data <= fold_offset) {
			count = MIN(rem_len, (fold_offset - ppos_data + 1));
			qdf_mem_copy(buf + ret_val,
				     log_buf->log_data + ppos_data,
				     count);
			ret_val += count;
			rem_len -= count;
		}

		if (rem_len == 0)
			goto rd_done;

		ppos_data =
			*ppos + ret_val - (bufhdr_size +
					   (fold_offset - start_offset + 1));

		if (ppos_data <= end_offset) {
			count = MIN(rem_len, (end_offset - ppos_data + 1));
			qdf_mem_copy(buf + ret_val,
				     log_buf->log_data + ppos_data,
				     count);
			ret_val += count;
			rem_len -= count;
		}
	}

rd_done:
	if ((ret_val < nbytes) && pl_info->saved_state) {
		pl_info->log_state = pl_info->saved_state;
		pl_info->saved_state = 0;
	}

	*ppos += ret_val;

	if (ret_val == 0) {
		/* The write pointer might have been updated during the read.
		 * So, if some data has been written into the buffer, do not
		 * reset the pointers; we can continue reading from the
		 * current offset position.
		 */
		if (cur_wr_offset != log_buf->wr_offset) {
			*read_complete = false;
		} else {
			pl_info->buf->rd_offset = -1;
			pl_info->buf->wr_offset = 0;
			pl_info->buf->bytes_written = 0;
			pl_info->buf->offset = PKTLOG_READ_OFFSET;
			*read_complete = true;
		}
	}
	spin_unlock_bh(&pl_info->log_lock);
	return ret_val;
}
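
/*
 * Copy the buffer header followed by the valid region of the circular log
 * buffer to user space, handling the case where the data wraps (folds) at
 * the end of the buffer. The spinlock is dropped around each
 * copy_to_user() call, since copying to user space may fault and sleep.
 */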
static ssize_t
__pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos)
{
	size_t bufhdr_size;
	size_t count = 0, ret_val = 0;
	int rem_len;
	int start_offset, end_offset;
	int fold_offset, ppos_data, cur_rd_offset;
	struct ath_pktlog_info *pl_info;
	struct ath_pktlog_buf *log_buf;

	if (cds_is_module_state_transitioning()) {
		pr_info("%s: module transition in progress", __func__);
		return -EAGAIN;
	}

	pl_info = (struct ath_pktlog_info *)
		  PDE_DATA(file->f_path.dentry->d_inode);
	if (!pl_info)
		return 0;

	spin_lock_bh(&pl_info->log_lock);
	log_buf = pl_info->buf;

	if (log_buf == NULL) {
		spin_unlock_bh(&pl_info->log_lock);
		return 0;
	}

	if (pl_info->log_state) {
		/* Read is not allowed when write is going on
		 * When issuing cat command, ensure to send
		 * pktlog disable command first.
		 */
		spin_unlock_bh(&pl_info->log_lock);
		return -EINVAL;
	}

	if (*ppos == 0 && pl_info->log_state) {
		pl_info->saved_state = pl_info->log_state;
		pl_info->log_state = 0;
	}

	bufhdr_size = sizeof(log_buf->bufhdr);

	/* copy valid log entries from circular buffer into user space */
	rem_len = nbytes;
	count = 0;

	if (*ppos < bufhdr_size) {
		count = QDF_MIN((bufhdr_size - *ppos), rem_len);
		spin_unlock_bh(&pl_info->log_lock);
		if (copy_to_user(buf, ((char *)&log_buf->bufhdr) + *ppos,
				 count)) {
			return -EFAULT;
		}
		rem_len -= count;
		ret_val += count;
		spin_lock_bh(&pl_info->log_lock);
	}

	start_offset = log_buf->rd_offset;

	if ((rem_len == 0) || (start_offset < 0))
		goto rd_done;

	fold_offset = -1;
	cur_rd_offset = start_offset;

	/* Find the last offset and fold-offset if the buffer is folded */
	do {
		struct ath_pktlog_hdr *log_hdr;
		int log_data_offset;

		log_hdr = (struct ath_pktlog_hdr *)(log_buf->log_data +
						    cur_rd_offset);
		log_data_offset = cur_rd_offset + sizeof(struct ath_pktlog_hdr);

		if ((fold_offset == -1)
		    && ((pl_info->buf_size - log_data_offset)
			<= log_hdr->size))
			fold_offset = log_data_offset - 1;

		PKTLOG_MOV_RD_IDX(cur_rd_offset, log_buf, pl_info->buf_size);

		if ((fold_offset == -1) && (cur_rd_offset == 0)
		    && (cur_rd_offset != log_buf->wr_offset))
			fold_offset = log_data_offset + log_hdr->size - 1;

		end_offset = log_data_offset + log_hdr->size - 1;
	} while (cur_rd_offset != log_buf->wr_offset);

	ppos_data = *ppos + ret_val - bufhdr_size + start_offset;

	if (fold_offset == -1) {
		if (ppos_data > end_offset)
			goto rd_done;

		count = QDF_MIN(rem_len, (end_offset - ppos_data + 1));
		spin_unlock_bh(&pl_info->log_lock);

		if (copy_to_user(buf + ret_val,
				 log_buf->log_data + ppos_data, count)) {
			return -EFAULT;
		}

		ret_val += count;
		rem_len -= count;
		spin_lock_bh(&pl_info->log_lock);
	} else {
		if (ppos_data <= fold_offset) {
			count = QDF_MIN(rem_len, (fold_offset - ppos_data + 1));
			spin_unlock_bh(&pl_info->log_lock);
			if (copy_to_user(buf + ret_val,
					 log_buf->log_data + ppos_data,
					 count)) {
				return -EFAULT;
			}
			ret_val += count;
			rem_len -= count;
			spin_lock_bh(&pl_info->log_lock);
		}

		if (rem_len == 0)
			goto rd_done;

		ppos_data =
			*ppos + ret_val - (bufhdr_size +
					   (fold_offset - start_offset + 1));

		if (ppos_data <= end_offset) {
			count = QDF_MIN(rem_len, (end_offset - ppos_data + 1));
			spin_unlock_bh(&pl_info->log_lock);
			if (copy_to_user(buf + ret_val,
					 log_buf->log_data + ppos_data,
					 count)) {
				return -EFAULT;
			}
			ret_val += count;
			rem_len -= count;
			spin_lock_bh(&pl_info->log_lock);
		}
	}

rd_done:
	if ((ret_val < nbytes) && pl_info->saved_state) {
		pl_info->log_state = pl_info->saved_state;
		pl_info->saved_state = 0;
	}

	*ppos += ret_val;

	spin_unlock_bh(&pl_info->log_lock);
	return ret_val;
}

static ssize_t
pktlog_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos)
{
	size_t ret;
	struct ath_pktlog_info *pl_info;

	pl_info = (struct ath_pktlog_info *)
		  PDE_DATA(file->f_path.dentry->d_inode);
	if (!pl_info)
		return 0;

	cds_ssr_protect(__func__);
	mutex_lock(&pl_info->pktlog_mutex);
	ret = __pktlog_read(file, buf, nbytes, ppos);
	mutex_unlock(&pl_info->pktlog_mutex);
	cds_ssr_unprotect(__func__);
	return ret;
}
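
/*
 * Module-level init: create the /proc/<PKTLOG_PROC_DIR> directory and
 * attach packet logging for the given hif context.
 */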
int pktlogmod_init(void *context)
{
	int ret;

	/* create the proc directory entry */
	g_pktlog_pde = proc_mkdir(PKTLOG_PROC_DIR, NULL);

	if (g_pktlog_pde == NULL) {
		printk(PKTLOG_TAG "%s: proc_mkdir failed\n", __func__);
		return -EPERM;
	}

	/* Attach packet log */
	ret = pktlog_attach((struct hif_opaque_softc *)context);

	/* If packet log init failed */
	if (ret)
		goto attach_fail;

	return ret;

attach_fail:
	remove_proc_entry(PKTLOG_PROC_DIR, NULL);
	g_pktlog_pde = NULL;

	return ret;
}
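
/* Module-level teardown: detach pktlog and remove the proc directory */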
void pktlogmod_exit(void *context)
{
	if (g_pktlog_pde == NULL)
		return;

	pktlog_detach((struct hif_opaque_softc *)context);

	/*
	 * pdev kill needs to be implemented
	 */
	remove_proc_entry(PKTLOG_PROC_DIR, NULL);
}
#endif