ce_tasklet.c

/*
 * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"

/**
 * struct tasklet_work - context used to reschedule a CE tasklet from a work
 * @id: copy engine id
 * @data: opaque HIF context pointer
 * @work: work struct
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	struct work_struct work;
};

/**
 * reschedule_ce_tasklet_work_handler() - reschedule work
 * @work: struct work_struct
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	struct tasklet_work *ce_work = container_of(work, struct tasklet_work,
						    work);
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (!scn) {
		hif_err("tasklet scn is null");
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done == false) {
		hif_err("wlan driver is unloaded");
		return;
	}

	if (hif_ce_state->tasklets[ce_work->id].inited)
		tasklet_schedule(&hif_ce_state->tasklets[ce_work->id].intr_tq);
}

static struct tasklet_work tasklet_workers[CE_ID_MAX];
static bool work_initialized;

/**
 * init_tasklet_work() - initialize a tasklet_work entry
 * @work: struct work_struct
 * @work_handler: work handler
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_workers() - initialize the per-CE tasklet workers
 * @scn: HIF context
 *
 * Return: N/A
 */
void init_tasklet_workers(struct hif_opaque_softc *scn)
{
	uint32_t id;

	for (id = 0; id < CE_ID_MAX; id++) {
		tasklet_workers[id].id = id;
		tasklet_workers[id].data = scn;
		init_tasklet_work(&tasklet_workers[id].work,
				  reschedule_ce_tasklet_work_handler);
	}

	work_initialized = true;
}

/**
 * deinit_tasklet_workers() - deinitialize the per-CE tasklet workers
 * @scn: HIF context
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
	u32 id;

	for (id = 0; id < CE_ID_MAX; id++)
		cancel_work_sync(&tasklet_workers[id].work);

	work_initialized = false;
}

/**
 * ce_schedule_tasklet() - schedule ce tasklet
 * @tasklet_entry: struct ce_tasklet_entry
 *
 * Return: N/A
 */
static inline void ce_schedule_tasklet(struct ce_tasklet_entry *tasklet_entry)
{
	tasklet_schedule(&tasklet_entry->intr_tq);
}

#ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_record_tasklet_exec_entry_ts() - Record ce tasklet execution
 *                                      entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_exec_entry_ts[ce_id] =
					qdf_get_log_timestamp_usecs();
}

/**
 * hif_record_tasklet_sched_entry_ts() - Record ce tasklet scheduled
 *                                       entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_sched_entry_ts[ce_id] =
					qdf_get_log_timestamp_usecs();
}

/**
 * hif_ce_latency_stats() - Display ce latency information
 * @hif_ctx: hif_softc struct
 *
 * Return: None
 */
static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
	uint8_t i, j;
	uint32_t index, start_index;
	uint64_t secs, usecs;
	static const char * const buck_str[] = {"0 - 0.5", "0.5 - 1", "1 - 2",
						"2 - 5", "5 - 10", " > 10"};
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);
	struct ce_stats *stats = &hif_ce_state->stats;

	hif_err("\tCE TASKLET ARRIVAL AND EXECUTION STATS");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		hif_nofl_err("\n\t\tCE Ring %d Tasklet Execution Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_log_timestamp_to_secs(
				stats->ce_tasklet_exec_last_update[i][j],
				&secs, &usecs);
			hif_nofl_err("\t Bucket %sms :%llu\t last update:% 8lld.%06lld",
				     buck_str[j],
				     stats->ce_tasklet_exec_bucket[i][j],
				     secs, usecs);
		}

		hif_nofl_err("\n\t\tCE Ring %d Tasklet Scheduled Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_log_timestamp_to_secs(
				stats->ce_tasklet_sched_last_update[i][j],
				&secs, &usecs);
			hif_nofl_err("\t Bucket %sms :%llu\t last update :% 8lld.%06lld",
				     buck_str[j],
				     stats->ce_tasklet_sched_bucket[i][j],
				     secs, usecs);
		}

		hif_nofl_err("\n\t\t CE RING %d Last %d time records",
			     i, HIF_REQUESTED_EVENTS);
		index = stats->record_index[i];
		start_index = stats->record_index[i];

		for (j = 0; j < HIF_REQUESTED_EVENTS; j++) {
			hif_nofl_err("\tExecution time: %lluus Total Scheduled time: %lluus",
				     stats->tasklet_exec_time_record[i][index],
				     stats->tasklet_sched_time_record[i][index]);
			if (index)
				index = (index - 1) % HIF_REQUESTED_EVENTS;
			else
				index = HIF_REQUESTED_EVENTS - 1;

			if (index == start_index)
				break;
		}
	}
}

/**
 * ce_tasklet_update_bucket() - update ce execution and scheduled time latency
 *                              in corresponding time buckets
 * @hif_ce_state: HIF CE state
 * @ce_id: ce_id_type
 *
 * Return: N/A
 */
static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
	uint32_t index;
	uint64_t exec_time, exec_ms;
	uint64_t sched_time, sched_ms;
	uint64_t curr_time = qdf_get_log_timestamp_usecs();
	struct ce_stats *stats = &hif_ce_state->stats;

	exec_time = curr_time - (stats->tasklet_exec_entry_ts[ce_id]);
	sched_time = (stats->tasklet_exec_entry_ts[ce_id]) -
		      (stats->tasklet_sched_entry_ts[ce_id]);

	index = stats->record_index[ce_id];
	index = (index + 1) % HIF_REQUESTED_EVENTS;

	stats->tasklet_exec_time_record[ce_id][index] = exec_time;
	stats->tasklet_sched_time_record[ce_id][index] = sched_time;
	stats->record_index[ce_id] = index;

	exec_ms = qdf_do_div(exec_time, 1000);
	sched_ms = qdf_do_div(sched_time, 1000);

	if (exec_ms > 10) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_BEYOND]
								= curr_time;
	} else if (exec_ms > 5) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_10_MS]
								= curr_time;
	} else if (exec_ms > 2) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_5_MS]
								= curr_time;
	} else if (exec_ms > 1) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_2_MS]
								= curr_time;
	} else if (exec_time > 500) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_1_MS]
								= curr_time;
	} else {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_500_US]
								= curr_time;
	}

	if (sched_ms > 10) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_BEYOND]
								= curr_time;
	} else if (sched_ms > 5) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_10_MS]
								= curr_time;
	} else if (sched_ms > 2) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_5_MS]
								= curr_time;
	} else if (sched_ms > 1) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_2_MS]
								= curr_time;
	} else if (sched_time > 500) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_1_MS]
								= curr_time;
	} else {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_500_US]
								= curr_time;
	}
}
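
/*
 * Note (summary of the logic above): each tasklet run is classified twice by
 * ce_tasklet_update_bucket(), once by execution time (tasklet entry to
 * completion) and once by scheduled time (schedule to tasklet entry). The
 * CE_BUCKET_* boundaries (0.5, 1, 2, 5 and 10 ms) match the buck_str[]
 * labels printed by hif_ce_latency_stats().
 */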
#else
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
}

static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
}
#endif /* CE_TASKLET_DEBUG_ENABLE */

/**
 * ce_tasklet() - ce_tasklet
 * @data: pointer to the struct ce_tasklet_entry being serviced, cast to
 *        unsigned long by tasklet_init()
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	if (scn->ce_latency_stats)
		hif_record_tasklet_exec_entry_ts(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				 HIF_CE_TASKLET_ENTRY, NULL, NULL, -1, 0);

	if (qdf_atomic_read(&scn->link_suspended)) {
		hif_err("ce %d tasklet fired after link suspend",
			tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) {
		/*
		 * There are frames pending, schedule the tasklet to process
		 * them. Enable the interrupt only when there are no pending
		 * frames in any of the Copy Engine pipes.
		 */
		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
					 HIF_CE_TASKLET_RESCHEDULE,
					 NULL, NULL, -1, 0);
		ce_schedule_tasklet(tasklet_entry);
		return;
	}

	if (scn->target_status != TARGET_STATUS_RESET)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
				 NULL, NULL, -1, 0);

	if (scn->ce_latency_stats)
		ce_tasklet_update_bucket(hif_ce_state, tasklet_entry->ce_id);

	qdf_atomic_dec(&scn->active_tasklet_cnt);
}
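
/*
 * Bottom-half accounting in this file: ce_dispatch_interrupt() increments
 * active_tasklet_cnt before handing the interrupt to NAPI or to the tasklet,
 * ce_tasklet() decrements it once servicing completes, and
 * hif_tasklet_schedule() drops the extra count when the tasklet was already
 * scheduled. hif_drain_tasklets() polls this counter to wait for all pending
 * bottom halves.
 */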

/**
 * ce_tasklet_init() - initialize CE tasklets
 * @hif_ce_state: hif_ce_state
 * @mask: bitmask; if bit x is set, initialize the tasklet for copy engine x
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int i;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (mask & (1 << i)) {
			hif_ce_state->tasklets[i].ce_id = i;
			hif_ce_state->tasklets[i].inited = true;
			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;
			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
				     ce_tasklet,
				     (unsigned long)&hif_ce_state->tasklets[i]);
		}
	}
}

/**
 * ce_tasklet_kill() - kill all CE tasklets
 * @scn: HIF context
 *
 * Context: Non-Atomic context
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	work_initialized = false;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (hif_ce_state->tasklets[i].inited) {
			hif_ce_state->tasklets[i].inited = false;
			/*
			 * Cancel the tasklet work before tasklet_disable
			 * to avoid a race between tasklet_schedule and
			 * tasklet_kill. Here cancel_work_sync() won't
			 * return before reschedule_ce_tasklet_work_handler()
			 * completes. Even if tasklet_schedule() happens,
			 * tasklet_disable() will take care of that.
			 */
			cancel_work_sync(&tasklet_workers[i].work);
			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
		}
	}
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

#define HIF_CE_DRAIN_WAIT_CNT 20
/**
 * hif_drain_tasklets() - wait until no tasklet is pending
 * @scn: hif context
 *
 * Let running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns.
 *         -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
	uint32_t ce_drain_wait_cnt = 0;
	int32_t tasklet_cnt;

	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
			hif_err("CE still not done with access: %d",
				tasklet_cnt);

			return -EFAULT;
		}
		HIF_INFO("%s: Waiting for CE to finish access", __func__);
		msleep(10);
	}
	return 0;
}
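
/*
 * With HIF_CE_DRAIN_WAIT_CNT == 20 and a 10 ms sleep per iteration, the drain
 * loop above allows roughly 200 ms for the bottom halves to finish before it
 * gives up and returns -EFAULT.
 */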

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_interrupt_is_ut_resume() - Tests if an irq on the given copy engine
 *                                should trigger a unit-test resume
 * @scn: The HIF context to operate on
 * @ce_id: The copy engine Id from the originating interrupt
 *
 * Return: true if the raised irq should trigger a unit-test resume
 */
static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	int errno;
	uint8_t wake_ce_id;

	if (!hif_is_ut_suspended(scn))
		return false;

	/* ensure passed ce_id matches wake ce_id */
	errno = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (errno) {
		hif_err("Failed to get wake CE Id: %d", errno);
		return false;
	}

	return ce_id == wake_ce_id;
}
#else
static inline bool
hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	return false;
}
#endif /* WLAN_SUSPEND_RESUME_TEST */

/**
 * hif_snoc_interrupt_handler() - hif_snoc_interrupt_handler
 * @irq: irq number from the kernel
 * @context: the ce tasklet entry registered as the irq cookie
 *
 * Return: irqreturn_t
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
				     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ctx: hif context
 *
 * Return: none
 */
void hif_display_ce_stats(struct hif_softc *hif_ctx)
{
#define STR_SIZE 128
	uint8_t i, j, pos;
	char str_buffer[STR_SIZE];
	int size, ret;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_debug("CE interrupt statistics:");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		size = STR_SIZE;
		pos = 0;
		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
				       j, hif_ce_state->stats.ce_per_cpu[i][j]);
			if (ret <= 0 || ret >= size)
				break;
			size -= ret;
			pos += ret;
		}
		qdf_debug("CE id[%2d] - %s", i, str_buffer);
	}

	if (hif_ctx->ce_latency_stats)
		hif_ce_latency_stats(hif_ctx);
#undef STR_SIZE
}
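
/*
 * The per-CPU counters printed above are the ones bumped by
 * hif_ce_increment_interrupt_count() on every dispatched CE interrupt, so the
 * output shows how interrupts for each copy engine were spread across CPUs.
 */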

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

/**
 * hif_tasklet_schedule() - schedule tasklet
 * @hif_ctx: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Return: false if tasklet already scheduled, otherwise true
 */
static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
					struct ce_tasklet_entry *tasklet_entry)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) {
		HIF_DBG("tasklet scheduled, return");
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	tasklet_schedule(&tasklet_entry->intr_tq);
	if (scn->ce_latency_stats)
		hif_record_tasklet_sched_entry_ts(scn, tasklet_entry->ce_id);

	return true;
}

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: IRQ_HANDLED if the interrupt was consumed, IRQ_NONE otherwise
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (tasklet_entry->ce_id != ce_id) {
		hif_err("ce_id (expect %d, received %d) does not match",
			tasklet_entry->ce_id, ce_id);
		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		hif_err("ce_id=%d > CE_COUNT_MAX=%d",
			tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

	hif_irq_disable(scn, ce_id);

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn))
		return IRQ_HANDLED;

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				 NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		return IRQ_HANDLED;
	}

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		hif_tasklet_schedule(hif_hdl, tasklet_entry);

	return IRQ_HANDLED;
}

/* ce_name - human readable copy engine names used when requesting CE irqs */
const char *ce_name[] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
};

/**
 * ce_unregister_irq() - ce_unregister_irq
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask. If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (!hif_ce_state) {
		hif_warn("hif_ce_state = NULL");
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		hif_err("napi_event INT_STATE returned %d", ret);
	/* this is not fatal, continue */

	/* filter mask to free only for ce's with irq registered */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					      &hif_ce_state->tasklets[id]);
			if (ret < 0)
				hif_err("pld_unregister_irq error - ce_id = %d, ret = %d",
					id, ret);
		}
		ce_disable_polling(scn->ce_id_to_state[id]);
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_register_irq() - ce_register_irq
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
 *
 * Registers copy engine irqs matching mask. If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
						 hif_snoc_interrupt_handler,
						 irqflags, ce_name[id],
						 &hif_ce_state->tasklets[id]);
			if (ret) {
				hif_err("cannot register CE %d irq handler, ret = %d",
					id, ret);
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}
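
/*
 * Typical bring-up order (illustrative sketch only; ce_mask and the error
 * handling are placeholders, not lifted from the HIF core). Tasklets must be
 * initialized before their irqs are registered, because ce_register_irq()
 * only requests an irq for copy engines whose tasklet entry is marked
 * inited; teardown reverses the order.
 *
 *	ce_tasklet_init(hif_ce_state, ce_mask);
 *	if (ce_register_irq(hif_ce_state, ce_mask) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	...
 *	ce_unregister_irq(hif_ce_state, ce_mask);
 *	ce_tasklet_kill(scn);
 */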