/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"

/**
 * struct tasklet_work - deferred work used to reschedule a CE tasklet
 * @id: copy engine id this work item serves
 * @data: opaque HIF context (struct hif_softc *)
 * @reg_work: work item that re-schedules the CE tasklet
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	qdf_work_t reg_work;
};

/**
 * ce_tasklet_schedule() - schedule CE tasklet
 * @tasklet_entry: ce tasklet entry
 *
 * Return: None
 */
static inline void ce_tasklet_schedule(struct ce_tasklet_entry *tasklet_entry)
{
	if (tasklet_entry->hi_tasklet_ce)
		tasklet_hi_schedule(&tasklet_entry->intr_tq);
	else
		tasklet_schedule(&tasklet_entry->intr_tq);
}

/**
 * reschedule_ce_tasklet_work_handler() - reschedule work
 * @work: struct work_struct
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	qdf_work_t *reg_work = qdf_container_of(work, qdf_work_t, work);
	struct tasklet_work *ce_work = qdf_container_of(reg_work,
							struct tasklet_work,
							reg_work);
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (!scn) {
		hif_err("tasklet scn is null");
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done == false) {
		hif_err("wlan driver is unloaded");
		return;
	}

	if (hif_ce_state->tasklets[ce_work->id].inited)
		ce_tasklet_schedule(&hif_ce_state->tasklets[ce_work->id]);
}

static struct tasklet_work tasklet_workers[CE_ID_MAX];

/**
 * init_tasklet_work() - init_tasklet_work
 * @work: struct work_struct
 * @work_handler: work_handler
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_worker_by_ceid() - init the tasklet worker for a given CE id
 * @scn: HIF Context
 * @ce_id: copy engine id
 *
 * Return: N/A
 */
void init_tasklet_worker_by_ceid(struct hif_opaque_softc *scn, int ce_id)
{
	tasklet_workers[ce_id].id = ce_id;
	tasklet_workers[ce_id].data = scn;
	init_tasklet_work(&tasklet_workers[ce_id].reg_work.work,
			  reschedule_ce_tasklet_work_handler);
}

/**
 * deinit_tasklet_workers() - cancel any pending tasklet reschedule work
 * @scn: HIF Context
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
	u32 id;

	for (id = 0; id < CE_ID_MAX; id++)
		qdf_cancel_work(&tasklet_workers[id].reg_work);
}

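/*
 * Illustrative pairing of the two worker helpers above. This is a sketch
 * only, not code from this driver; "hif_hdl" and "ce_count" are assumed
 * caller-side names:
 *
 *	for (ce_id = 0; ce_id < ce_count; ce_id++)
 *		init_tasklet_worker_by_ceid(hif_hdl, ce_id);
 *	...
 *	deinit_tasklet_workers(hif_hdl);
 *
 * deinit_tasklet_workers() cancels the reschedule work for every CE id, so a
 * single call during teardown is enough.
 */
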
#ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_record_tasklet_exec_entry_ts() - Record ce tasklet execution
 * entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_exec_entry_ts[ce_id] =
					qdf_get_log_timestamp_usecs();
}

/**
 * hif_record_tasklet_sched_entry_ts() - Record ce tasklet scheduled
 * entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_sched_entry_ts[ce_id] =
					qdf_get_log_timestamp_usecs();
}

/**
 * hif_ce_latency_stats() - Display ce latency information
 * @hif_ctx: hif_softc struct
 *
 * Return: None
 */
static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
	uint8_t i, j;
	uint32_t index, start_index;
	uint64_t secs, usecs;
	static const char * const buck_str[] = {"0 - 0.5", "0.5 - 1", "1 - 2",
						"2 - 5", "5 - 10", " > 10"};
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);
	struct ce_stats *stats = &hif_ce_state->stats;

	hif_err("\tCE TASKLET ARRIVAL AND EXECUTION STATS");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		hif_nofl_err("\n\t\tCE Ring %d Tasklet Execution Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_log_timestamp_to_secs(
				stats->ce_tasklet_exec_last_update[i][j],
				&secs, &usecs);
			hif_nofl_err("\t Bucket %sms :%llu\t last update:% 8lld.%06lld",
				     buck_str[j],
				     stats->ce_tasklet_exec_bucket[i][j],
				     secs, usecs);
		}

		hif_nofl_err("\n\t\tCE Ring %d Tasklet Scheduled Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_log_timestamp_to_secs(
				stats->ce_tasklet_sched_last_update[i][j],
				&secs, &usecs);
			hif_nofl_err("\t Bucket %sms :%llu\t last update :% 8lld.%06lld",
				     buck_str[j],
				     stats->ce_tasklet_sched_bucket[i][j],
				     secs, usecs);
		}

		hif_nofl_err("\n\t\t CE RING %d Last %d time records",
			     i, HIF_REQUESTED_EVENTS);
		index = stats->record_index[i];
		start_index = stats->record_index[i];

		for (j = 0; j < HIF_REQUESTED_EVENTS; j++) {
			hif_nofl_err("\tExecution time: %lluus Total Scheduled time: %lluus",
				     stats->tasklet_exec_time_record[i][index],
				     stats->tasklet_sched_time_record[i][index]);
			if (index)
				index = (index - 1) % HIF_REQUESTED_EVENTS;
			else
				index = HIF_REQUESTED_EVENTS - 1;
			if (index == start_index)
				break;
		}
	}
}

/**
 * ce_tasklet_update_bucket() - update ce execution and scheduled time latency
 * in corresponding time buckets
 * @hif_ce_state: HIF CE state holding the latency stats
 * @ce_id: ce_id_type
 *
 * Return: N/A
 */
static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
	uint32_t index;
	uint64_t exec_time, exec_ms;
	uint64_t sched_time, sched_ms;
	uint64_t curr_time = qdf_get_log_timestamp_usecs();
	struct ce_stats *stats = &hif_ce_state->stats;

	exec_time = curr_time - (stats->tasklet_exec_entry_ts[ce_id]);
	sched_time = (stats->tasklet_exec_entry_ts[ce_id]) -
		      (stats->tasklet_sched_entry_ts[ce_id]);

	index = stats->record_index[ce_id];
	index = (index + 1) % HIF_REQUESTED_EVENTS;

	stats->tasklet_exec_time_record[ce_id][index] = exec_time;
	stats->tasklet_sched_time_record[ce_id][index] = sched_time;
	stats->record_index[ce_id] = index;

	exec_ms = qdf_do_div(exec_time, 1000);
	sched_ms = qdf_do_div(sched_time, 1000);

	if (exec_ms > 10) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_BEYOND]
								= curr_time;
	} else if (exec_ms > 5) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_10_MS]
								= curr_time;
	} else if (exec_ms > 2) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_5_MS]
								= curr_time;
	} else if (exec_ms > 1) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_2_MS]
								= curr_time;
	} else if (exec_time > 500) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_1_MS]
								= curr_time;
	} else {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_500_US]
								= curr_time;
	}

	if (sched_ms > 10) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_BEYOND]
								= curr_time;
	} else if (sched_ms > 5) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_10_MS]
								= curr_time;
	} else if (sched_ms > 2) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_5_MS]
								= curr_time;
	} else if (sched_ms > 1) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_2_MS]
								= curr_time;
	} else if (sched_time > 500) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_1_MS]
								= curr_time;
	} else {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_500_US]
								= curr_time;
	}
}
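
/*
 * Worked example of the bucketing above, with illustrative numbers only:
 * exec_time = 7300 us gives exec_ms = qdf_do_div(7300, 1000) = 7, which is
 * greater than 5, so the sample is counted in CE_BUCKET_10_MS (the "5 - 10"
 * ms bucket as labelled in hif_ce_latency_stats()). exec_time = 320 us gives
 * exec_ms = 0 and exec_time <= 500, so it falls into CE_BUCKET_500_US.
 */
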
#else
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
}

static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
}
#endif /*CE_TASKLET_DEBUG_ENABLE*/

#if defined(CE_TASKLET_DEBUG_ENABLE) && defined(CE_TASKLET_SCHEDULE_ON_FULL)
/**
 * hif_reset_ce_full_count() - Reset ce full count
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_reset_ce_full_count(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.ce_ring_full_count[ce_id] = 0;
}
#else
static inline void
hif_reset_ce_full_count(struct hif_softc *scn, uint8_t ce_id)
{
}
#endif

#ifdef HIF_DETECTION_LATENCY_ENABLE
static inline
void hif_latency_detect_tasklet_sched(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{
	if (tasklet_entry->ce_id != CE_ID_2)
		return;

	scn->latency_detect.ce2_tasklet_sched_cpuid = qdf_get_cpu();
	scn->latency_detect.ce2_tasklet_sched_time = qdf_system_ticks();
}

static inline
void hif_latency_detect_tasklet_exec(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{
	if (tasklet_entry->ce_id != CE_ID_2)
		return;

	scn->latency_detect.ce2_tasklet_exec_time = qdf_system_ticks();
	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_TASKLET));
}
#else
static inline
void hif_latency_detect_tasklet_sched(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{}

static inline
void hif_latency_detect_tasklet_exec(
	struct hif_softc *scn,
	struct ce_tasklet_entry *tasklet_entry)
{}
#endif

/**
 * ce_tasklet() - ce_tasklet
 * @data: the ce_tasklet_entry for this CE, cast to unsigned long
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				 HIF_CE_TASKLET_ENTRY, NULL, NULL, -1, 0);

	if (scn->ce_latency_stats)
		hif_record_tasklet_exec_entry_ts(scn, tasklet_entry->ce_id);

	hif_latency_detect_tasklet_exec(scn, tasklet_entry);

	if (qdf_atomic_read(&scn->link_suspended)) {
		hif_err("ce %d tasklet fired after link suspend",
			tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) {
		/*
		 * There are frames pending, so reschedule the tasklet to
		 * process them. The interrupt is re-enabled only when there
		 * are no pending frames in any of the Copy Engine pipes.
		 */
		if (test_bit(TASKLET_STATE_SCHED,
			     &tasklet_entry->intr_tq.state)) {
			hif_info("ce_id%d tasklet was scheduled, return",
				 tasklet_entry->ce_id);
			qdf_atomic_dec(&scn->active_tasklet_cnt);
			return;
		}

		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
					 HIF_CE_TASKLET_RESCHEDULE,
					 NULL, NULL, -1, 0);

		ce_tasklet_schedule(tasklet_entry);
		hif_latency_detect_tasklet_sched(scn, tasklet_entry);

		hif_reset_ce_full_count(scn, tasklet_entry->ce_id);
		if (scn->ce_latency_stats) {
			ce_tasklet_update_bucket(hif_ce_state,
						 tasklet_entry->ce_id);
			hif_record_tasklet_sched_entry_ts(scn,
							  tasklet_entry->ce_id);
		}
		return;
	}

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
				 NULL, NULL, -1, 0);

	if (scn->ce_latency_stats)
		ce_tasklet_update_bucket(hif_ce_state, tasklet_entry->ce_id);

	if ((scn->target_status != TARGET_STATUS_RESET) &&
	    !scn->free_irq_done)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	qdf_atomic_dec(&scn->active_tasklet_cnt);
}

/**
 * ce_tasklet_init() - ce_tasklet_init
 * @hif_ce_state: hif_ce_state
 * @mask: bitmap of CE ids for which to initialize a tasklet
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int i;
	struct CE_attr *attr;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (mask & (1 << i)) {
			hif_ce_state->tasklets[i].ce_id = i;
			hif_ce_state->tasklets[i].inited = true;
			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;

			attr = &hif_ce_state->host_ce_config[i];
			if (attr->flags & CE_ATTR_HI_TASKLET)
				hif_ce_state->tasklets[i].hi_tasklet_ce = true;
			else
				hif_ce_state->tasklets[i].hi_tasklet_ce = false;

			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
				     ce_tasklet,
				     (unsigned long)&hif_ce_state->tasklets[i]);
		}
	}
}

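/*
 * Illustrative use of the mask argument above. A sketch only, not code from
 * this driver; "hif_ce_state" and "scn" are assumed to be valid caller-side
 * handles. Bit x of the mask selects copy engine x, so
 *
 *	ce_tasklet_init(hif_ce_state, BIT(0) | BIT(2));
 *
 * initializes tasklets for CE 0 and CE 2 only, while ce_tasklet_kill(scn)
 * below tears down every tasklet that was marked inited.
 */
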
/**
 * ce_tasklet_kill() - ce_tasklet_kill
 * @scn: HIF context
 *
 * Context: Non-Atomic context
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (hif_ce_state->tasklets[i].inited) {
			hif_ce_state->tasklets[i].inited = false;
			/*
			 * Cancel the tasklet work before tasklet_disable
			 * to avoid a race between tasklet_schedule and
			 * tasklet_kill. Here cancel_work_sync() won't
			 * return before reschedule_ce_tasklet_work_handler()
			 * completes. Even if tasklet_schedule() happens,
			 * tasklet_disable() will take care of that.
			 */
			qdf_cancel_work(&tasklet_workers[i].reg_work);
			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
		}
	}
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

/**
 * ce_tasklet_entry_dump() - dump tasklet entries info
 * @hif_ce_state: ce state
 *
 * This function will dump all tasklet entries info
 *
 * Return: None
 */
static void ce_tasklet_entry_dump(struct HIF_CE_state *hif_ce_state)
{
	struct ce_tasklet_entry *tasklet_entry;
	int i;

	if (hif_ce_state) {
		for (i = 0; i < CE_COUNT_MAX; i++) {
			tasklet_entry = &hif_ce_state->tasklets[i];
			hif_info("%02d: ce_id=%d, inited=%d, hi_tasklet_ce=%d hif_ce_state=%pK",
				 i,
				 tasklet_entry->ce_id,
				 tasklet_entry->inited,
				 tasklet_entry->hi_tasklet_ce,
				 tasklet_entry->hif_ce_state);
		}
	}
}

#define HIF_CE_DRAIN_WAIT_CNT 20
/**
 * hif_drain_tasklets(): wait until no tasklet is pending
 * @scn: hif context
 *
 * Let running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns.
 *         -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
	uint32_t ce_drain_wait_cnt = 0;
	int32_t tasklet_cnt;

	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
			hif_err("CE still not done with access: %d",
				tasklet_cnt);
			return -EFAULT;
		}
		hif_info("Waiting for CE to finish access");
		msleep(10);
	}
	return 0;
}

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_interrupt_is_ut_resume(): Tests if an irq on the given copy engine should
 * trigger a unit-test resume.
 * @scn: The HIF context to operate on
 * @ce_id: The copy engine Id from the originating interrupt
 *
 * Return: true if the raised irq should trigger a unit-test resume
 */
static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	int errno;
	uint8_t wake_ce_id;

	if (!hif_is_ut_suspended(scn))
		return false;

	/* ensure passed ce_id matches wake ce_id */
	errno = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (errno) {
		hif_err("Failed to get wake CE Id: %d", errno);
		return false;
	}

	return ce_id == wake_ce_id;
}
#else
static inline bool
hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	return false;
}
#endif /* WLAN_SUSPEND_RESUME_TEST */

/**
 * hif_snoc_interrupt_handler() - hif_snoc_interrupt_handler
 * @irq: irq coming from kernel
 * @context: context
 *
 * Return: irqreturn_t from ce_dispatch_interrupt()
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
				     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ctx: hif context
 *
 * Return: none
 */
void hif_display_ce_stats(struct hif_softc *hif_ctx)
{
#define STR_SIZE 128
	uint8_t i, j, pos;
	char str_buffer[STR_SIZE];
	int size, ret;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_debug("CE interrupt statistics:");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		size = STR_SIZE;
		pos = 0;
		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
				       j, hif_ce_state->stats.ce_per_cpu[i][j]);
			if (ret <= 0 || ret >= size)
				break;
			size -= ret;
			pos += ret;
		}
		qdf_debug("CE id[%2d] - %s", i, str_buffer);
	}

	if (hif_ctx->ce_latency_stats)
		hif_ce_latency_stats(hif_ctx);
#undef STR_SIZE
}

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

#ifdef WLAN_TRACEPOINTS
/**
 * hif_set_ce_tasklet_sched_time() - Set tasklet schedule time for
 * CE with matching ce_id
 * @scn: hif context
 * @ce_id: CE id
 *
 * Return: None
 */
static inline
void hif_set_ce_tasklet_sched_time(struct hif_softc *scn, uint8_t ce_id)
{
	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];

	ce_state->ce_tasklet_sched_time = qdf_time_sched_clock();
}
#else
static inline
void hif_set_ce_tasklet_sched_time(struct hif_softc *scn, uint8_t ce_id)
{
}
#endif

/**
 * hif_tasklet_schedule() - schedule tasklet
 * @hif_ctx: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Return: false if tasklet already scheduled, otherwise true
 */
static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
					struct ce_tasklet_entry *tasklet_entry)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) {
		hif_debug("tasklet scheduled, return");
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	hif_set_ce_tasklet_sched_time(scn, tasklet_entry->ce_id);
	/*
	 * Record the scheduling latency before tasklet_schedule: in the
	 * whunt environment the tasklet may run and finish before
	 * hif_tasklet_schedule() itself returns.
	 */
	hif_latency_detect_tasklet_sched(scn, tasklet_entry);
	ce_tasklet_schedule(tasklet_entry);

	hif_reset_ce_full_count(scn, tasklet_entry->ce_id);
	if (scn->ce_latency_stats)
		hif_record_tasklet_sched_entry_ts(scn, tasklet_entry->ce_id);

	return true;
}

/**
 * ce_poll_reap_by_id() - reap the available frames from CE by polling per ce_id
 * @scn: hif context
 * @ce_id: CE id
 *
 * This function needs to be called once after all the irqs are disabled
 * and tasklets are drained during bus suspend.
 *
 * Return: 0 on success, unlikely -EBUSY if reaping goes into an infinite loop
 */
static int ce_poll_reap_by_id(struct hif_softc *scn, enum ce_id_type ce_id)
{
	struct HIF_CE_state *hif_ce_state = (struct HIF_CE_state *)scn;
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];

	if (scn->ce_latency_stats)
		hif_record_tasklet_exec_entry_ts(scn, ce_id);

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
				 NULL, NULL, -1, 0);

	ce_per_engine_service(scn, ce_id);

	/*
	 * In the unlikely case that frames are still pending to reap,
	 * this could loop forever, so return -EBUSY.
	 */
	if (ce_check_rx_pending(CE_state))
		return -EBUSY;

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
				 NULL, NULL, -1, 0);

	if (scn->ce_latency_stats)
		ce_tasklet_update_bucket(hif_ce_state, ce_id);

	return 0;
}

/**
 * hif_drain_fw_diag_ce() - reap all the available FW diag logs from CE
 * @scn: hif context
 *
 * This function needs to be called once after all the irqs are disabled
 * and tasklets are drained during bus suspend.
 *
 * Return: 0 on success, unlikely -EBUSY if reaping goes into an infinite loop
 */
int hif_drain_fw_diag_ce(struct hif_softc *scn)
{
	uint8_t ce_id;

	if (hif_get_fw_diag_ce_id(scn, &ce_id))
		return 0;

	return ce_poll_reap_by_id(scn, ce_id);
}

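/*
 * Illustrative bus-suspend ordering for the drain helpers above. This is a
 * sketch based on the comments in this file, not code copied from the
 * suspend path; "scn" and the loop over scn->ce_count are assumptions:
 *
 *	for (ce_id = 0; ce_id < scn->ce_count; ce_id++)
 *		hif_irq_disable(scn, ce_id);
 *	if (hif_drain_tasklets(scn))       (-EFAULT if tasklets never drain)
 *		return -EBUSY;
 *	if (hif_drain_fw_diag_ce(scn))     (-EBUSY if frames are still pending)
 *		return -EBUSY;
 *
 * Interrupts are stopped first, in-flight tasklets are allowed to finish,
 * and only then is the FW diag CE reaped by polling.
 */
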
/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: IRQ_HANDLED if the interrupt was dispatched, IRQ_NONE otherwise
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (tasklet_entry->ce_id != ce_id) {
		bool rl;

		rl = hif_err_rl("ce_id (expect %d, received %d) does not match, inited=%d, ce_count=%u",
				tasklet_entry->ce_id, ce_id,
				tasklet_entry->inited,
				scn->ce_count);

		if (!rl)
			ce_tasklet_entry_dump(hif_ce_state);

		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		hif_err("ce_id=%d > CE_COUNT_MAX=%d",
			tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

	hif_irq_disable(scn, ce_id);

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn))
		return IRQ_HANDLED;

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				 NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		return IRQ_HANDLED;
	}

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		hif_tasklet_schedule(hif_hdl, tasklet_entry);

	return IRQ_HANDLED;
}

/*
 * ce_name - human-readable names for each copy engine, indexed by CE id
 */
const char *ce_name[CE_COUNT_MAX] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
#ifdef QCA_WIFI_QCN9224
	"WLAN_CE_12",
	"WLAN_CE_13",
	"WLAN_CE_14",
	"WLAN_CE_15",
#endif
};

/**
 * ce_unregister_irq() - ce_unregister_irq
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask. If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (!hif_ce_state) {
		hif_warn("hif_ce_state = NULL");
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		hif_err("napi_event INT_STATE returned %d", ret);
	/* this is not fatal, continue */

	/* filter mask to free only for ce's with irq registered */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					      &hif_ce_state->tasklets[id]);
			if (ret < 0)
				hif_err(
					"pld_unregister_irq error - ce_id = %d, ret = %d",
					id, ret);
		}
		ce_disable_polling(scn->ce_id_to_state[id]);
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}

/**
 * ce_register_irq() - ce_register_irq
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
 *
 * Registers copy engine irqs matching mask. If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
						 hif_snoc_interrupt_handler,
						 irqflags, ce_name[id],
						 &hif_ce_state->tasklets[id]);
			if (ret) {
				hif_err(
					"cannot register CE %d irq handler, ret = %d",
					id, ret);
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}
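
/*
 * Illustrative register/unregister pairing for the two functions above.
 * A sketch only; the mask value and the error handling are assumptions,
 * not taken from any caller in this file:
 *
 *	uint32_t mask = BIT(0) | BIT(1);        (bits select CE 0 and CE 1)
 *
 *	if (ce_register_irq(hif_ce_state, mask) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_FAULT;
 *	...
 *	ce_unregister_irq(hif_ce_state, mask);
 *
 * ce_register_irq() records the successfully registered CEs in
 * ce_register_irq_done, and ce_unregister_irq() filters its mask against
 * that bookkeeping, so unregistering with a superset mask is safe.
 */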