hif_exec.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316
  1. /*
  2. * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #include <hif_exec.h>
  20. #include <ce_main.h>
  21. #include "qdf_module.h"
  22. #include "qdf_net_if.h"
  23. #include <pld_common.h>
  24. #ifdef DP_UMAC_HW_RESET_SUPPORT
  25. #include "if_pci.h"
  26. #endif
  27. #include "qdf_ssr_driver_dump.h"
/*
 * Mapping between the NAPI budget handed to hif_exec_poll() and the
 * internal (ring-quota) budget passed to the group handler:
 *   NAPI budget 0 -> internal budget 0
 *   NAPI budget 1 -> internal budget [1, scaler - 1]
 *   NAPI budget 2 -> internal budget [scaler, 2 * scaler - 1], etc.
 * 's' is a binary shift (scaler == 1 << s), so the two macros are
 * approximate inverses of each other.
 */
#define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
	(((n) << (s)) - 1)
#define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
	(((n) + 1) >> (s))
  36. static struct hif_exec_context *hif_exec_tasklet_create(void);
  37. #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/* Per-interrupt-context SRNG event history buffers, plus the exported
 * history depth; both are handed to the SSR driver-dump facility so they
 * appear in recovery dumps.
 */
struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS];
uint32_t hif_event_hist_max = HIF_EVENT_HIST_MAX;

/* Register the event-history buffers with the SSR driver-dump facility */
void hif_desc_history_log_register(void)
{
	qdf_ssr_driver_dump_register_region("hif_event_history",
					    hif_event_desc_history,
					    sizeof(hif_event_desc_history));
	qdf_ssr_driver_dump_register_region("hif_event_hist_max",
					    &hif_event_hist_max,
					    sizeof(hif_event_hist_max));
}

/* Unregister the regions, in reverse order of registration */
void hif_desc_history_log_unregister(void)
{
	qdf_ssr_driver_dump_unregister_region("hif_event_hist_max");
	qdf_ssr_driver_dump_unregister_region("hif_event_history");
}
/**
 * hif_get_next_record_index() - claim the next slot of a circular table
 * @table_index: atomic running index of the table
 * @array_size: number of entries in the table; must be a power of two,
 *              since wrap-around is done with a mask
 *
 * Return: index in [0, array_size) at which the next record is written
 */
static inline
int hif_get_next_record_index(qdf_atomic_t *table_index,
			      int array_size)
{
	int record_index = qdf_atomic_inc_return(table_index);

	return record_index & (array_size - 1);
}
  61. /**
  62. * hif_hist_is_prev_record() - Check if index is the immediate
  63. * previous record wrt curr_index
  64. * @curr_index: curr index in the event history
  65. * @index: index to be checked
  66. * @hist_size: history size
  67. *
  68. * Return: true if index is immediately behind curr_index else false
  69. */
  70. static inline
  71. bool hif_hist_is_prev_record(int32_t curr_index, int32_t index,
  72. uint32_t hist_size)
  73. {
  74. return (((index + 1) & (hist_size - 1)) == curr_index) ?
  75. true : false;
  76. }
/**
 * hif_hist_skip_event_record() - Check if current event needs to be
 * recorded or not
 * @hist_ev: HIF event history
 * @event: DP event entry
 *
 * Several event types are coalesced into (or validated against) the most
 * recently recorded entry to keep the circular history from filling up
 * with redundant records.
 *
 * Return: true if current event needs to be skipped else false
 */
static bool
hif_hist_skip_event_record(struct hif_event_history *hist_ev,
			   struct hif_event_record *event)
{
	struct hif_event_record *rec;
	struct hif_event_record *last_irq_rec;
	int32_t index;

	index = qdf_atomic_read(&hist_ev->index);
	if (index < 0)
		return false;	/* history still empty: record everything */

	index &= (HIF_EVENT_HIST_MAX - 1);

	/* most recently written record */
	rec = &hist_ev->event[index];

	switch (event->type) {
	case HIF_EVENT_IRQ_TRIGGER:
		/*
		 * The prev record check is to prevent skipping the IRQ event
		 * record in case where BH got re-scheduled due to force_break
		 * but there are no entries to be reaped in the rings.
		 */
		if (rec->type == HIF_EVENT_BH_SCHED &&
		    hif_hist_is_prev_record(index,
					    hist_ev->misc.last_irq_index,
					    HIF_EVENT_HIST_MAX)) {
			/*
			 * Coalesce into the previous IRQ record instead of
			 * consuming a new slot: hp is reused as an IRQ
			 * counter and tp as the gap since the last
			 * recorded IRQ timestamp.
			 */
			last_irq_rec =
				&hist_ev->event[hist_ev->misc.last_irq_index];
			last_irq_rec->timestamp = hif_get_log_timestamp();
			last_irq_rec->cpu_id = qdf_get_cpu();
			last_irq_rec->hp++;
			last_irq_rec->tp = last_irq_rec->timestamp -
						hist_ev->misc.last_irq_ts;
			return true;
		}
		break;
	case HIF_EVENT_BH_SCHED:
		/* back-to-back BH_SCHED: refresh the existing record */
		if (rec->type == HIF_EVENT_BH_SCHED) {
			rec->timestamp = hif_get_log_timestamp();
			rec->cpu_id = qdf_get_cpu();
			return true;
		}
		break;
	case HIF_EVENT_SRNG_ACCESS_START:
		/* ring empty (nothing to reap): not worth a slot */
		if (event->hp == event->tp)
			return true;
		break;
	case HIF_EVENT_SRNG_ACCESS_END:
		/* only meaningful if the matching ACCESS_START was kept */
		if (rec->type != HIF_EVENT_SRNG_ACCESS_START)
			return true;
		break;
	case HIF_EVENT_BH_COMPLETE:
	case HIF_EVENT_BH_FORCE_BREAK:
		/* only meaningful right after an ACCESS_END record */
		if (rec->type != HIF_EVENT_SRNG_ACCESS_END)
			return true;
		break;
	default:
		break;
	}

	return false;
}
/**
 * hif_hist_record_event() - record one HIF event into a group's history
 * @hif_ctx: HIF opaque context
 * @event: event to record (type, ring id, hp/tp snapshot)
 * @intr_grp_id: interrupt group whose history receives the event
 *
 * Events masked out via scn->event_enable_mask, events for invalid or
 * uninitialized groups, and events rejected by
 * hif_hist_skip_event_record() are dropped.
 *
 * Return: None
 */
void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
			   struct hif_event_record *event, uint8_t intr_grp_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_event_history *hist_ev;
	struct hif_event_record *record;
	int record_index;

	/* event type filtered out by the enable mask */
	if (!(scn->event_enable_mask & BIT(event->type)))
		return;

	if (qdf_unlikely(intr_grp_id >= HIF_NUM_INT_CONTEXTS)) {
		hif_err("Invalid interrupt group id %d", intr_grp_id);
		return;
	}

	hist_ev = scn->evt_hist[intr_grp_id];
	if (qdf_unlikely(!hist_ev))
		return;	/* history not initialized for this group */

	if (hif_hist_skip_event_record(hist_ev, event))
		return;

	record_index = hif_get_next_record_index(
			&hist_ev->index, HIF_EVENT_HIST_MAX);

	record = &hist_ev->event[record_index];

	/* remember where the IRQ landed so later BH_SCHED events can be
	 * coalesced into it (see hif_hist_skip_event_record())
	 */
	if (event->type == HIF_EVENT_IRQ_TRIGGER) {
		hist_ev->misc.last_irq_index = record_index;
		hist_ev->misc.last_irq_ts = hif_get_log_timestamp();
	}

	record->hal_ring_id = event->hal_ring_id;
	record->hp = event->hp;
	record->tp = event->tp;
	record->cpu_id = qdf_get_cpu();
	record->timestamp = hif_get_log_timestamp();
	record->type = event->type;
}
  175. void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id)
  176. {
  177. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  178. scn->evt_hist[id] = &hif_event_desc_history[id];
  179. qdf_atomic_set(&scn->evt_hist[id]->index, -1);
  180. hif_info("SRNG events history initialized for group: %d", id);
  181. }
  182. void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id)
  183. {
  184. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  185. scn->evt_hist[id] = NULL;
  186. hif_info("SRNG events history de-initialized for group: %d", id);
  187. }
  188. #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
  189. #ifndef QCA_WIFI_WCN6450
  190. /**
  191. * hif_print_napi_latency_stats() - print NAPI scheduling latency stats
  192. * @hif_state: hif context
  193. *
  194. * return: void
  195. */
  196. #ifdef HIF_LATENCY_PROFILE_ENABLE
  197. static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
  198. {
  199. struct hif_exec_context *hif_ext_group;
  200. int i, j;
  201. int64_t cur_tstamp;
  202. const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] = {
  203. "0-2 ms",
  204. "3-10 ms",
  205. "11-20 ms",
  206. "21-50 ms",
  207. "51-100 ms",
  208. "101-250 ms",
  209. "251-500 ms",
  210. "> 500 ms"
  211. };
  212. cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
  213. QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
  214. "Current timestamp: %lld", cur_tstamp);
  215. for (i = 0; i < hif_state->hif_num_extgroup; i++) {
  216. if (hif_state->hif_ext_group[i]) {
  217. hif_ext_group = hif_state->hif_ext_group[i];
  218. QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
  219. "ext grp %d Last serviced timestamp: %lld",
  220. i, hif_ext_group->tstamp);
  221. QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
  222. "Latency Bucket | Time elapsed");
  223. for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) {
  224. if (hif_ext_group->sched_latency_stats[j])
  225. QDF_TRACE(QDF_MODULE_ID_HIF,
  226. QDF_TRACE_LEVEL_INFO_HIGH,
  227. "%s | %lld",
  228. time_str[j],
  229. hif_ext_group->
  230. sched_latency_stats[j]);
  231. }
  232. }
  233. }
  234. }
  235. #else
  236. static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
  237. {
  238. }
  239. #endif
  240. /**
  241. * hif_clear_napi_stats() - reset NAPI stats
  242. * @hif_ctx: hif context
  243. *
  244. * return: void
  245. */
  246. void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
  247. {
  248. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
  249. struct hif_exec_context *hif_ext_group;
  250. size_t i;
  251. for (i = 0; i < hif_state->hif_num_extgroup; i++) {
  252. hif_ext_group = hif_state->hif_ext_group[i];
  253. if (!hif_ext_group)
  254. return;
  255. qdf_mem_set(hif_ext_group->sched_latency_stats,
  256. sizeof(hif_ext_group->sched_latency_stats),
  257. 0x0);
  258. }
  259. }
  260. qdf_export_symbol(hif_clear_napi_stats);
  261. #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
  262. /**
  263. * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
  264. * @stats: NAPI stats to get poll time buckets
  265. * @buf: buffer to fill histogram string
  266. * @buf_len: length of the buffer
  267. *
  268. * Return: void
  269. */
  270. static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
  271. uint8_t buf_len)
  272. {
  273. int i;
  274. int str_index = 0;
  275. for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
  276. str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
  277. "%u|", stats->poll_time_buckets[i]);
  278. }
  279. void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
  280. {
  281. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
  282. struct hif_exec_context *hif_ext_group;
  283. struct qca_napi_stat *napi_stats;
  284. int i, j;
  285. /*
  286. * Max value of uint_32 (poll_time_bucket) = 4294967295
  287. * Thus we need 10 chars + 1 space =11 chars for each bucket value.
  288. * +1 space for '\0'.
  289. */
  290. char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};
  291. QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
  292. "NAPI[#]CPU[#] |scheds |polls |comps |dones |t-lim |max(us)|hist(500us buckets)");
  293. for (i = 0;
  294. (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
  295. i++) {
  296. hif_ext_group = hif_state->hif_ext_group[i];
  297. for (j = 0; j < num_possible_cpus(); j++) {
  298. napi_stats = &hif_ext_group->stats[j];
  299. if (!napi_stats->napi_schedules)
  300. continue;
  301. hif_get_poll_times_hist_str(napi_stats,
  302. hist_str,
  303. sizeof(hist_str));
  304. QDF_TRACE(QDF_MODULE_ID_HIF,
  305. QDF_TRACE_LEVEL_INFO_HIGH,
  306. "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
  307. i, j,
  308. napi_stats->napi_schedules,
  309. napi_stats->napi_polls,
  310. napi_stats->napi_completes,
  311. napi_stats->napi_workdone,
  312. napi_stats->time_limit_reached,
  313. qdf_do_div(napi_stats->napi_max_poll_time,
  314. 1000),
  315. hist_str);
  316. }
  317. }
  318. hif_print_napi_latency_stats(hif_state);
  319. }
  320. qdf_export_symbol(hif_print_napi_stats);
#else
/* Poll-time histogram is not collected without
 * WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT; stub keeps callers simple.
 */
static inline
void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
				 uint8_t buf_len)
{
}

/* Reduced NAPI stats dump: per-group, per-CPU counters only */
void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	struct qca_napi_stat *napi_stats;
	int i, j;

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
		  "NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		if (hif_state->hif_ext_group[i]) {
			hif_ext_group = hif_state->hif_ext_group[i];
			for (j = 0; j < num_possible_cpus(); j++) {
				napi_stats = &(hif_ext_group->stats[j]);
				/* only print CPUs that ever scheduled */
				if (napi_stats->napi_schedules != 0)
					QDF_TRACE(QDF_MODULE_ID_HIF,
						  QDF_TRACE_LEVEL_FATAL,
						  "NAPI[%2d]CPU[%d]: "
						  "%7d %7d %7d %7d ",
						  i, j,
						  napi_stats->napi_schedules,
						  napi_stats->napi_polls,
						  napi_stats->napi_completes,
						  napi_stats->napi_workdone);
			}
		}
	}

	hif_print_napi_latency_stats(hif_state);
}

qdf_export_symbol(hif_print_napi_stats);
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
  357. #endif /* QCA_WIFI_WCN6450 */
  358. #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
  359. /**
  360. * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
  361. * @hif_ext_group: hif_ext_group of type NAPI
  362. *
  363. * The function is called at the end of a NAPI poll to calculate poll time
  364. * buckets.
  365. *
  366. * Return: void
  367. */
  368. static
  369. void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
  370. {
  371. struct qca_napi_stat *napi_stat;
  372. unsigned long long poll_time_ns;
  373. uint32_t poll_time_us;
  374. uint32_t bucket_size_us = 500;
  375. uint32_t bucket;
  376. uint32_t cpu_id = qdf_get_cpu();
  377. poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
  378. poll_time_us = qdf_do_div(poll_time_ns, 1000);
  379. napi_stat = &hif_ext_group->stats[cpu_id];
  380. if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time)
  381. hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns;
  382. bucket = poll_time_us / bucket_size_us;
  383. if (bucket >= QCA_NAPI_NUM_BUCKETS)
  384. bucket = QCA_NAPI_NUM_BUCKETS - 1;
  385. ++napi_stat->poll_time_buckets[bucket];
  386. }
  387. /**
  388. * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield
  389. * @hif_ext_group: hif_ext_group of type NAPI
  390. *
  391. * Return: true if NAPI needs to yield, else false
  392. */
  393. static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group)
  394. {
  395. bool time_limit_reached = false;
  396. unsigned long long poll_time_ns;
  397. int cpu_id = qdf_get_cpu();
  398. struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
  399. struct hif_config_info *cfg = &scn->hif_config;
  400. poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
  401. time_limit_reached =
  402. poll_time_ns > cfg->rx_softirq_max_yield_duration_ns ? 1 : 0;
  403. if (time_limit_reached) {
  404. hif_ext_group->stats[cpu_id].time_limit_reached++;
  405. hif_ext_group->force_break = true;
  406. }
  407. return time_limit_reached;
  408. }
  409. bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id)
  410. {
  411. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  412. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  413. struct hif_exec_context *hif_ext_group;
  414. bool ret_val = false;
  415. if (!(grp_id < hif_state->hif_num_extgroup) ||
  416. !(grp_id < HIF_MAX_GROUP))
  417. return false;
  418. hif_ext_group = hif_state->hif_ext_group[grp_id];
  419. if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE)
  420. ret_val = hif_exec_poll_should_yield(hif_ext_group);
  421. return ret_val;
  422. }
/**
 * hif_exec_update_service_start_time() - Update NAPI poll start time
 * @hif_ext_group: hif_ext_group of type NAPI
 *
 * The function is called at the beginning of a NAPI poll to record the poll
 * start time.
 *
 * Return: None
 */
static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
	hif_ext_group->poll_start_time = qdf_time_sched_clock();
}
#else
/* Poll-time accounting is compiled out without
 * WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT; keep no-op stubs so callers do not
 * need their own ifdefs.
 */
static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
}

static inline
void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
{
}
#endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
  447. static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
  448. {
  449. struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
  450. tasklet_schedule(&t_ctx->tasklet);
  451. }
  452. /**
  453. * hif_exec_tasklet_fn() - grp tasklet
  454. * @data: context
  455. *
  456. * Return: void
  457. */
  458. static void hif_exec_tasklet_fn(unsigned long data)
  459. {
  460. struct hif_exec_context *hif_ext_group =
  461. (struct hif_exec_context *)data;
  462. struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
  463. unsigned int work_done;
  464. int cpu = smp_processor_id();
  465. work_done =
  466. hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET,
  467. cpu);
  468. if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
  469. qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
  470. hif_ext_group->irq_enable(hif_ext_group);
  471. } else {
  472. hif_exec_tasklet_schedule(hif_ext_group);
  473. }
  474. }
/**
 * hif_latency_profile_measure() - calculate latency and update histogram
 * @hif_ext_group: hif exec context
 *
 * Computes the time between the group being scheduled
 * (hif_latency_profile_start()) and now, and bumps the matching
 * sched_latency_stats bucket.
 *
 * Return: None
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
{
	int64_t cur_tstamp;
	int64_t time_elapsed;

	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());

	if (cur_tstamp > hif_ext_group->tstamp)
		time_elapsed = (cur_tstamp - hif_ext_group->tstamp);
	else
		/* NOTE(review): wrap-around path; ~0x0 is the all-ones
		 * pattern used as the counter maximum here — confirm
		 * against the qdf_ktime_to_ms() value range.
		 */
		time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp);

	/* restart measurement from this service */
	hif_ext_group->tstamp = cur_tstamp;

	if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
		hif_ext_group->sched_latency_stats[0]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
		hif_ext_group->sched_latency_stats[1]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
		hif_ext_group->sched_latency_stats[2]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
		hif_ext_group->sched_latency_stats[3]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
		hif_ext_group->sched_latency_stats[4]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
		hif_ext_group->sched_latency_stats[5]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
		hif_ext_group->sched_latency_stats[6]++;
	else
		hif_ext_group->sched_latency_stats[7]++;
}
#else
static inline
void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
{
}
#endif
/**
 * hif_latency_profile_start() - Update the start timestamp for HIF ext group
 * @hif_ext_group: hif exec context
 *
 * Return: None
 */
#ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
{
	/* ms-resolution stamp consumed by hif_latency_profile_measure() */
	hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
}
#else
static inline
void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
{
}
#endif
  532. #ifdef FEATURE_NAPI
#ifdef FEATURE_IRQ_AFFINITY
/**
 * hif_is_force_napi_complete_required() - check for a pending forced
 * napi_complete request on this group
 * @hif_ext_group: hif exec context
 *
 * NOTE(review): relies on qdf_atomic_inc_not_zero() semantics on
 * force_napi_complete to detect the request — verify against the QDF
 * atomic API and the code that sets/clears this counter.
 *
 * Return: non-zero if a forced completion was requested, 0 otherwise
 */
static inline int32_t
hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
{
	return qdf_atomic_inc_not_zero(&hif_ext_group->force_napi_complete);
}
#else
static inline int32_t
hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
{
	return 0;
}
#endif
  546. /**
  547. * hif_irq_disabled_time_limit_reached() - determine if irq disabled limit
  548. * reached for single MSI
  549. * @hif_ext_group: hif exec context
  550. *
  551. * Return: true if reached, else false.
  552. */
  553. static bool
  554. hif_irq_disabled_time_limit_reached(struct hif_exec_context *hif_ext_group)
  555. {
  556. unsigned long long irq_disabled_duration_ns;
  557. if (hif_ext_group->type != HIF_EXEC_NAPI_TYPE)
  558. return false;
  559. irq_disabled_duration_ns = qdf_time_sched_clock() -
  560. hif_ext_group->irq_disabled_start_time;
  561. if (irq_disabled_duration_ns >= IRQ_DISABLED_MAX_DURATION_NS) {
  562. hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
  563. 0, 0, 0, HIF_EVENT_IRQ_DISABLE_EXPIRED);
  564. return true;
  565. }
  566. return false;
  567. }
/**
 * hif_exec_poll() - napi poll
 * @napi: napi struct
 * @budget: budget for napi
 *
 * Runs the group handler with the internally scaled budget, decides
 * whether to complete NAPI (re-enable IRQs) or stay scheduled, and maps
 * the consumed work back to the NAPI budget scale.
 *
 * Return: mapping of internal budget to napi
 */
static int hif_exec_poll(struct napi_struct *napi, int budget)
{
	struct hif_napi_exec_context *napi_exec_ctx =
		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
	struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	int work_done;
	int normalized_budget = 0;
	int actual_dones;
	int shift = hif_ext_group->scale_bin_shift;
	int cpu = smp_processor_id();
	bool force_complete = false;

	hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
			 0, 0, 0, HIF_EVENT_BH_SCHED);

	hif_ext_group->force_break = false;
	hif_exec_update_service_start_time(hif_ext_group);

	/* scale the NAPI budget up to the internal ring quota */
	if (budget)
		normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget,
								   shift);

	hif_latency_profile_measure(hif_ext_group);

	work_done = hif_ext_group->handler(hif_ext_group->context,
					   normalized_budget, cpu);

	/* keep the true amount for the stats; work_done may be clamped */
	actual_dones = work_done;

	if (hif_is_force_napi_complete_required(hif_ext_group)) {
		force_complete = true;
		/* report less than budget so NAPI is allowed to complete */
		if (work_done >= normalized_budget)
			work_done = normalized_budget - 1;
	}

	/* complete when: forced, or under budget without a forced break,
	 * or (single-MSI) the IRQ-disabled time limit expired
	 */
	if (qdf_unlikely(force_complete) ||
	    (!hif_ext_group->force_break && work_done < normalized_budget) ||
	    ((pld_is_one_msi(scn->qdf_dev->dev) &&
	     hif_irq_disabled_time_limit_reached(hif_ext_group)))) {
		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_BH_COMPLETE);
		napi_complete(napi);
		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
		hif_ext_group->irq_enable(hif_ext_group);
		hif_ext_group->stats[cpu].napi_completes++;
	} else {
		/* if the ext_group supports time based yield, claim full work
		 * done anyways */
		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_BH_FORCE_BREAK);
		work_done = normalized_budget;
	}

	hif_ext_group->stats[cpu].napi_polls++;
	hif_ext_group->stats[cpu].napi_workdone += actual_dones;

	/* map internal budget to NAPI budget */
	if (work_done)
		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);

	hif_exec_fill_poll_time_histogram(hif_ext_group);

	return work_done;
}
  627. /**
  628. * hif_exec_napi_schedule() - schedule the napi exec instance
  629. * @ctx: a hif_exec_context known to be of napi type
  630. */
  631. static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
  632. {
  633. struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
  634. ctx->stats[smp_processor_id()].napi_schedules++;
  635. napi_schedule(&n_ctx->napi);
  636. }
  637. /**
  638. * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
  639. * @ctx: a hif_exec_context known to be of napi type
  640. */
  641. static void hif_exec_napi_kill(struct hif_exec_context *ctx)
  642. {
  643. struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
  644. int irq_ind;
  645. if (ctx->inited) {
  646. qdf_napi_disable(&n_ctx->napi);
  647. ctx->inited = 0;
  648. }
  649. for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
  650. hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
  651. hif_core_ctl_set_boost(false);
  652. qdf_netif_napi_del(&(n_ctx->napi));
  653. }
/* Scheduling operations for NAPI-backed exec contexts */
struct hif_execution_ops napi_sched_ops = {
	.schedule = &hif_exec_napi_schedule,
	.kill = &hif_exec_napi_kill,
};
  658. /**
  659. * hif_exec_napi_create() - allocate and initialize a napi exec context
  660. * @scale: a binary shift factor to map NAPI budget from\to internal
  661. * budget
  662. */
  663. static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
  664. {
  665. struct hif_napi_exec_context *ctx;
  666. ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
  667. if (!ctx)
  668. return NULL;
  669. ctx->exec_ctx.sched_ops = &napi_sched_ops;
  670. ctx->exec_ctx.inited = true;
  671. ctx->exec_ctx.scale_bin_shift = scale;
  672. qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev);
  673. qdf_netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
  674. QCA_NAPI_BUDGET);
  675. qdf_napi_enable(&ctx->napi);
  676. return &ctx->exec_ctx;
  677. }
#else
/* Without FEATURE_NAPI, fall back to a tasklet-backed exec context */
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	hif_warn("FEATURE_NAPI not defined, making tasklet");
	return hif_exec_tasklet_create();
}
#endif
  685. /**
  686. * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
  687. * @ctx: a hif_exec_context known to be of tasklet type
  688. */
  689. static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
  690. {
  691. struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
  692. int irq_ind;
  693. if (ctx->inited) {
  694. tasklet_disable(&t_ctx->tasklet);
  695. tasklet_kill(&t_ctx->tasklet);
  696. }
  697. ctx->inited = false;
  698. for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
  699. hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
  700. }
/* Scheduling operations for tasklet-backed exec contexts */
struct hif_execution_ops tasklet_sched_ops = {
	.schedule = &hif_exec_tasklet_schedule,
	.kill = &hif_exec_tasklet_kill,
};
  705. /**
  706. * hif_exec_tasklet_create() - allocate and initialize a tasklet exec context
  707. */
  708. static struct hif_exec_context *hif_exec_tasklet_create(void)
  709. {
  710. struct hif_tasklet_exec_context *ctx;
  711. ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
  712. if (!ctx)
  713. return NULL;
  714. ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
  715. tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
  716. (unsigned long)ctx);
  717. ctx->exec_ctx.inited = true;
  718. return &ctx->exec_ctx;
  719. }
  720. /**
  721. * hif_exec_get_ctx() - retrieve an exec context based on an id
  722. * @softc: the hif context owning the exec context
  723. * @id: the id of the exec context
  724. *
  725. * mostly added to make it easier to rename or move the context array
  726. */
  727. struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
  728. uint8_t id)
  729. {
  730. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
  731. if (id < hif_state->hif_num_extgroup)
  732. return hif_state->hif_ext_group[id];
  733. return NULL;
  734. }
  735. int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
  736. uint8_t id)
  737. {
  738. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
  739. if (id < hif_state->hif_num_extgroup)
  740. return hif_state->hif_ext_group[id]->os_irq[0];
  741. return -EINVAL;
  742. }
  743. qdf_export_symbol(hif_get_int_ctx_irq_num);
/**
 * hif_configure_ext_group_interrupts() - configure OS irqs for every
 *	registered ext group
 * @hif_ctx: hif opaque context
 *
 * Must be called once, after all groups have been registered via
 * hif_register_ext_group(); a second call is rejected.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE when already configured
 */
QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	struct hif_exec_context *hif_ext_group;
	int i, status;

	/* guard against double configuration */
	if (scn->ext_grp_irq_configured) {
		hif_err("Called after ext grp irq configured");
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		status = 0;

		/* the irq_lock is created for every group, configured or not */
		qdf_spinlock_create(&hif_ext_group->irq_lock);

		if (hif_ext_group->configured &&
		    hif_ext_group->irq_requested == false) {
			hif_ext_group->irq_enabled = true;
			status = hif_grp_irq_configure(scn, hif_ext_group);
		}
		/* NOTE(review): a per-group failure is logged but does not
		 * fail the overall call
		 */
		if (status != 0) {
			hif_err("Failed for group %d", i);
			hif_ext_group->irq_enabled = false;
		}
	}

	scn->ext_grp_irq_configured = true;

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_configure_ext_group_interrupts);
  772. void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
  773. {
  774. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  775. if (!scn || !scn->ext_grp_irq_configured) {
  776. hif_err("scn(%pk) is NULL or grp irq not configured", scn);
  777. return;
  778. }
  779. hif_grp_irq_deconfigure(scn);
  780. scn->ext_grp_irq_configured = false;
  781. }
  782. qdf_export_symbol(hif_deconfigure_ext_group_interrupts);
#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_check_and_trigger_ut_resume() - check if unit-test command was used to
 *				       trigger fake-suspend command, if yes
 *				       then issue resume procedure.
 * @scn: opaque HIF software context
 *
 * This API checks if unit-test command was used to trigger fake-suspend command
 * and if answer is yes then it would trigger resume procedure.
 *
 * Make this API inline to save API-switch overhead and do branch-prediction to
 * optimize performance impact.
 *
 * Return: void
 */
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
	/* unlikely: fake-suspend is a test-only path */
	if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
		hif_ut_fw_resume(scn);
}
#else
/* no-op stub when the suspend/resume unit-test feature is compiled out */
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
}
#endif
  808. /**
  809. * hif_check_and_trigger_sys_resume() - Check for bus suspend and
  810. * trigger system resume
  811. * @scn: hif context
  812. * @irq: irq number
  813. *
  814. * Return: None
  815. */
  816. static inline void
  817. hif_check_and_trigger_sys_resume(struct hif_softc *scn, int irq)
  818. {
  819. if (scn->bus_suspended && scn->linkstate_vote) {
  820. hif_info_rl("interrupt rcvd:%d trigger sys resume", irq);
  821. qdf_pm_system_wakeup();
  822. }
  823. }
/**
 * hif_ext_group_interrupt_handler() - handler for related interrupts
 * @irq: irq number of the interrupt
 * @context: the associated hif_exec_group context
 *
 * This callback function takes care of disabling the associated interrupts
 * and scheduling the expected bottom half for the exec_context.
 * This callback function also helps keep track of the count running contexts.
 *
 * Return: IRQ_HANDLED unconditionally
 */
irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
{
	struct hif_exec_context *hif_ext_group = context;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);

	/* skip scheduling when the group's irqs are not (or no longer)
	 * requested from the OS
	 */
	if (hif_ext_group->irq_requested) {
		hif_latency_profile_start(hif_ext_group);
		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_IRQ_TRIGGER);

		/* mask the group's irqs before handing off to the BH */
		hif_ext_group->irq_disable(hif_ext_group);

		/* single-MSI case: timestamp for irq-disabled accounting */
		if (pld_is_one_msi(scn->qdf_dev->dev))
			hif_ext_group->irq_disabled_start_time =
				qdf_time_sched_clock();
		/*
		 * if private ioctl has issued fake suspend command to put
		 * FW in D0-WOW state then here is our chance to bring FW out
		 * of WOW mode.
		 *
		 * The reason why you need to explicitly wake-up the FW is here:
		 * APSS should have been in fully awake through-out when
		 * fake APSS suspend command was issued (to put FW in WOW mode)
		 * hence organic way of waking-up the FW
		 * (as part-of APSS-host wake-up) won't happen because
		 * in reality APSS didn't really suspend.
		 */
		hif_check_and_trigger_ut_resume(scn);
		hif_check_and_trigger_sys_resume(scn, irq);

		/* count the bottom half as in-flight before scheduling it */
		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);
		hif_ext_group->sched_ops->schedule(hif_ext_group);
	}

	return IRQ_HANDLED;
}
  864. /**
  865. * hif_exec_kill() - grp tasklet kill
  866. * @hif_ctx: hif_softc
  867. *
  868. * return: void
  869. */
  870. void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
  871. {
  872. int i;
  873. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
  874. for (i = 0; i < hif_state->hif_num_extgroup; i++)
  875. hif_state->hif_ext_group[i]->sched_ops->kill(
  876. hif_state->hif_ext_group[i]);
  877. qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
  878. }
#ifdef FEATURE_IRQ_AFFINITY
/* initialize the group's force_napi_complete atomic */
static inline void
hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
{
	qdf_atomic_init(&hif_ext_group->force_napi_complete);
}
#else
/* no-op stub when irq-affinity support is compiled out */
static inline void
hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
{
}
#endif
/**
 * hif_register_ext_group() - API to register external group
 * interrupt handler.
 * @hif_ctx: HIF Context
 * @numirq: number of irq's in the group
 * @irq: array of irq values
 * @handler: callback interrupt handler function
 * @cb_ctx: context to passed in callback
 * @context_name: context name
 * @type: napi vs tasklet
 * @scale: bottom-half budget scale factor forwarded to hif_exec_create()
 *
 * Registration is only allowed before the group irqs are configured by
 * hif_configure_ext_group_interrupts().
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
				  uint32_t numirq, uint32_t irq[],
				  ext_intr_handler handler,
				  void *cb_ctx, const char *context_name,
				  enum hif_exec_type type, uint32_t scale)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	/* too late once the group irqs have been requested from the OS */
	if (scn->ext_grp_irq_configured) {
		hif_err("Called after ext grp irq configured");
		return QDF_STATUS_E_FAILURE;
	}

	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
		hif_err("Max groups: %d reached", hif_state->hif_num_extgroup);
		return QDF_STATUS_E_FAILURE;
	}

	if (numirq >= HIF_MAX_GRP_IRQ) {
		hif_err("Invalid numirq: %d", numirq);
		return QDF_STATUS_E_FAILURE;
	}

	/* allocates a napi or tasklet exec context based on @type */
	hif_ext_group = hif_exec_create(type, scale);
	if (!hif_ext_group)
		return QDF_STATUS_E_FAILURE;

	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
		hif_ext_group;

	hif_ext_group->numirq = numirq;
	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
	hif_ext_group->context = cb_ctx;
	hif_ext_group->handler = handler;
	hif_ext_group->configured = true;
	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
	hif_ext_group->hif = hif_ctx;
	hif_ext_group->context_name = context_name;
	hif_ext_group->type = type;
	hif_init_force_napi_complete(hif_ext_group);

	hif_state->hif_num_extgroup++;
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_register_ext_group);
  945. /**
  946. * hif_exec_create() - create an execution context
  947. * @type: the type of execution context to create
  948. * @scale:
  949. */
  950. struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
  951. uint32_t scale)
  952. {
  953. hif_debug("%s: create exec_type %d budget %d",
  954. __func__, type, QCA_NAPI_BUDGET * scale);
  955. switch (type) {
  956. case HIF_EXEC_NAPI_TYPE:
  957. return hif_exec_napi_create(scale);
  958. case HIF_EXEC_TASKLET_TYPE:
  959. return hif_exec_tasklet_create();
  960. default:
  961. return NULL;
  962. }
  963. }
  964. /**
  965. * hif_exec_destroy() - free the hif_exec context
  966. * @ctx: context to free
  967. *
  968. * please kill the context before freeing it to avoid a use after free.
  969. */
  970. void hif_exec_destroy(struct hif_exec_context *ctx)
  971. {
  972. struct hif_softc *scn = HIF_GET_SOFTC(ctx->hif);
  973. if (scn->ext_grp_irq_configured)
  974. qdf_spinlock_destroy(&ctx->irq_lock);
  975. qdf_mem_free(ctx);
  976. }
/**
 * hif_deregister_exec_group() - API to free the exec contexts
 * @hif_ctx: HIF context
 * @context_name: name of the module whose contexts need to be deregistered
 *
 * This function deregisters the contexts of the requestor identified
 * based on the context_name & frees the memory.
 *
 * Return: void
 */
void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
			       const char *context_name)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;
	int i;

	/* scan every slot; matching groups may be interleaved with others */
	for (i = 0; i < HIF_MAX_GROUP; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];

		if (!hif_ext_group)
			continue;

		/* NOTE: logged for every registered group, not just matches */
		hif_debug("%s: Deregistering grp id %d name %s",
			  __func__,
			  hif_ext_group->grp_id,
			  hif_ext_group->context_name);

		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
			/* stop the bottom half before freeing its context */
			hif_ext_group->sched_ops->kill(hif_ext_group);

			/* clear the slot first to avoid a dangling pointer */
			hif_state->hif_ext_group[i] = NULL;
			hif_exec_destroy(hif_ext_group);
			hif_state->hif_num_extgroup--;
		}
	}
}
qdf_export_symbol(hif_deregister_exec_group);
  1011. #ifdef DP_UMAC_HW_RESET_SUPPORT
  1012. /**
  1013. * hif_umac_reset_handler_tasklet() - Tasklet for UMAC HW reset interrupt
  1014. * @data: UMAC HW reset HIF context
  1015. *
  1016. * return: void
  1017. */
  1018. static void hif_umac_reset_handler_tasklet(unsigned long data)
  1019. {
  1020. struct hif_umac_reset_ctx *umac_reset_ctx =
  1021. (struct hif_umac_reset_ctx *)data;
  1022. /* call the callback handler */
  1023. umac_reset_ctx->cb_handler(umac_reset_ctx->cb_ctx);
  1024. }
/**
 * hif_umac_reset_irq_handler() - Interrupt service routine of UMAC HW reset
 * @irq: irq coming from kernel
 * @ctx: UMAC HW reset HIF context
 *
 * return: IRQ_HANDLED unconditionally (the irq is claimed even when the
 *	filter declines to schedule the tasklet)
 */
static irqreturn_t hif_umac_reset_irq_handler(int irq, void *ctx)
{
	struct hif_umac_reset_ctx *umac_reset_ctx = ctx;

	/* Schedule the tasklet if it is umac reset interrupt and exit */
	if (umac_reset_ctx->irq_handler(umac_reset_ctx->cb_ctx))
		tasklet_hi_schedule(&umac_reset_ctx->intr_tq);

	return IRQ_HANDLED;
}
  1040. QDF_STATUS hif_get_umac_reset_irq(struct hif_opaque_softc *hif_scn,
  1041. int *umac_reset_irq)
  1042. {
  1043. int ret;
  1044. struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
  1045. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
  1046. struct platform_device *pdev = (struct platform_device *)sc->pdev;
  1047. ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
  1048. "umac_reset", 0, umac_reset_irq);
  1049. if (ret) {
  1050. hif_err("umac reset get irq failed ret %d", ret);
  1051. return QDF_STATUS_E_FAILURE;
  1052. }
  1053. return QDF_STATUS_SUCCESS;
  1054. }
  1055. qdf_export_symbol(hif_get_umac_reset_irq);
/**
 * hif_register_umac_reset_handler() - register the UMAC HW reset interrupt
 * @hif_scn: hif opaque handle
 * @irq_handler: top-half filter run in hard-irq context; returning true
 *	causes the tasklet to be scheduled
 * @tl_handler: bottom-half callback invoked from the tasklet
 * @cb_ctx: context passed to both callbacks
 * @irq: OS irq number to request
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   bool (*irq_handler)(void *cb_ctx),
					   int (*tl_handler)(void *cb_ctx),
					   void *cb_ctx, int irq)
{
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
	struct hif_umac_reset_ctx *umac_reset_ctx;
	int ret;

	if (!hif_sc) {
		hif_err("scn is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	umac_reset_ctx = &hif_sc->umac_reset_ctx;

	/* populate the context before the irq can fire */
	umac_reset_ctx->irq_handler = irq_handler;
	umac_reset_ctx->cb_handler = tl_handler;
	umac_reset_ctx->cb_ctx = cb_ctx;
	umac_reset_ctx->os_irq = irq;

	/* Init the tasklet */
	tasklet_init(&umac_reset_ctx->intr_tq,
		     hif_umac_reset_handler_tasklet,
		     (unsigned long)umac_reset_ctx);

	/* Register the interrupt handler */
	ret = pfrm_request_irq(hif_sc->qdf_dev->dev, irq,
			       hif_umac_reset_irq_handler,
			       IRQF_NO_SUSPEND,
			       "umac_hw_reset_irq",
			       umac_reset_ctx);
	if (ret) {
		hif_err("request_irq failed: %d", ret);
		return qdf_status_from_os_return(ret);
	}

	umac_reset_ctx->irq_configured = true;

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_register_umac_reset_handler);
/**
 * hif_unregister_umac_reset_handler() - free the UMAC HW reset irq and
 *	tear down its tasklet
 * @hif_scn: hif opaque handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
{
	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
	struct hif_umac_reset_ctx *umac_reset_ctx;
	int ret;

	if (!hif_sc) {
		hif_err("scn is null");
		return QDF_STATUS_E_NULL_VALUE;
	}

	umac_reset_ctx = &hif_sc->umac_reset_ctx;

	/* reject unbalanced unregister calls */
	if (!umac_reset_ctx->irq_configured) {
		hif_err("unregister called without a prior IRQ configuration");
		return QDF_STATUS_E_FAILURE;
	}

	/* free the irq first so the tasklet can no longer be scheduled */
	ret = pfrm_free_irq(hif_sc->qdf_dev->dev,
			    umac_reset_ctx->os_irq,
			    umac_reset_ctx);
	if (ret) {
		hif_err("free_irq failed: %d", ret);
		return qdf_status_from_os_return(ret);
	}

	umac_reset_ctx->irq_configured = false;

	tasklet_disable(&umac_reset_ctx->intr_tq);
	tasklet_kill(&umac_reset_ctx->intr_tq);

	umac_reset_ctx->cb_handler = NULL;
	umac_reset_ctx->cb_ctx = NULL;

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(hif_unregister_umac_reset_handler);
  1120. #endif