- /*
- * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
- #include <hif_exec.h>
- #include <ce_main.h>
- #include "qdf_module.h"
- #include "qdf_net_if.h"
- #include <pld_common.h>
- #ifdef DP_UMAC_HW_RESET_SUPPORT
- #include "if_pci.h"
- #endif
- #include "qdf_ssr_driver_dump.h"
- /*
- * Map NAPI budget 0 to internal budget 0,
- * NAPI budget 1 to internal budget [1, scaler - 1],
- * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc.
- */
- #define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
- (((n) << (s)) - 1)
- #define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
- (((n) + 1) >> (s))
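- /*
- * Illustrative arithmetic (not from the original source): with
- * scale_bin_shift s = 1 (scaler = 2),
- * NAPI_BUDGET_TO_INTERNAL_BUDGET(64, 1) = (64 << 1) - 1 = 127 and
- * INTERNAL_BUDGET_TO_NAPI_BUDGET(127, 1) = (127 + 1) >> 1 = 64,
- * so a full budget maps back losslessly.
- */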
- static struct hif_exec_context *hif_exec_tasklet_create(void);
- #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
- struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS];
- uint32_t hif_event_hist_max = HIF_EVENT_HIST_MAX;
- void hif_desc_history_log_register(void)
- {
- qdf_ssr_driver_dump_register_region("hif_event_history",
- hif_event_desc_history,
- sizeof(hif_event_desc_history));
- qdf_ssr_driver_dump_register_region("hif_event_hist_max",
- &hif_event_hist_max,
- sizeof(hif_event_hist_max));
- }
- void hif_desc_history_log_unregister(void)
- {
- qdf_ssr_driver_dump_unregister_region("hif_event_hist_max");
- qdf_ssr_driver_dump_unregister_region("hif_event_history");
- }
- static inline
- int hif_get_next_record_index(qdf_atomic_t *table_index,
- int array_size)
- {
- int record_index = qdf_atomic_inc_return(table_index);
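- /* array_size must be a power of two for this mask-based wraparound */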
- return record_index & (array_size - 1);
- }
- /**
- * hif_hist_is_prev_record() - Check if index is the immediate
- * previous record wrt curr_index
- * @curr_index: curr index in the event history
- * @index: index to be checked
- * @hist_size: history size
- *
- * Return: true if index is immediately behind curr_index else false
- */
- static inline
- bool hif_hist_is_prev_record(int32_t curr_index, int32_t index,
- uint32_t hist_size)
- {
- return ((index + 1) & (hist_size - 1)) == curr_index;
- }
- /**
- * hif_hist_skip_event_record() - Check if current event needs to be
- * recorded or not
- * @hist_ev: HIF event history
- * @event: DP event entry
- *
- * Return: true if current event needs to be skipped else false
- */
- static bool
- hif_hist_skip_event_record(struct hif_event_history *hist_ev,
- struct hif_event_record *event)
- {
- struct hif_event_record *rec;
- struct hif_event_record *last_irq_rec;
- int32_t index;
- index = qdf_atomic_read(&hist_ev->index);
- if (index < 0)
- return false;
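- /* HIF_EVENT_HIST_MAX is a power of two; masking gives the ring slot */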
- index &= (HIF_EVENT_HIST_MAX - 1);
- rec = &hist_ev->event[index];
- switch (event->type) {
- case HIF_EVENT_IRQ_TRIGGER:
- /*
- * The prev record check is to prevent skipping the IRQ event
- * record in case where BH got re-scheduled due to force_break
- * but there are no entries to be reaped in the rings.
- */
- if (rec->type == HIF_EVENT_BH_SCHED &&
- hif_hist_is_prev_record(index,
- hist_ev->misc.last_irq_index,
- HIF_EVENT_HIST_MAX)) {
- last_irq_rec =
- &hist_ev->event[hist_ev->misc.last_irq_index];
- last_irq_rec->timestamp = hif_get_log_timestamp();
- last_irq_rec->cpu_id = qdf_get_cpu();
- last_irq_rec->hp++;
- last_irq_rec->tp = last_irq_rec->timestamp -
- hist_ev->misc.last_irq_ts;
- return true;
- }
- break;
- case HIF_EVENT_BH_SCHED:
- if (rec->type == HIF_EVENT_BH_SCHED) {
- rec->timestamp = hif_get_log_timestamp();
- rec->cpu_id = qdf_get_cpu();
- return true;
- }
- break;
- case HIF_EVENT_SRNG_ACCESS_START:
- if (event->hp == event->tp)
- return true;
- break;
- case HIF_EVENT_SRNG_ACCESS_END:
- if (rec->type != HIF_EVENT_SRNG_ACCESS_START)
- return true;
- break;
- case HIF_EVENT_BH_COMPLETE:
- case HIF_EVENT_BH_FORCE_BREAK:
- if (rec->type != HIF_EVENT_SRNG_ACCESS_END)
- return true;
- break;
- default:
- break;
- }
- return false;
- }
- void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
- struct hif_event_record *event, uint8_t intr_grp_id)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- struct hif_event_history *hist_ev;
- struct hif_event_record *record;
- int record_index;
- if (!(scn->event_enable_mask & BIT(event->type)))
- return;
- if (qdf_unlikely(intr_grp_id >= HIF_NUM_INT_CONTEXTS)) {
- hif_err("Invalid interrupt group id %d", intr_grp_id);
- return;
- }
- hist_ev = scn->evt_hist[intr_grp_id];
- if (qdf_unlikely(!hist_ev))
- return;
- if (hif_hist_skip_event_record(hist_ev, event))
- return;
- record_index = hif_get_next_record_index(
- &hist_ev->index, HIF_EVENT_HIST_MAX);
- record = &hist_ev->event[record_index];
- if (event->type == HIF_EVENT_IRQ_TRIGGER) {
- hist_ev->misc.last_irq_index = record_index;
- hist_ev->misc.last_irq_ts = hif_get_log_timestamp();
- }
- record->hal_ring_id = event->hal_ring_id;
- record->hp = event->hp;
- record->tp = event->tp;
- record->cpu_id = qdf_get_cpu();
- record->timestamp = hif_get_log_timestamp();
- record->type = event->type;
- }
- void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- scn->evt_hist[id] = &hif_event_desc_history[id];
- qdf_atomic_set(&scn->evt_hist[id]->index, -1);
- hif_info("SRNG events history initialized for group: %d", id);
- }
- void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- scn->evt_hist[id] = NULL;
- hif_info("SRNG events history de-initialized for group: %d", id);
- }
- #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
- #ifndef QCA_WIFI_WCN6450
- /**
- * hif_print_napi_latency_stats() - print NAPI scheduling latency stats
- * @hif_state: hif context
- *
- * Return: void
- */
- #ifdef HIF_LATENCY_PROFILE_ENABLE
- static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
- {
- struct hif_exec_context *hif_ext_group;
- int i, j;
- int64_t cur_tstamp;
- const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] = {
- "0-2 ms",
- "3-10 ms",
- "11-20 ms",
- "21-50 ms",
- "51-100 ms",
- "101-250 ms",
- "251-500 ms",
- "> 500 ms"
- };
- cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
- QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
- "Current timestamp: %lld", cur_tstamp);
- for (i = 0; i < hif_state->hif_num_extgroup; i++) {
- if (hif_state->hif_ext_group[i]) {
- hif_ext_group = hif_state->hif_ext_group[i];
- QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
- "ext grp %d Last serviced timestamp: %lld",
- i, hif_ext_group->tstamp);
- QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
- "Latency Bucket | Time elapsed");
- for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) {
- if (hif_ext_group->sched_latency_stats[j])
- QDF_TRACE(QDF_MODULE_ID_HIF,
- QDF_TRACE_LEVEL_INFO_HIGH,
- "%s | %lld",
- time_str[j],
- hif_ext_group->
- sched_latency_stats[j]);
- }
- }
- }
- }
- #else
- static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
- {
- }
- #endif
- /**
- * hif_clear_napi_stats() - reset NAPI stats
- * @hif_ctx: hif context
- *
- * Return: void
- */
- void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
- {
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
- struct hif_exec_context *hif_ext_group;
- size_t i;
- for (i = 0; i < hif_state->hif_num_extgroup; i++) {
- hif_ext_group = hif_state->hif_ext_group[i];
- if (!hif_ext_group)
- return;
- qdf_mem_set(hif_ext_group->sched_latency_stats,
- sizeof(hif_ext_group->sched_latency_stats),
- 0x0);
- }
- }
- qdf_export_symbol(hif_clear_napi_stats);
- #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
- /**
- * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
- * @stats: NAPI stats to get poll time buckets
- * @buf: buffer to fill histogram string
- * @buf_len: length of the buffer
- *
- * Return: void
- */
- static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
- uint8_t buf_len)
- {
- int i;
- int str_index = 0;
- for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
- str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
- "%u|", stats->poll_time_buckets[i]);
- }
- void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
- {
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
- struct hif_exec_context *hif_ext_group;
- struct qca_napi_stat *napi_stats;
- int i, j;
- /*
- * Max value of uint32_t (poll_time_bucket) = 4294967295,
- * so each bucket value needs 10 chars + 1 separator = 11 chars,
- * plus 1 char for the terminating '\0'.
- */
- char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};
- QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
- "NAPI[#]CPU[#] |scheds |polls |comps |dones |t-lim |max(us)|hist(500us buckets)");
- for (i = 0;
- (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
- i++) {
- hif_ext_group = hif_state->hif_ext_group[i];
- for (j = 0; j < num_possible_cpus(); j++) {
- napi_stats = &hif_ext_group->stats[j];
- if (!napi_stats->napi_schedules)
- continue;
- hif_get_poll_times_hist_str(napi_stats,
- hist_str,
- sizeof(hist_str));
- QDF_TRACE(QDF_MODULE_ID_HIF,
- QDF_TRACE_LEVEL_INFO_HIGH,
- "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
- i, j,
- napi_stats->napi_schedules,
- napi_stats->napi_polls,
- napi_stats->napi_completes,
- napi_stats->napi_workdone,
- napi_stats->time_limit_reached,
- qdf_do_div(napi_stats->napi_max_poll_time,
- 1000),
- hist_str);
- }
- }
- hif_print_napi_latency_stats(hif_state);
- }
- qdf_export_symbol(hif_print_napi_stats);
- #else
- static inline
- void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
- uint8_t buf_len)
- {
- }
- void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
- {
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
- struct hif_exec_context *hif_ext_group;
- struct qca_napi_stat *napi_stats;
- int i, j;
- QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
- "NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");
- for (i = 0; i < hif_state->hif_num_extgroup; i++) {
- if (hif_state->hif_ext_group[i]) {
- hif_ext_group = hif_state->hif_ext_group[i];
- for (j = 0; j < num_possible_cpus(); j++) {
- napi_stats = &(hif_ext_group->stats[j]);
- if (napi_stats->napi_schedules != 0)
- QDF_TRACE(QDF_MODULE_ID_HIF,
- QDF_TRACE_LEVEL_FATAL,
- "NAPI[%2d]CPU[%d]: "
- "%7d %7d %7d %7d ",
- i, j,
- napi_stats->napi_schedules,
- napi_stats->napi_polls,
- napi_stats->napi_completes,
- napi_stats->napi_workdone);
- }
- }
- }
- hif_print_napi_latency_stats(hif_state);
- }
- qdf_export_symbol(hif_print_napi_stats);
- #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
- #endif /* QCA_WIFI_WCN6450 */
- #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
- /**
- * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
- * @hif_ext_group: hif_ext_group of type NAPI
- *
- * The function is called at the end of a NAPI poll to calculate poll time
- * buckets.
- *
- * Return: void
- */
- static
- void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
- {
- struct qca_napi_stat *napi_stat;
- unsigned long long poll_time_ns;
- uint32_t poll_time_us;
- uint32_t bucket_size_us = 500;
- uint32_t bucket;
- uint32_t cpu_id = qdf_get_cpu();
- poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
- poll_time_us = qdf_do_div(poll_time_ns, 1000);
- napi_stat = &hif_ext_group->stats[cpu_id];
- if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time)
- hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns;
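- /* 500 us buckets: e.g. a 1.2 ms poll lands in bucket 1200 / 500 = 2 */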
- bucket = poll_time_us / bucket_size_us;
- if (bucket >= QCA_NAPI_NUM_BUCKETS)
- bucket = QCA_NAPI_NUM_BUCKETS - 1;
- ++napi_stat->poll_time_buckets[bucket];
- }
- /**
- * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield
- * @hif_ext_group: hif_ext_group of type NAPI
- *
- * Return: true if NAPI needs to yield, else false
- */
- static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group)
- {
- bool time_limit_reached = false;
- unsigned long long poll_time_ns;
- int cpu_id = qdf_get_cpu();
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
- struct hif_config_info *cfg = &scn->hif_config;
- poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
- time_limit_reached =
- poll_time_ns > cfg->rx_softirq_max_yield_duration_ns;
- if (time_limit_reached) {
- hif_ext_group->stats[cpu_id].time_limit_reached++;
- hif_ext_group->force_break = true;
- }
- return time_limit_reached;
- }
- bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
- struct hif_exec_context *hif_ext_group;
- bool ret_val = false;
- if (grp_id >= hif_state->hif_num_extgroup ||
- grp_id >= HIF_MAX_GROUP)
- return false;
- hif_ext_group = hif_state->hif_ext_group[grp_id];
- if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE)
- ret_val = hif_exec_poll_should_yield(hif_ext_group);
- return ret_val;
- }
- /**
- * hif_exec_update_service_start_time() - Update NAPI poll start time
- * @hif_ext_group: hif_ext_group of type NAPI
- *
- * The function is called at the beginning of a NAPI poll to record the poll
- * start time.
- *
- * Return: None
- */
- static inline
- void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
- {
- hif_ext_group->poll_start_time = qdf_time_sched_clock();
- }
- #else
- static inline
- void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
- {
- }
- static inline
- void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
- {
- }
- #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
- static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
- {
- struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
- tasklet_schedule(&t_ctx->tasklet);
- }
- /**
- * hif_exec_tasklet_fn() - grp tasklet
- * @data: context
- *
- * Return: void
- */
- static void hif_exec_tasklet_fn(unsigned long data)
- {
- struct hif_exec_context *hif_ext_group =
- (struct hif_exec_context *)data;
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
- unsigned int work_done;
- int cpu = smp_processor_id();
- work_done =
- hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET,
- cpu);
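- /* re-enable the group IRQ only when all work is done; else poll again */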
- if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
- qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
- hif_ext_group->irq_enable(hif_ext_group);
- } else {
- hif_exec_tasklet_schedule(hif_ext_group);
- }
- }
- /**
- * hif_latency_profile_measure() - calculate latency and update histogram
- * @hif_ext_group: hif exec context
- *
- * Return: None
- */
- #ifdef HIF_LATENCY_PROFILE_ENABLE
- static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
- {
- int64_t cur_tstamp;
- int64_t time_elapsed;
- cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
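- /* the else branch handles the (rare) ms-timestamp wraparound case */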
- if (cur_tstamp > hif_ext_group->tstamp)
- time_elapsed = (cur_tstamp - hif_ext_group->tstamp);
- else
- time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp);
- hif_ext_group->tstamp = cur_tstamp;
- if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
- hif_ext_group->sched_latency_stats[0]++;
- else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
- hif_ext_group->sched_latency_stats[1]++;
- else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
- hif_ext_group->sched_latency_stats[2]++;
- else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
- hif_ext_group->sched_latency_stats[3]++;
- else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
- hif_ext_group->sched_latency_stats[4]++;
- else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
- hif_ext_group->sched_latency_stats[5]++;
- else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
- hif_ext_group->sched_latency_stats[6]++;
- else
- hif_ext_group->sched_latency_stats[7]++;
- }
- #else
- static inline
- void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
- {
- }
- #endif
- /**
- * hif_latency_profile_start() - Update the start timestamp for HIF ext group
- * @hif_ext_group: hif exec context
- *
- * Return: None
- */
- #ifdef HIF_LATENCY_PROFILE_ENABLE
- static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
- {
- hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
- }
- #else
- static inline
- void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
- {
- }
- #endif
- #ifdef FEATURE_NAPI
- #ifdef FEATURE_IRQ_AFFINITY
- static inline int32_t
- hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
- {
- return qdf_atomic_inc_not_zero(&hif_ext_group->force_napi_complete);
- }
- #else
- static inline int32_t
- hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
- {
- return 0;
- }
- #endif
- /**
- * hif_irq_disabled_time_limit_reached() - determine if irq disabled limit
- * reached for single MSI
- * @hif_ext_group: hif exec context
- *
- * Return: true if reached, else false.
- */
- static bool
- hif_irq_disabled_time_limit_reached(struct hif_exec_context *hif_ext_group)
- {
- unsigned long long irq_disabled_duration_ns;
- if (hif_ext_group->type != HIF_EXEC_NAPI_TYPE)
- return false;
- irq_disabled_duration_ns = qdf_time_sched_clock() -
- hif_ext_group->irq_disabled_start_time;
- if (irq_disabled_duration_ns >= IRQ_DISABLED_MAX_DURATION_NS) {
- hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
- 0, 0, 0, HIF_EVENT_IRQ_DISABLE_EXPIRED);
- return true;
- }
- return false;
- }
- /**
- * hif_exec_poll() - napi poll
- * @napi: napi struct
- * @budget: budget for napi
- *
- * Return: work done, mapped from the internal budget back to the NAPI budget
- */
- static int hif_exec_poll(struct napi_struct *napi, int budget)
- {
- struct hif_napi_exec_context *napi_exec_ctx =
- qdf_container_of(napi, struct hif_napi_exec_context, napi);
- struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx;
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
- int work_done;
- int normalized_budget = 0;
- int actual_dones;
- int shift = hif_ext_group->scale_bin_shift;
- int cpu = smp_processor_id();
- bool force_complete = false;
- hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
- 0, 0, 0, HIF_EVENT_BH_SCHED);
- hif_ext_group->force_break = false;
- hif_exec_update_service_start_time(hif_ext_group);
- if (budget)
- normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);
- hif_latency_profile_measure(hif_ext_group);
- work_done = hif_ext_group->handler(hif_ext_group->context,
- normalized_budget, cpu);
- actual_dones = work_done;
- if (hif_is_force_napi_complete_required(hif_ext_group)) {
- force_complete = true;
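- /*
- * Claim less than the full budget: NAPI forbids completing a poll
- * that consumed its entire budget, and this path must complete.
- */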
- if (work_done >= normalized_budget)
- work_done = normalized_budget - 1;
- }
- if (qdf_unlikely(force_complete) ||
- (!hif_ext_group->force_break && work_done < normalized_budget) ||
- ((pld_is_one_msi(scn->qdf_dev->dev) &&
- hif_irq_disabled_time_limit_reached(hif_ext_group)))) {
- hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
- 0, 0, 0, HIF_EVENT_BH_COMPLETE);
- napi_complete(napi);
- qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
- hif_ext_group->irq_enable(hif_ext_group);
- hif_ext_group->stats[cpu].napi_completes++;
- } else {
- /* if the ext_group supports time-based yield, claim full work
- * done anyway */
- hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
- 0, 0, 0, HIF_EVENT_BH_FORCE_BREAK);
- work_done = normalized_budget;
- }
- hif_ext_group->stats[cpu].napi_polls++;
- hif_ext_group->stats[cpu].napi_workdone += actual_dones;
- /* map internal budget to NAPI budget */
- if (work_done)
- work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);
- hif_exec_fill_poll_time_histogram(hif_ext_group);
- return work_done;
- }
- /**
- * hif_exec_napi_schedule() - schedule the napi exec instance
- * @ctx: a hif_exec_context known to be of napi type
- */
- static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
- {
- struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
- ctx->stats[smp_processor_id()].napi_schedules++;
- napi_schedule(&n_ctx->napi);
- }
- /**
- * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
- * @ctx: a hif_exec_context known to be of napi type
- */
- static void hif_exec_napi_kill(struct hif_exec_context *ctx)
- {
- struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
- int irq_ind;
- if (ctx->inited) {
- qdf_napi_disable(&n_ctx->napi);
- ctx->inited = 0;
- }
- for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
- hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
- hif_core_ctl_set_boost(false);
- qdf_netif_napi_del(&(n_ctx->napi));
- }
- struct hif_execution_ops napi_sched_ops = {
- .schedule = &hif_exec_napi_schedule,
- .kill = &hif_exec_napi_kill,
- };
- /**
- * hif_exec_napi_create() - allocate and initialize a napi exec context
- * @scale: a binary shift factor to map the NAPI budget to/from the internal
- * budget
- */
- static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
- {
- struct hif_napi_exec_context *ctx;
- ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
- if (!ctx)
- return NULL;
- ctx->exec_ctx.sched_ops = &napi_sched_ops;
- ctx->exec_ctx.inited = true;
- ctx->exec_ctx.scale_bin_shift = scale;
- qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev);
- qdf_netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
- QCA_NAPI_BUDGET);
- qdf_napi_enable(&ctx->napi);
- return &ctx->exec_ctx;
- }
- #else
- static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
- {
- hif_warn("FEATURE_NAPI not defined, making tasklet");
- return hif_exec_tasklet_create();
- }
- #endif
- /**
- * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
- * @ctx: a hif_exec_context known to be of tasklet type
- */
- static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
- {
- struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
- int irq_ind;
- if (ctx->inited) {
- tasklet_disable(&t_ctx->tasklet);
- tasklet_kill(&t_ctx->tasklet);
- }
- ctx->inited = false;
- for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
- hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
- }
- struct hif_execution_ops tasklet_sched_ops = {
- .schedule = &hif_exec_tasklet_schedule,
- .kill = &hif_exec_tasklet_kill,
- };
- /**
- * hif_exec_tasklet_create() - allocate and initialize a tasklet exec context
- */
- static struct hif_exec_context *hif_exec_tasklet_create(void)
- {
- struct hif_tasklet_exec_context *ctx;
- ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
- if (!ctx)
- return NULL;
- ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
- tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
- (unsigned long)ctx);
- ctx->exec_ctx.inited = true;
- return &ctx->exec_ctx;
- }
- /**
- * hif_exec_get_ctx() - retrieve an exec context based on an id
- * @softc: the hif context owning the exec context
- * @id: the id of the exec context
- *
- * mostly added to make it easier to rename or move the context array
- */
- struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
- uint8_t id)
- {
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
- if (id < hif_state->hif_num_extgroup)
- return hif_state->hif_ext_group[id];
- return NULL;
- }
- int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
- uint8_t id)
- {
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
- if (id < hif_state->hif_num_extgroup)
- return hif_state->hif_ext_group[id]->os_irq[0];
- return -EINVAL;
- }
- qdf_export_symbol(hif_get_int_ctx_irq_num);
- QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
- struct hif_exec_context *hif_ext_group;
- int i, status;
- if (scn->ext_grp_irq_configured) {
- hif_err("Called after ext grp irq configured");
- return QDF_STATUS_E_FAILURE;
- }
- for (i = 0; i < hif_state->hif_num_extgroup; i++) {
- hif_ext_group = hif_state->hif_ext_group[i];
- status = 0;
- qdf_spinlock_create(&hif_ext_group->irq_lock);
- if (hif_ext_group->configured &&
- hif_ext_group->irq_requested == false) {
- hif_ext_group->irq_enabled = true;
- status = hif_grp_irq_configure(scn, hif_ext_group);
- }
- if (status != 0) {
- hif_err("Failed for group %d", i);
- hif_ext_group->irq_enabled = false;
- }
- }
- scn->ext_grp_irq_configured = true;
- return QDF_STATUS_SUCCESS;
- }
- qdf_export_symbol(hif_configure_ext_group_interrupts);
- void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- if (!scn || !scn->ext_grp_irq_configured) {
- hif_err("scn(%pk) is NULL or grp irq not configured", scn);
- return;
- }
- hif_grp_irq_deconfigure(scn);
- scn->ext_grp_irq_configured = false;
- }
- qdf_export_symbol(hif_deconfigure_ext_group_interrupts);
- #ifdef WLAN_SUSPEND_RESUME_TEST
- /**
- * hif_check_and_trigger_ut_resume() - trigger the resume procedure if a
- * unit-test command issued the
- * fake-suspend command
- * @scn: opaque HIF software context
- *
- * This API checks whether a unit-test command was used to trigger the
- * fake-suspend command; if so, it triggers the resume procedure.
- *
- * The API is inline to save call overhead, and the branch prediction hint
- * (qdf_unlikely) minimizes the performance impact.
- *
- * Return: void
- */
- static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
- {
- if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
- hif_ut_fw_resume(scn);
- }
- #else
- static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
- {
- }
- #endif
- /**
- * hif_check_and_trigger_sys_resume() - Check for bus suspend and
- * trigger system resume
- * @scn: hif context
- * @irq: irq number
- *
- * Return: None
- */
- static inline void
- hif_check_and_trigger_sys_resume(struct hif_softc *scn, int irq)
- {
- if (scn->bus_suspended && scn->linkstate_vote) {
- hif_info_rl("interrupt rcvd:%d trigger sys resume", irq);
- qdf_pm_system_wakeup();
- }
- }
- /**
- * hif_ext_group_interrupt_handler() - handler for related interrupts
- * @irq: irq number of the interrupt
- * @context: the associated hif_exec_group context
- *
- * This callback function takes care of disabling the associated interrupts
- * and scheduling the expected bottom half for the exec_context.
- * This callback function also keeps track of the number of running contexts.
- */
- irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
- {
- struct hif_exec_context *hif_ext_group = context;
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
- if (hif_ext_group->irq_requested) {
- hif_latency_profile_start(hif_ext_group);
- hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
- 0, 0, 0, HIF_EVENT_IRQ_TRIGGER);
- hif_ext_group->irq_disable(hif_ext_group);
- if (pld_is_one_msi(scn->qdf_dev->dev))
- hif_ext_group->irq_disabled_start_time =
- qdf_time_sched_clock();
- /*
- * if private ioctl has issued fake suspend command to put
- * FW in D0-WOW state then here is our chance to bring FW out
- * of WOW mode.
- *
- * The FW must be woken explicitly because the APSS stayed fully
- * awake while the fake APSS suspend command was issued (to put
- * the FW in WOW mode); the organic FW wake-up (as part of the
- * APSS-host wake-up) never happens since the APSS never really
- * suspended.
- */
- hif_check_and_trigger_ut_resume(scn);
- hif_check_and_trigger_sys_resume(scn, irq);
- qdf_atomic_inc(&scn->active_grp_tasklet_cnt);
- hif_ext_group->sched_ops->schedule(hif_ext_group);
- }
- return IRQ_HANDLED;
- }
- /**
- * hif_exec_kill() - grp tasklet kill
- * @hif_ctx: hif_softc
- *
- * Return: void
- */
- void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
- {
- int i;
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
- for (i = 0; i < hif_state->hif_num_extgroup; i++)
- hif_state->hif_ext_group[i]->sched_ops->kill(
- hif_state->hif_ext_group[i]);
- qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
- }
- #ifdef FEATURE_IRQ_AFFINITY
- static inline void
- hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
- {
- qdf_atomic_init(&hif_ext_group->force_napi_complete);
- }
- #else
- static inline void
- hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
- {
- }
- #endif
- /**
- * hif_register_ext_group() - API to register external group
- * interrupt handler.
- * @hif_ctx : HIF Context
- * @numirq: number of irq's in the group
- * @irq: array of irq values
- * @handler: callback interrupt handler function
- * @cb_ctx: context to be passed to the callback
- * @context_name: context name
- * @type: napi vs tasklet
- * @scale: binary shift factor to map the NAPI budget to/from the internal budget
- *
- * Return: QDF_STATUS
- */
- QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
- uint32_t numirq, uint32_t irq[],
- ext_intr_handler handler,
- void *cb_ctx, const char *context_name,
- enum hif_exec_type type, uint32_t scale)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
- struct hif_exec_context *hif_ext_group;
- if (scn->ext_grp_irq_configured) {
- hif_err("Called after ext grp irq configured");
- return QDF_STATUS_E_FAILURE;
- }
- if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
- hif_err("Max groups: %d reached", hif_state->hif_num_extgroup);
- return QDF_STATUS_E_FAILURE;
- }
- if (numirq >= HIF_MAX_GRP_IRQ) {
- hif_err("Invalid numirq: %d", numirq);
- return QDF_STATUS_E_FAILURE;
- }
- hif_ext_group = hif_exec_create(type, scale);
- if (!hif_ext_group)
- return QDF_STATUS_E_FAILURE;
- hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
- hif_ext_group;
- hif_ext_group->numirq = numirq;
- qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
- hif_ext_group->context = cb_ctx;
- hif_ext_group->handler = handler;
- hif_ext_group->configured = true;
- hif_ext_group->grp_id = hif_state->hif_num_extgroup;
- hif_ext_group->hif = hif_ctx;
- hif_ext_group->context_name = context_name;
- hif_ext_group->type = type;
- hif_init_force_napi_complete(hif_ext_group);
- hif_state->hif_num_extgroup++;
- return QDF_STATUS_SUCCESS;
- }
- qdf_export_symbol(hif_register_ext_group);
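- /*
- * Illustrative usage (hypothetical names, not from this file): a caller
- * owning a single IRQ that wants NAPI-style servicing with no budget
- * scaling might register as follows:
- *
- *	uint32_t irqs[] = { my_msi_irq };
- *
- *	hif_register_ext_group(hif_ctx, 1, irqs, my_grp_handler, my_ctx,
- *			       "my_dp_intr", HIF_EXEC_NAPI_TYPE, 0);
- */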
- /**
- * hif_exec_create() - create an execution context
- * @type: the type of execution context to create
- * @scale: binary shift factor for the NAPI budget mapping
- */
- struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
- uint32_t scale)
- {
- hif_debug("%s: create exec_type %d budget %d",
- __func__, type, QCA_NAPI_BUDGET * scale);
- switch (type) {
- case HIF_EXEC_NAPI_TYPE:
- return hif_exec_napi_create(scale);
- case HIF_EXEC_TASKLET_TYPE:
- return hif_exec_tasklet_create();
- default:
- return NULL;
- }
- }
- /**
- * hif_exec_destroy() - free the hif_exec context
- * @ctx: context to free
- *
- * please kill the context before freeing it to avoid a use after free.
- */
- void hif_exec_destroy(struct hif_exec_context *ctx)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(ctx->hif);
- if (scn->ext_grp_irq_configured)
- qdf_spinlock_destroy(&ctx->irq_lock);
- qdf_mem_free(ctx);
- }
- /**
- * hif_deregister_exec_group() - API to free the exec contexts
- * @hif_ctx: HIF context
- * @context_name: name of the module whose contexts need to be deregistered
- *
- * This function deregisters the contexts of the requestor identified
- * based on the context_name & frees the memory.
- *
- * Return: void
- */
- void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
- const char *context_name)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
- struct hif_exec_context *hif_ext_group;
- int i;
- for (i = 0; i < HIF_MAX_GROUP; i++) {
- hif_ext_group = hif_state->hif_ext_group[i];
- if (!hif_ext_group)
- continue;
- hif_debug("%s: Deregistering grp id %d name %s",
- __func__,
- hif_ext_group->grp_id,
- hif_ext_group->context_name);
- if (strcmp(hif_ext_group->context_name, context_name) == 0) {
- hif_ext_group->sched_ops->kill(hif_ext_group);
- hif_state->hif_ext_group[i] = NULL;
- hif_exec_destroy(hif_ext_group);
- hif_state->hif_num_extgroup--;
- }
- }
- }
- qdf_export_symbol(hif_deregister_exec_group);
- #ifdef DP_UMAC_HW_RESET_SUPPORT
- /**
- * hif_umac_reset_handler_tasklet() - Tasklet for UMAC HW reset interrupt
- * @data: UMAC HW reset HIF context
- *
- * Return: void
- */
- static void hif_umac_reset_handler_tasklet(unsigned long data)
- {
- struct hif_umac_reset_ctx *umac_reset_ctx =
- (struct hif_umac_reset_ctx *)data;
- /* call the callback handler */
- umac_reset_ctx->cb_handler(umac_reset_ctx->cb_ctx);
- }
- /**
- * hif_umac_reset_irq_handler() - Interrupt service routine of UMAC HW reset
- * @irq: irq coming from kernel
- * @ctx: UMAC HW reset HIF context
- *
- * Return: IRQ_HANDLED
- */
- static irqreturn_t hif_umac_reset_irq_handler(int irq, void *ctx)
- {
- struct hif_umac_reset_ctx *umac_reset_ctx = ctx;
- /* Schedule the tasklet if this is the UMAC reset interrupt, then exit */
- if (umac_reset_ctx->irq_handler(umac_reset_ctx->cb_ctx))
- tasklet_hi_schedule(&umac_reset_ctx->intr_tq);
- return IRQ_HANDLED;
- }
- QDF_STATUS hif_get_umac_reset_irq(struct hif_opaque_softc *hif_scn,
- int *umac_reset_irq)
- {
- int ret;
- struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
- struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
- struct platform_device *pdev = (struct platform_device *)sc->pdev;
- ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
- "umac_reset", 0, umac_reset_irq);
- if (ret) {
- hif_err("umac reset get irq failed ret %d", ret);
- return QDF_STATUS_E_FAILURE;
- }
- return QDF_STATUS_SUCCESS;
- }
- qdf_export_symbol(hif_get_umac_reset_irq);
- QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
- bool (*irq_handler)(void *cb_ctx),
- int (*tl_handler)(void *cb_ctx),
- void *cb_ctx, int irq)
- {
- struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
- struct hif_umac_reset_ctx *umac_reset_ctx;
- int ret;
- if (!hif_sc) {
- hif_err("scn is null");
- return QDF_STATUS_E_NULL_VALUE;
- }
- umac_reset_ctx = &hif_sc->umac_reset_ctx;
- umac_reset_ctx->irq_handler = irq_handler;
- umac_reset_ctx->cb_handler = tl_handler;
- umac_reset_ctx->cb_ctx = cb_ctx;
- umac_reset_ctx->os_irq = irq;
- /* Init the tasklet */
- tasklet_init(&umac_reset_ctx->intr_tq,
- hif_umac_reset_handler_tasklet,
- (unsigned long)umac_reset_ctx);
- /* Register the interrupt handler */
- ret = pfrm_request_irq(hif_sc->qdf_dev->dev, irq,
- hif_umac_reset_irq_handler,
- IRQF_NO_SUSPEND,
- "umac_hw_reset_irq",
- umac_reset_ctx);
- if (ret) {
- hif_err("request_irq failed: %d", ret);
- return qdf_status_from_os_return(ret);
- }
- umac_reset_ctx->irq_configured = true;
- return QDF_STATUS_SUCCESS;
- }
- qdf_export_symbol(hif_register_umac_reset_handler);
- QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
- {
- struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
- struct hif_umac_reset_ctx *umac_reset_ctx;
- int ret;
- if (!hif_sc) {
- hif_err("scn is null");
- return QDF_STATUS_E_NULL_VALUE;
- }
- umac_reset_ctx = &hif_sc->umac_reset_ctx;
- if (!umac_reset_ctx->irq_configured) {
- hif_err("unregister called without a prior IRQ configuration");
- return QDF_STATUS_E_FAILURE;
- }
- ret = pfrm_free_irq(hif_sc->qdf_dev->dev,
- umac_reset_ctx->os_irq,
- umac_reset_ctx);
- if (ret) {
- hif_err("free_irq failed: %d", ret);
- return qdf_status_from_os_return(ret);
- }
- umac_reset_ctx->irq_configured = false;
- tasklet_disable(&umac_reset_ctx->intr_tq);
- tasklet_kill(&umac_reset_ctx->intr_tq);
- umac_reset_ctx->cb_handler = NULL;
- umac_reset_ctx->cb_ctx = NULL;
- return QDF_STATUS_SUCCESS;
- }
- qdf_export_symbol(hif_unregister_umac_reset_handler);
- #endif