- /*
- * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
- *
- * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
- *
- *
- * Permission to use, copy, modify, and/or distribute this software for
- * any purpose with or without fee is hereby granted, provided that the
- * above copyright notice and this permission notice appear in all
- * copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
- * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
- /*
- * This file was originally distributed by Qualcomm Atheros, Inc.
- * under proprietary terms before Copyright ownership was assigned
- * to the Linux Foundation.
- */
- #include "targcfg.h"
- #include "qdf_lock.h"
- #include "qdf_status.h"
- #include "qdf_status.h"
- #include <qdf_atomic.h> /* qdf_atomic_read */
- #include <targaddrs.h>
- #include <bmi_msg.h>
- #include "hif_io32.h"
- #include <hif.h>
- #include "regtable.h"
- #define ATH_MODULE_NAME hif
- #include <a_debug.h>
- #include "hif_main.h"
- #include "ce_api.h"
- #include "qdf_trace.h"
- #ifdef CONFIG_CNSS
- #include <net/cnss.h>
- #endif
- #include "epping_main.h"
- #include "hif_debug.h"
- #include "ce_internal.h"
- #include "ce_reg.h"
- #include "ce_assignment.h"
- #include "ce_tasklet.h"
- #include "platform_icnss.h"
- #include "qwlan_version.h"
- #include <cds_api.h>
- #define CE_POLL_TIMEOUT 10 /* ms */
- /* Forward references */
- static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
- /*
- * Fix EV118783: poll to check whether a BMI response has arrived,
- * rather than only waiting for an interrupt that may be lost.
- */
- /* #define BMI_RSP_POLLING */
- #define BMI_RSP_TO_MILLISEC 1000
- #ifdef CONFIG_BYPASS_QMI
- #define BYPASS_QMI 1
- #else
- #define BYPASS_QMI 0
- #endif
- static int hif_post_recv_buffers(struct hif_softc *scn);
- static void hif_config_rri_on_ddr(struct hif_softc *scn);
- static void ce_poll_timeout(void *arg)
- {
- struct CE_state *CE_state = (struct CE_state *)arg;
- if (CE_state->timer_inited) {
- ce_per_engine_service(CE_state->scn, CE_state->id);
- qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
- }
- }
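- /*
- * roundup_pwr2() - round n up to the next power of two.
- *
- * Ring sizes must be powers of two so that nentries - 1 can serve as
- * the ring index mask (see nentries_mask below).
- * Example: roundup_pwr2(24) returns 32; roundup_pwr2(32) returns 32.
- */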
- static unsigned int roundup_pwr2(unsigned int n)
- {
- int i;
- unsigned int test_pwr2;
- if (!(n & (n - 1)))
- return n; /* already a power of 2 */
- test_pwr2 = 4;
- for (i = 0; i < 29; i++) {
- if (test_pwr2 > n)
- return test_pwr2;
- test_pwr2 = test_pwr2 << 1;
- }
- QDF_ASSERT(0); /* n too large */
- return 0;
- }
- #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
- #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
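- /*
- * Each entry in the map below pairs a CE id with the shadow-register
- * offset of its write index: source (h->t) CEs use the SRC offset and
- * destination (t->h) CEs use the DST offset. CE 7 appears in both
- * lists because it is bidirectional (diag window; see the CE_PCI
- * table below).
- */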
- static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
- { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
- { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
- { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
- { 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
- { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
- { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
- { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
- { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
- { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
- };
- /* CE_PCI TABLE */
- /*
- * NOTE: the table below is out of date, though still a useful reference.
- * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
- * mapping of HTC services to HIF pipes.
- */
- /*
- * This authoritative table defines Copy Engine configuration and the mapping
- * of services/endpoints to CEs. A subset of this information is passed to
- * the Target during startup as a prerequisite to entering BMI phase.
- * See:
- * target_service_to_ce_map - Target-side mapping
- * hif_map_service_to_pipe - Host-side mapping
- * target_ce_config - Target-side configuration
- * host_ce_config - Host-side configuration
- ============================================================================
- Purpose | Service / Endpoint | CE | Dir | Xfer Size | Xfer Frequency
- ============================================================================
- tx | HTT_DATA (downlink) | CE 0 | h->t | medium - | very frequent
- descriptor | | | | O(100B) | and regular
- download | | | | |
- ----------------------------------------------------------------------------
- rx | HTT_DATA (uplink) | CE 1 | t->h | small - | frequent and
- indication | | | | O(10B) | regular
- upload | | | | |
- ----------------------------------------------------------------------------
- MSDU | DATA_BK (uplink) | CE 2 | t->h | large - | rare
- upload | | | | O(1000B) | (frequent
- e.g. noise | | | | | during IP1.0
- packets | | | | | testing)
- ----------------------------------------------------------------------------
- MSDU | DATA_BK (downlink) | CE 3 | h->t | large - | very rare
- download | | | | O(1000B) | (frequent
- e.g. | | | | | during IP1.0
- misdirecte | | | | | testing)
- d EAPOL | | | | |
- packets | | | | |
- ----------------------------------------------------------------------------
- n/a | DATA_BE, DATA_VI | CE 2 | t->h | | never(?)
- | DATA_VO (uplink) | | | |
- ----------------------------------------------------------------------------
- n/a | DATA_BE, DATA_VI | CE 3 | h->t | | never(?)
- | DATA_VO (downlink) | | | |
- ----------------------------------------------------------------------------
- WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
- | | | | O(100B) |
- ----------------------------------------------------------------------------
- WMI | WMI_CONTROL | CE 5 | h->t | medium - | infrequent
- messages | (downlink) | | | O(100B) |
- | | | | |
- ----------------------------------------------------------------------------
- n/a | HTC_CTRL_RSVD, | CE 1 | t->h | | never(?)
- | HTC_RAW_STREAMS | | | |
- | (uplink) | | | |
- ----------------------------------------------------------------------------
- n/a | HTC_CTRL_RSVD, | CE 0 | h->t | | never(?)
- | HTC_RAW_STREAMS | | | |
- | (downlink) | | | |
- ----------------------------------------------------------------------------
- diag | none (raw CE) | CE 7 | t<>h | 4 | Diag Window
- | | | | | infrequent
- ============================================================================
- */
- /*
- * Map from service/endpoint to Copy Engine.
- * This table is derived from the CE_PCI TABLE, above.
- * It is passed to the Target at startup for use by firmware.
- */
- static struct service_to_pipe target_service_to_ce_map_wlan[] = {
- {
- WMI_DATA_VO_SVC,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 3,
- },
- {
- WMI_DATA_VO_SVC,
- PIPEDIR_IN, /* in = DL = target -> host */
- 2,
- },
- {
- WMI_DATA_BK_SVC,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 3,
- },
- {
- WMI_DATA_BK_SVC,
- PIPEDIR_IN, /* in = DL = target -> host */
- 2,
- },
- {
- WMI_DATA_BE_SVC,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 3,
- },
- {
- WMI_DATA_BE_SVC,
- PIPEDIR_IN, /* in = DL = target -> host */
- 2,
- },
- {
- WMI_DATA_VI_SVC,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 3,
- },
- {
- WMI_DATA_VI_SVC,
- PIPEDIR_IN, /* in = DL = target -> host */
- 2,
- },
- {
- WMI_CONTROL_SVC,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 3,
- },
- {
- WMI_CONTROL_SVC,
- PIPEDIR_IN, /* in = DL = target -> host */
- 2,
- },
- {
- HTC_CTRL_RSVD_SVC,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 0, /* could be moved to 3 (share with WMI) */
- },
- {
- HTC_CTRL_RSVD_SVC,
- PIPEDIR_IN, /* in = DL = target -> host */
- 2,
- },
- {
- HTC_RAW_STREAMS_SVC, /* not currently used */
- PIPEDIR_OUT, /* out = UL = host -> target */
- 0,
- },
- {
- HTC_RAW_STREAMS_SVC, /* not currently used */
- PIPEDIR_IN, /* in = DL = target -> host */
- 2,
- },
- {
- HTT_DATA_MSG_SVC,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 4,
- },
- {
- HTT_DATA_MSG_SVC,
- PIPEDIR_IN, /* in = DL = target -> host */
- 1,
- },
- {
- WDI_IPA_TX_SVC,
- PIPEDIR_OUT, /* out = UL = host -> target */
- 5,
- },
- /* (Additions here) */
- { /* Must be last */
- 0,
- 0,
- 0,
- },
- };
- static struct service_to_pipe *target_service_to_ce_map =
- target_service_to_ce_map_wlan;
- static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);
- static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
- static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
- static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
- {WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
- {WMI_DATA_VO_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
- {WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */
- {WMI_DATA_BK_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */
- {WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
- {WMI_DATA_BE_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
- {WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
- {WMI_DATA_VI_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
- {WMI_CONTROL_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
- {WMI_CONTROL_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
- {HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
- {HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
- {HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
- {HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
- {HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */
- {HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */
- {0, 0, 0,}, /* Must be last */
- };
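- /*
- * Illustrative sketch only (not part of the original driver): how a
- * service map such as the tables above can be scanned to resolve the
- * pipe for a given service and direction. The real lookup is
- * hif_map_service_to_pipe(); the helper name and the -1 "not found"
- * convention here are hypothetical.
- */
- static inline int example_svc_to_pipe(struct service_to_pipe *map,
- size_t map_sz, uint32_t svc_id, uint32_t dir)
- {
- size_t i;
- for (i = 0; i < map_sz; i++) {
- /* e.g. WMI_CONTROL_SVC + PIPEDIR_OUT resolves to pipe 3 */
- if (map[i].service_id == svc_id && map[i].pipedir == dir)
- return map[i].pipenum;
- }
- return -1; /* service/direction not present in the map */
- }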
- /**
- * ce_mark_datapath() - mark ce_state->htt_rx_data/htt_tx_data as appropriate
- * @ce_state: pointer to the state context of the CE
- *
- * Description:
- * Sets the htt_rx_data or htt_tx_data attribute of the state structure
- * if the CE serves one of the HTT DATA services.
- *
- * Return: true if the CE serves an HTT DATA service, false otherwise.
- */
- bool ce_mark_datapath(struct CE_state *ce_state)
- {
- struct service_to_pipe *svc_map;
- size_t map_sz;
- int i;
- bool rc = false;
- if (ce_state != NULL) {
- if (WLAN_IS_EPPING_ENABLED(hif_get_conparam(ce_state->scn))) {
- svc_map = target_service_to_ce_map_wlan_epping;
- map_sz = sizeof(target_service_to_ce_map_wlan_epping) /
- sizeof(struct service_to_pipe);
- } else {
- svc_map = target_service_to_ce_map_wlan;
- map_sz = sizeof(target_service_to_ce_map_wlan) /
- sizeof(struct service_to_pipe);
- }
- for (i = 0; i < map_sz; i++) {
- if ((svc_map[i].pipenum == ce_state->id) &&
- ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
- (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
- (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
- /* HTT CEs are unidirectional */
- if (svc_map[i].pipedir == PIPEDIR_IN)
- ce_state->htt_rx_data = true;
- else
- ce_state->htt_tx_data = true;
- rc = true;
- }
- }
- }
- return rc;
- }
- /*
- * Initialize a Copy Engine based on caller-supplied attributes.
- * This may be called once to initialize both source and destination
- * rings or it may be called twice for separate source and destination
- * initialization. It may be that only one side or the other is
- * initialized by software/firmware.
- *
- * This should be called during the initialization sequence before
- * interrupts are enabled, so we don't have to worry about thread safety.
- */
- struct CE_handle *ce_init(struct hif_softc *scn,
- unsigned int CE_id, struct CE_attr *attr)
- {
- struct CE_state *CE_state;
- uint32_t ctrl_addr;
- unsigned int nentries;
- qdf_dma_addr_t base_addr;
- bool malloc_CE_state = false;
- bool malloc_src_ring = false;
- QDF_ASSERT(CE_id < scn->ce_count);
- ctrl_addr = CE_BASE_ADDRESS(CE_id);
- CE_state = scn->ce_id_to_state[CE_id];
- if (attr == NULL) {
- /* Already initialized; caller just wants the handle.
- * Check attr before it is dereferenced below.
- */
- return (struct CE_handle *)CE_state;
- }
- if (!CE_state) {
- CE_state =
- (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
- if (!CE_state) {
- HIF_ERROR("%s: CE_state has no mem", __func__);
- return NULL;
- }
- malloc_CE_state = true;
- qdf_mem_zero(CE_state, sizeof(*CE_state));
- scn->ce_id_to_state[CE_id] = CE_state;
- qdf_spinlock_create(&CE_state->ce_index_lock);
- CE_state->id = CE_id;
- CE_state->ctrl_addr = ctrl_addr;
- CE_state->state = CE_RUNNING;
- CE_state->attr_flags = attr->flags;
- }
- CE_state->scn = scn;
- qdf_atomic_init(&CE_state->rx_pending);
- #ifdef ADRASTEA_SHADOW_REGISTERS
- HIF_ERROR("%s: Using Shadow Registers instead of CE Registers\n",
- __func__);
- #endif
- if (CE_state->src_sz_max)
- QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
- else
- CE_state->src_sz_max = attr->src_sz_max;
- ce_init_ce_desc_event_log(CE_id,
- attr->src_nentries + attr->dest_nentries);
- /* source ring setup */
- nentries = attr->src_nentries;
- if (nentries) {
- struct CE_ring_state *src_ring;
- unsigned CE_nbytes;
- char *ptr;
- uint64_t dma_addr;
- nentries = roundup_pwr2(nentries);
- if (CE_state->src_ring) {
- QDF_ASSERT(CE_state->src_ring->nentries == nentries);
- } else {
- CE_nbytes = sizeof(struct CE_ring_state)
- + (nentries * sizeof(void *));
- ptr = qdf_mem_malloc(CE_nbytes);
- if (!ptr) {
- /* cannot allocate src ring. If the
- * CE_state is allocated locally free
- * CE_State and return error.
- */
- HIF_ERROR("%s: src ring has no mem", __func__);
- if (malloc_CE_state) {
- /* allocated CE_state locally */
- scn->ce_id_to_state[CE_id] = NULL;
- qdf_mem_free(CE_state);
- malloc_CE_state = false;
- }
- return NULL;
- } else {
- /* we can allocate src ring.
- * Mark that the src ring is
- * allocated locally
- */
- malloc_src_ring = true;
- }
- qdf_mem_zero(ptr, CE_nbytes);
- src_ring = CE_state->src_ring =
- (struct CE_ring_state *)ptr;
- ptr += sizeof(struct CE_ring_state);
- src_ring->nentries = nentries;
- src_ring->nentries_mask = nentries - 1;
- if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
- goto error_target_access;
- src_ring->hw_index =
- CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
- src_ring->sw_index = src_ring->hw_index;
- src_ring->write_index =
- CE_SRC_RING_WRITE_IDX_GET(scn, ctrl_addr);
- if (Q_TARGET_ACCESS_END(scn) < 0)
- goto error_target_access;
- src_ring->low_water_mark_nentries = 0;
- src_ring->high_water_mark_nentries = nentries;
- src_ring->per_transfer_context = (void **)ptr;
- /* Legacy platforms that do not support cache
- * coherent DMA are unsupported
- */
- src_ring->base_addr_owner_space_unaligned =
- qdf_mem_alloc_consistent(scn->qdf_dev,
- scn->qdf_dev->dev,
- (nentries *
- sizeof(struct CE_src_desc) +
- CE_DESC_RING_ALIGN),
- &base_addr);
- if (src_ring->base_addr_owner_space_unaligned
- == NULL) {
- HIF_ERROR("%s: src ring has no DMA mem",
- __func__);
- goto error_no_dma_mem;
- }
- src_ring->base_addr_CE_space_unaligned = base_addr;
- if (src_ring->
- base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN
- - 1)) {
- src_ring->base_addr_CE_space =
- (src_ring->base_addr_CE_space_unaligned
- + CE_DESC_RING_ALIGN -
- 1) & ~(CE_DESC_RING_ALIGN - 1);
- src_ring->base_addr_owner_space =
- (void
- *)(((size_t) src_ring->
- base_addr_owner_space_unaligned +
- CE_DESC_RING_ALIGN -
- 1) & ~(CE_DESC_RING_ALIGN - 1));
- } else {
- src_ring->base_addr_CE_space =
- src_ring->base_addr_CE_space_unaligned;
- src_ring->base_addr_owner_space =
- src_ring->
- base_addr_owner_space_unaligned;
- }
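- /*
- * Note: the idiom used above, (addr + align - 1) & ~(align - 1),
- * rounds addr up to the next align-byte boundary; e.g. if
- * CE_DESC_RING_ALIGN were 8, an address of 0x1003 would round
- * up to 0x1008.
- */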
- /*
- * Also allocate a shadow src ring in
- * regular mem to use for faster access.
- */
- src_ring->shadow_base_unaligned =
- qdf_mem_malloc(nentries *
- sizeof(struct CE_src_desc) +
- CE_DESC_RING_ALIGN);
- if (src_ring->shadow_base_unaligned == NULL) {
- HIF_ERROR("%s: src ring no shadow_base mem",
- __func__);
- goto error_no_dma_mem;
- }
- src_ring->shadow_base = (struct CE_src_desc *)
- (((size_t) src_ring->shadow_base_unaligned +
- CE_DESC_RING_ALIGN - 1) &
- ~(CE_DESC_RING_ALIGN - 1));
- if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
- goto error_target_access;
- dma_addr = src_ring->base_addr_CE_space;
- CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
- (uint32_t)(dma_addr & 0xFFFFFFFF));
- #ifdef WLAN_ENABLE_QCA6180
- {
- uint32_t tmp;
- tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
- scn, ctrl_addr);
- tmp &= ~0x1F;
- dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
- CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
- ctrl_addr, (uint32_t)dma_addr);
- }
- #endif
- CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
- CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
- #ifdef BIG_ENDIAN_HOST
- /* Enable source ring byte swap for big endian host */
- CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
- #endif
- CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
- CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
- if (Q_TARGET_ACCESS_END(scn) < 0)
- goto error_target_access;
- }
- }
- /* destination ring setup */
- nentries = attr->dest_nentries;
- if (nentries) {
- struct CE_ring_state *dest_ring;
- unsigned CE_nbytes;
- char *ptr;
- uint64_t dma_addr;
- nentries = roundup_pwr2(nentries);
- if (CE_state->dest_ring) {
- QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
- } else {
- CE_nbytes = sizeof(struct CE_ring_state)
- + (nentries * sizeof(void *));
- ptr = qdf_mem_malloc(CE_nbytes);
- if (!ptr) {
- /* cannot allocate dst ring. If the CE_state
- * or src ring is allocated locally free
- * CE_State and src ring and return error.
- */
- HIF_ERROR("%s: dest ring has no mem",
- __func__);
- if (malloc_src_ring) {
- qdf_mem_free(CE_state->src_ring);
- CE_state->src_ring = NULL;
- malloc_src_ring = false;
- }
- if (malloc_CE_state) {
- /* allocated CE_state locally */
- scn->ce_id_to_state[CE_id] = NULL;
- qdf_mem_free(CE_state);
- malloc_CE_state = false;
- }
- return NULL;
- }
- qdf_mem_zero(ptr, CE_nbytes);
- dest_ring = CE_state->dest_ring =
- (struct CE_ring_state *)ptr;
- ptr += sizeof(struct CE_ring_state);
- dest_ring->nentries = nentries;
- dest_ring->nentries_mask = nentries - 1;
- if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
- goto error_target_access;
- dest_ring->sw_index =
- CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
- dest_ring->write_index =
- CE_DEST_RING_WRITE_IDX_GET(scn, ctrl_addr);
- if (Q_TARGET_ACCESS_END(scn) < 0)
- goto error_target_access;
- dest_ring->low_water_mark_nentries = 0;
- dest_ring->high_water_mark_nentries = nentries;
- dest_ring->per_transfer_context = (void **)ptr;
- /* Legacy platforms that do not support cache
- * coherent DMA are unsupported */
- dest_ring->base_addr_owner_space_unaligned =
- qdf_mem_alloc_consistent(scn->qdf_dev,
- scn->qdf_dev->dev,
- (nentries *
- sizeof(struct CE_dest_desc) +
- CE_DESC_RING_ALIGN),
- &base_addr);
- if (dest_ring->base_addr_owner_space_unaligned
- == NULL) {
- HIF_ERROR("%s: dest ring has no DMA mem",
- __func__);
- goto error_no_dma_mem;
- }
- dest_ring->base_addr_CE_space_unaligned = base_addr;
- /* Initialize the memory to 0 so that garbage
- * data does not crash the system during
- * firmware download.
- */
- qdf_mem_zero(dest_ring->base_addr_owner_space_unaligned,
- nentries * sizeof(struct CE_dest_desc) +
- CE_DESC_RING_ALIGN);
- if (dest_ring->
- base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN -
- 1)) {
- dest_ring->base_addr_CE_space =
- (dest_ring->
- base_addr_CE_space_unaligned +
- CE_DESC_RING_ALIGN -
- 1) & ~(CE_DESC_RING_ALIGN - 1);
- dest_ring->base_addr_owner_space =
- (void
- *)(((size_t) dest_ring->
- base_addr_owner_space_unaligned +
- CE_DESC_RING_ALIGN -
- 1) & ~(CE_DESC_RING_ALIGN - 1));
- } else {
- dest_ring->base_addr_CE_space =
- dest_ring->base_addr_CE_space_unaligned;
- dest_ring->base_addr_owner_space =
- dest_ring->
- base_addr_owner_space_unaligned;
- }
- if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
- goto error_target_access;
- dma_addr = dest_ring->base_addr_CE_space;
- CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
- (uint32_t)(dma_addr & 0xFFFFFFFF));
- #ifdef WLAN_ENABLE_QCA6180
- {
- uint32_t tmp;
- tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
- ctrl_addr);
- tmp &= ~0x1F;
- dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
- CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
- ctrl_addr, (uint32_t)dma_addr);
- }
- #endif
- CE_DEST_RING_SZ_SET(scn, ctrl_addr, nentries);
- #ifdef BIG_ENDIAN_HOST
- /* Enable Dest ring byte swap for big endian host */
- CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
- #endif
- CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
- CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
- if (Q_TARGET_ACCESS_END(scn) < 0)
- goto error_target_access;
- /* epping */
- /* poll timer */
- if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
- qdf_timer_init(scn->qdf_dev,
- &CE_state->poll_timer,
- ce_poll_timeout,
- CE_state,
- QDF_TIMER_TYPE_SW);
- CE_state->timer_inited = true;
- qdf_timer_mod(&CE_state->poll_timer,
- CE_POLL_TIMEOUT);
- }
- }
- }
- /* Enable CE error interrupts */
- if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
- goto error_target_access;
- CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
- if (Q_TARGET_ACCESS_END(scn) < 0)
- goto error_target_access;
- /* update the htt_data attribute */
- ce_mark_datapath(CE_state);
- return (struct CE_handle *)CE_state;
- error_target_access:
- error_no_dma_mem:
- ce_fini((struct CE_handle *)CE_state);
- return NULL;
- }
- #ifdef WLAN_FEATURE_FASTPATH
- /**
- * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup
- * @ce_hdl: Copy engine handle
- *
- * No processing is required inside this function. Using an assert,
- * it makes sure that the TX CE has been processed completely.
- *
- * This is called while dismantling CE structures. No other thread
- * should be using these structures while dismantling is occurring,
- * therefore no locking is needed.
- *
- * Return: none
- */
- void
- ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
- {
- struct CE_state *ce_state = (struct CE_state *)ce_hdl;
- struct CE_ring_state *src_ring = ce_state->src_ring;
- struct hif_softc *sc = ce_state->scn;
- uint32_t sw_index, write_index;
- if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
- HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE\n",
- __func__, __LINE__);
- sw_index = src_ring->sw_index;
- write_index = src_ring->write_index;
- /* At this point Tx CE should be clean */
- qdf_assert_always(sw_index == write_index);
- }
- }
- /**
- * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
- * @ce_hdl: Handle to CE
- *
- * These buffers are never allocated on the fly, but
- * are allocated only once during HIF start and freed
- * only once during HIF stop.
- * NOTE:
- * The assumption here is there is no in-flight DMA in progress
- * currently, so that buffers can be freed up safely.
- *
- * Return: NONE
- */
- void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
- {
- struct CE_state *ce_state = (struct CE_state *)ce_hdl;
- struct CE_ring_state *dst_ring = ce_state->dest_ring;
- qdf_nbuf_t nbuf;
- int i;
- if (!ce_state->fastpath_handler)
- return;
- /*
- * In fastpath mode, datapath CEs are kept completely full: unlike
- * other CEs, they do not leave one blank space to distinguish an
- * empty queue from a full one. So free all the entries.
- */
- for (i = 0; i < dst_ring->nentries; i++) {
- nbuf = dst_ring->per_transfer_context[i];
- /*
- * The reasons for doing this check are:
- * 1) Protect against calling cleanup before allocating buffers
- * 2) In a corner case, FASTPATH_mode_on may be set, but we
- * could have a partially filled ring, because of a memory
- * allocation failure in the middle of allocating ring.
- * This check accounts for that case, checking
- * fastpath_mode_on flag or started flag would not have
- * covered that case. This is not in performance path,
- * so OK to do this.
- */
- if (nbuf)
- qdf_nbuf_free(nbuf);
- }
- }
- #else
- void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
- {
- }
- void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
- {
- }
- #endif /* WLAN_FEATURE_FASTPATH */
- void ce_fini(struct CE_handle *copyeng)
- {
- struct CE_state *CE_state = (struct CE_state *)copyeng;
- unsigned int CE_id = CE_state->id;
- struct hif_softc *scn = CE_state->scn;
- CE_state->state = CE_UNUSED;
- scn->ce_id_to_state[CE_id] = NULL;
- if (CE_state->src_ring) {
- /* Cleanup the datapath Tx ring */
- ce_h2t_tx_ce_cleanup(copyeng);
- if (CE_state->src_ring->shadow_base_unaligned)
- qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
- if (CE_state->src_ring->base_addr_owner_space_unaligned)
- qdf_mem_free_consistent(scn->qdf_dev,
- scn->qdf_dev->dev,
- (CE_state->src_ring->nentries *
- sizeof(struct CE_src_desc) +
- CE_DESC_RING_ALIGN),
- CE_state->src_ring->
- base_addr_owner_space_unaligned,
- CE_state->src_ring->
- base_addr_CE_space, 0);
- qdf_mem_free(CE_state->src_ring);
- }
- if (CE_state->dest_ring) {
- /* Cleanup the datapath Rx ring */
- ce_t2h_msg_ce_cleanup(copyeng);
- if (CE_state->dest_ring->base_addr_owner_space_unaligned)
- qdf_mem_free_consistent(scn->qdf_dev,
- scn->qdf_dev->dev,
- (CE_state->dest_ring->nentries *
- sizeof(struct CE_dest_desc) +
- CE_DESC_RING_ALIGN),
- CE_state->dest_ring->
- base_addr_owner_space_unaligned,
- CE_state->dest_ring->
- base_addr_CE_space, 0);
- qdf_mem_free(CE_state->dest_ring);
- /* epping */
- if (CE_state->timer_inited) {
- CE_state->timer_inited = false;
- qdf_timer_free(&CE_state->poll_timer);
- }
- }
- qdf_mem_free(CE_state);
- }
- void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
- {
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
- qdf_mem_zero(&hif_state->msg_callbacks_pending,
- sizeof(hif_state->msg_callbacks_pending));
- qdf_mem_zero(&hif_state->msg_callbacks_current,
- sizeof(hif_state->msg_callbacks_current));
- }
- /* Send the first nbytes bytes of the buffer */
- QDF_STATUS
- hif_send_head(struct hif_opaque_softc *hif_ctx,
- uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
- qdf_nbuf_t nbuf, unsigned int data_attr)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
- struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
- struct CE_handle *ce_hdl = pipe_info->ce_hdl;
- int bytes = nbytes, nfrags = 0;
- struct ce_sendlist sendlist;
- int status, i = 0;
- unsigned int mux_id = 0;
- QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));
- transfer_id =
- (mux_id & MUX_ID_MASK) |
- (transfer_id & TRANSACTION_ID_MASK);
- data_attr &= DESC_DATA_FLAG_MASK;
- /*
- * The common case involves sending multiple fragments within a
- * single download (the tx descriptor and the tx frame header).
- * So, optimize for the case of multiple fragments by not even
- * checking whether it's necessary to use a sendlist.
- * The overhead of using a sendlist for a single buffer download
- * is not a big deal, since it happens rarely (for WMI messages).
- */
- ce_sendlist_init(&sendlist);
- do {
- qdf_dma_addr_t frag_paddr;
- int frag_bytes;
- frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
- frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
- /*
- * Clear the packet offset for all but the first CE desc.
- */
- if (i++ > 0)
- data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
- status = ce_sendlist_buf_add(&sendlist, frag_paddr,
- frag_bytes >
- bytes ? bytes : frag_bytes,
- qdf_nbuf_get_frag_is_wordstream
- (nbuf,
- nfrags) ? 0 :
- CE_SEND_FLAG_SWAP_DISABLE,
- data_attr);
- if (status != QDF_STATUS_SUCCESS) {
- HIF_ERROR("%s: error, frag_num %d larger than limit",
- __func__, nfrags);
- return status;
- }
- bytes -= frag_bytes;
- nfrags++;
- } while (bytes > 0);
- /* Make sure we have resources to handle this request */
- qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
- if (pipe_info->num_sends_allowed < nfrags) {
- qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
- ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
- return QDF_STATUS_E_RESOURCES;
- }
- pipe_info->num_sends_allowed -= nfrags;
- qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
- if (qdf_unlikely(ce_hdl == NULL)) {
- HIF_ERROR("%s: error CE handle is null", __func__);
- return A_ERROR;
- }
- QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
- DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
- (uint8_t *)(qdf_nbuf_data(nbuf)),
- sizeof(qdf_nbuf_data(nbuf))));
- status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
- QDF_ASSERT(status == QDF_STATUS_SUCCESS);
- return status;
- }
- void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
- int force)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- if (!force) {
- int resources;
- /*
- * Decide whether to actually poll for completions, or just
- * wait for a later chance. If there seem to be plenty of
- * resources left, then just wait, since checking involves
- * reading a CE register, which is a relatively expensive
- * operation.
- */
- resources = hif_get_free_queue_number(hif_ctx, pipe);
- /*
- * If at least 50% of the total resources are still available,
- * don't bother checking again yet.
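- * (e.g. for a pipe with 32 src_nentries, polling is skipped while
- * more than 16 sends remain available).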
- */
- if (resources > (host_ce_config[pipe].src_nentries >> 1)) {
- return;
- }
- }
- #ifdef ATH_11AC_TXCOMPACT
- ce_per_engine_servicereap(scn, pipe);
- #else
- ce_per_engine_service(scn, pipe);
- #endif
- }
- uint16_t
- hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
- {
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
- struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
- uint16_t rv;
- qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
- rv = pipe_info->num_sends_allowed;
- qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
- return rv;
- }
- /* Called by lower (CE) layer when a send to Target completes. */
- void
- hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
- void *transfer_context, qdf_dma_addr_t CE_data,
- unsigned int nbytes, unsigned int transfer_id,
- unsigned int sw_index, unsigned int hw_index,
- unsigned int toeplitz_hash_result)
- {
- struct HIF_CE_pipe_info *pipe_info =
- (struct HIF_CE_pipe_info *)ce_context;
- struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
- struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
- unsigned int sw_idx = sw_index, hw_idx = hw_index;
- struct hif_msg_callbacks *msg_callbacks =
- &hif_state->msg_callbacks_current;
- do {
- /*
- * The upper layer callback will be triggered
- * when the last fragment is completed.
- */
- if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
- if (scn->target_status
- == OL_TRGET_STATUS_RESET)
- qdf_nbuf_free(transfer_context);
- else
- msg_callbacks->txCompletionHandler(
- msg_callbacks->Context,
- transfer_context, transfer_id,
- toeplitz_hash_result);
- }
- qdf_spin_lock(&pipe_info->completion_freeq_lock);
- pipe_info->num_sends_allowed++;
- qdf_spin_unlock(&pipe_info->completion_freeq_lock);
- } while (ce_completed_send_next(copyeng,
- &ce_context, &transfer_context,
- &CE_data, &nbytes, &transfer_id,
- &sw_idx, &hw_idx,
- &toeplitz_hash_result) == QDF_STATUS_SUCCESS);
- }
- /**
- * hif_ce_do_recv(): send message from copy engine to upper layers
- * @msg_callbacks: structure containing callback and callback context
- * @netbuf: skb containing message
- * @nbytes: number of bytes in the message
- * @pipe_info: used for the pipe_number info
- *
- * Checks the packet length, configures the length in the netbuf,
- * and calls the upper layer callback.
- *
- * return: None
- */
- static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
- qdf_nbuf_t netbuf, int nbytes,
- struct HIF_CE_pipe_info *pipe_info) {
- if (nbytes <= pipe_info->buf_sz) {
- qdf_nbuf_set_pktlen(netbuf, nbytes);
- msg_callbacks->
- rxCompletionHandler(msg_callbacks->Context,
- netbuf, pipe_info->pipe_num);
- } else {
- HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
- __func__, netbuf, nbytes);
- qdf_nbuf_free(netbuf);
- }
- }
- /* Called by lower (CE) layer when data is received from the Target. */
- void
- hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
- void *transfer_context, qdf_dma_addr_t CE_data,
- unsigned int nbytes, unsigned int transfer_id,
- unsigned int flags)
- {
- struct HIF_CE_pipe_info *pipe_info =
- (struct HIF_CE_pipe_info *)ce_context;
- struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
- struct CE_state *ce_state = (struct CE_state *) copyeng;
- struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
- #ifdef HIF_PCI
- struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
- #endif
- struct hif_msg_callbacks *msg_callbacks =
- &hif_state->msg_callbacks_current;
- uint32_t count;
- do {
- #ifdef HIF_PCI
- hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
- #endif
- qdf_nbuf_unmap_single(scn->qdf_dev,
- (qdf_nbuf_t) transfer_context,
- QDF_DMA_FROM_DEVICE);
- atomic_inc(&pipe_info->recv_bufs_needed);
- hif_post_recv_buffers_for_pipe(pipe_info);
- if (scn->target_status == OL_TRGET_STATUS_RESET)
- qdf_nbuf_free(transfer_context);
- else
- hif_ce_do_recv(msg_callbacks, transfer_context,
- nbytes, pipe_info);
- /* Set up force_break flag if num of receives reaches
- * MAX_NUM_OF_RECEIVES */
- ce_state->receive_count++;
- count = ce_state->receive_count;
- if (qdf_unlikely(hif_max_num_receives_reached(scn, count))) {
- ce_state->force_break = 1;
- break;
- }
- } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
- &CE_data, &nbytes, &transfer_id,
- &flags) == QDF_STATUS_SUCCESS);
- }
- /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
- void
- hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
- struct hif_msg_callbacks *callbacks)
- {
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
- #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
- spin_lock_init(&pcie_access_log_lock);
- #endif
- /* Save callbacks for later installation */
- qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
- sizeof(hif_state->msg_callbacks_pending));
- }
- int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
- {
- struct CE_handle *ce_diag = hif_state->ce_diag;
- int pipe_num;
- struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
- struct hif_msg_callbacks *hif_msg_callbacks =
- &hif_state->msg_callbacks_current;
- /* daemonize("hif_compl_thread"); */
- if (scn->ce_count == 0) {
- HIF_ERROR("%s: Invalid ce_count\n", __func__);
- return -EINVAL;
- }
- if (!hif_msg_callbacks ||
- !hif_msg_callbacks->rxCompletionHandler ||
- !hif_msg_callbacks->txCompletionHandler) {
- HIF_ERROR("%s: no completion handler registered", __func__);
- return -EFAULT;
- }
- A_TARGET_ACCESS_LIKELY(scn);
- for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
- struct CE_attr attr;
- struct HIF_CE_pipe_info *pipe_info;
- pipe_info = &hif_state->pipe_info[pipe_num];
- if (pipe_info->ce_hdl == ce_diag) {
- continue; /* Handle Diagnostic CE specially */
- }
- attr = host_ce_config[pipe_num];
- if (attr.src_nentries) {
- /* pipe used to send to target */
- HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
- __func__, pipe_num, pipe_info);
- ce_send_cb_register(pipe_info->ce_hdl,
- hif_pci_ce_send_done, pipe_info,
- attr.flags & CE_ATTR_DISABLE_INTR);
- pipe_info->num_sends_allowed = attr.src_nentries - 1;
- }
- if (attr.dest_nentries) {
- /* pipe used to receive from target */
- ce_recv_cb_register(pipe_info->ce_hdl,
- hif_pci_ce_recv_data, pipe_info,
- attr.flags & CE_ATTR_DISABLE_INTR);
- }
- if (attr.src_nentries)
- qdf_spinlock_create(&pipe_info->completion_freeq_lock);
- }
- A_TARGET_ACCESS_UNLIKELY(scn);
- return 0;
- }
- /*
- * Install pending msg callbacks.
- *
- * TBDXXX: This hack is needed because upper layers install msg callbacks
- * for use with HTC before BMI is done; yet this HIF implementation
- * needs to continue to use BMI msg callbacks. Really, upper layers
- * should not register HTC callbacks until AFTER BMI phase.
- */
- static void hif_msg_callbacks_install(struct hif_softc *scn)
- {
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
- qdf_mem_copy(&hif_state->msg_callbacks_current,
- &hif_state->msg_callbacks_pending,
- sizeof(hif_state->msg_callbacks_pending));
- }
- void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
- uint8_t *DLPipe)
- {
- int ul_is_polled, dl_is_polled;
- (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
- ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
- }
- /**
- * hif_dump_pipe_debug_count() - Log error count
- * @scn: hif_softc pointer.
- *
- * Output the pipe error counts of each pipe to log file
- *
- * Return: N/A
- */
- void hif_dump_pipe_debug_count(struct hif_softc *scn)
- {
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
- int pipe_num;
- if (hif_state == NULL) {
- HIF_ERROR("%s hif_state is NULL", __func__);
- return;
- }
- for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
- struct HIF_CE_pipe_info *pipe_info;
- pipe_info = &hif_state->pipe_info[pipe_num];
- if (pipe_info->nbuf_alloc_err_count > 0 ||
- pipe_info->nbuf_dma_err_count > 0 ||
- pipe_info->nbuf_ce_enqueue_err_count)
- HIF_ERROR(
- "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
- __func__, pipe_info->pipe_num,
- atomic_read(&pipe_info->recv_bufs_needed),
- pipe_info->nbuf_alloc_err_count,
- pipe_info->nbuf_dma_err_count,
- pipe_info->nbuf_ce_enqueue_err_count);
- }
- }
- static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
- {
- struct CE_handle *ce_hdl;
- qdf_size_t buf_sz;
- struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
- QDF_STATUS ret;
- uint32_t bufs_posted = 0;
- buf_sz = pipe_info->buf_sz;
- if (buf_sz == 0) {
- /* Unused Copy Engine */
- return 0;
- }
- ce_hdl = pipe_info->ce_hdl;
- qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
- while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
- qdf_dma_addr_t CE_data; /* CE space buffer address */
- qdf_nbuf_t nbuf;
- int status;
- atomic_dec(&pipe_info->recv_bufs_needed);
- qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
- nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
- if (!nbuf) {
- qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
- pipe_info->nbuf_alloc_err_count++;
- qdf_spin_unlock_bh(
- &pipe_info->recv_bufs_needed_lock);
- HIF_ERROR(
- "%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
- __func__, pipe_info->pipe_num,
- atomic_read(&pipe_info->recv_bufs_needed),
- pipe_info->nbuf_alloc_err_count);
- atomic_inc(&pipe_info->recv_bufs_needed);
- return 1;
- }
- /*
- * qdf_nbuf_peek_header(nbuf, &data, &unused);
- * CE_data = dma_map_single(dev, data, buf_sz,
- * DMA_FROM_DEVICE);
- */
- ret =
- qdf_nbuf_map_single(scn->qdf_dev, nbuf,
- QDF_DMA_FROM_DEVICE);
- if (unlikely(ret != QDF_STATUS_SUCCESS)) {
- qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
- pipe_info->nbuf_dma_err_count++;
- qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
- HIF_ERROR(
- "%s buf alloc error [%d] needed %d, nbuf_dma_err_count = %u",
- __func__, pipe_info->pipe_num,
- atomic_read(&pipe_info->recv_bufs_needed),
- pipe_info->nbuf_dma_err_count);
- qdf_nbuf_free(nbuf);
- atomic_inc(&pipe_info->recv_bufs_needed);
- return 1;
- }
- CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
- qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
- buf_sz, DMA_FROM_DEVICE);
- status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
- QDF_ASSERT(status == QDF_STATUS_SUCCESS);
- if (status != EOK) {
- qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
- pipe_info->nbuf_ce_enqueue_err_count++;
- qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
- HIF_ERROR(
- "%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
- __func__, pipe_info->pipe_num,
- atomic_read(&pipe_info->recv_bufs_needed),
- pipe_info->nbuf_ce_enqueue_err_count);
- atomic_inc(&pipe_info->recv_bufs_needed);
- qdf_nbuf_free(nbuf);
- return 1;
- }
- qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
- bufs_posted++;
- }
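- /*
- * Decrement each error counter by the number of buffers successfully
- * posted, saturating at zero, so transient failures that were later
- * recovered from do not accumulate across calls.
- */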
- pipe_info->nbuf_alloc_err_count =
- (pipe_info->nbuf_alloc_err_count > bufs_posted) ?
- pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
- pipe_info->nbuf_dma_err_count =
- (pipe_info->nbuf_dma_err_count > bufs_posted) ?
- pipe_info->nbuf_dma_err_count - bufs_posted : 0;
- pipe_info->nbuf_ce_enqueue_err_count =
- (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
- pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
- qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
- return 0;
- }
- /*
- * Try to post all desired receive buffers for all pipes.
- * Returns 0 if all desired buffers are posted,
- * non-zero if we were unable to completely
- * replenish receive buffers.
- */
- static int hif_post_recv_buffers(struct hif_softc *scn)
- {
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
- int pipe_num, rv = 0;
- A_TARGET_ACCESS_LIKELY(scn);
- for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
- struct HIF_CE_pipe_info *pipe_info;
- pipe_info = &hif_state->pipe_info[pipe_num];
- if (hif_post_recv_buffers_for_pipe(pipe_info)) {
- rv = 1;
- goto done;
- }
- }
- done:
- A_TARGET_ACCESS_UNLIKELY(scn);
- return rv;
- }
- QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
- hif_msg_callbacks_install(scn);
- if (hif_completion_thread_startup(hif_state))
- return QDF_STATUS_E_FAILURE;
- /* Post buffers once to start things off. */
- (void)hif_post_recv_buffers(scn);
- hif_state->started = true;
- return QDF_STATUS_SUCCESS;
- }
- #ifdef WLAN_FEATURE_FASTPATH
- /**
- * hif_enable_fastpath() - flag that fastpath mode is enabled
- * @hif_ctx: HIF context
- *
- * For use in data path
- *
- * Return: void
- */
- void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- HIF_INFO("Enabling fastpath mode\n");
- scn->fastpath_mode_on = true;
- }
- /**
- * hif_is_fastpath_mode_enabled - API to query if fastpath mode is enabled
- * @hif_ctx: HIF Context
- *
- * For use in data path to skip HTC
- *
- * Return: bool
- */
- bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- return scn->fastpath_mode_on;
- }
- /**
- * hif_get_ce_handle - API to get CE handle for FastPath mode
- * @hif_ctx: HIF Context
- * @id: CopyEngine Id
- *
- * API to return CE handle for fastpath mode
- *
- * Return: opaque CE handle for the given id
- */
- void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- return scn->ce_id_to_state[id];
- }
- #endif /* WLAN_FEATURE_FASTPATH */
- void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
- {
- struct hif_softc *scn;
- struct CE_handle *ce_hdl;
- uint32_t buf_sz;
- struct HIF_CE_state *hif_state;
- qdf_nbuf_t netbuf;
- qdf_dma_addr_t CE_data;
- void *per_CE_context;
- buf_sz = pipe_info->buf_sz;
- if (buf_sz == 0) {
- /* Unused Copy Engine */
- return;
- }
- hif_state = pipe_info->HIF_CE_state;
- if (!hif_state->started) {
- return;
- }
- scn = HIF_GET_SOFTC(hif_state);
- ce_hdl = pipe_info->ce_hdl;
- if (scn->qdf_dev == NULL) {
- return;
- }
- while (ce_revoke_recv_next
- (ce_hdl, &per_CE_context, (void **)&netbuf,
- &CE_data) == QDF_STATUS_SUCCESS) {
- qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
- QDF_DMA_FROM_DEVICE);
- qdf_nbuf_free(netbuf);
- }
- }
- void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
- {
- struct CE_handle *ce_hdl;
- struct HIF_CE_state *hif_state;
- struct hif_softc *scn;
- qdf_nbuf_t netbuf;
- void *per_CE_context;
- qdf_dma_addr_t CE_data;
- unsigned int nbytes;
- unsigned int id;
- uint32_t buf_sz;
- uint32_t toeplitz_hash_result;
- buf_sz = pipe_info->buf_sz;
- if (buf_sz == 0) {
- /* Unused Copy Engine */
- return;
- }
- hif_state = pipe_info->HIF_CE_state;
- if (!hif_state->started) {
- return;
- }
- scn = HIF_GET_SOFTC(hif_state);
- ce_hdl = pipe_info->ce_hdl;
- while (ce_cancel_send_next
- (ce_hdl, &per_CE_context,
- (void **)&netbuf, &CE_data, &nbytes,
- &id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
- if (netbuf != CE_SENDLIST_ITEM_CTXT) {
- /*
- * Packets enqueued by htt_h2t_ver_req_msg() and
- * htt_h2t_rx_ring_cfg_msg_ll() have already been
- * freed in htt_htc_misc_pkt_pool_free() in
- * wlantl_close(), so do not free them here again
- * by checking whether it's the endpoint
- * which they are queued in.
- */
- if (id == scn->htc_endpoint)
- return;
- /* Indicate the completion to higher
- * layer to free the buffer */
- hif_state->msg_callbacks_current.
- txCompletionHandler(hif_state->
- msg_callbacks_current.Context,
- netbuf, id, toeplitz_hash_result);
- }
- }
- }
- /*
- * Cleanup residual buffers for device shutdown:
- * buffers that were enqueued for receive
- * buffers that were to be sent
- * Note: Buffers that had completed but which were
- * not yet processed are on a completion queue. They
- * are handled when the completion thread shuts down.
- */
- void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
- {
- int pipe_num;
- struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
- for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
- struct HIF_CE_pipe_info *pipe_info;
- pipe_info = &hif_state->pipe_info[pipe_num];
- hif_recv_buffer_cleanup_on_pipe(pipe_info);
- hif_send_buffer_cleanup_on_pipe(pipe_info);
- }
- }
- void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
- hif_buffer_cleanup(hif_state);
- }
- void hif_stop(struct hif_opaque_softc *hif_ctx)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
- int pipe_num;
- scn->hif_init_done = false;
- /*
- * At this point, asynchronous threads are stopped,
- * The Target should not DMA nor interrupt, Host code may
- * not initiate anything more. So we just need to clean
- * up Host-side state.
- */
- if (scn->athdiag_procfs_inited) {
- athdiag_procfs_remove();
- scn->athdiag_procfs_inited = false;
- }
- hif_buffer_cleanup(hif_state);
- for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
- struct HIF_CE_pipe_info *pipe_info;
- pipe_info = &hif_state->pipe_info[pipe_num];
- if (pipe_info->ce_hdl) {
- ce_fini(pipe_info->ce_hdl);
- pipe_info->ce_hdl = NULL;
- pipe_info->buf_sz = 0;
- }
- }
- if (hif_state->sleep_timer_init) {
- qdf_timer_stop(&hif_state->sleep_timer);
- qdf_timer_free(&hif_state->sleep_timer);
- hif_state->sleep_timer_init = false;
- }
- hif_state->started = false;
- }
- /**
- * hif_get_target_ce_config() - get copy engine configuration
- * @target_ce_config_ret: basic copy engine configuration
- * @target_ce_config_sz_ret: size of the basic configuration in bytes
- * @target_service_to_ce_map_ret: service mapping for the copy engines
- * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
- * @target_shadow_reg_cfg_ret: shadow register configuration
- * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
- *
- * Provides accessors to these values outside of this file.
- * Currently these are stored in static pointers to const sections.
- * There are multiple configurations that are selected from at compile time.
- * Runtime selection would need to consider mode, target type and bus type.
- *
- * Return: return by parameter.
- */
- void hif_get_target_ce_config(struct CE_pipe_config **target_ce_config_ret,
- int *target_ce_config_sz_ret,
- struct service_to_pipe **target_service_to_ce_map_ret,
- int *target_service_to_ce_map_sz_ret,
- struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
- int *shadow_cfg_sz_ret)
- {
- *target_ce_config_ret = target_ce_config;
- *target_ce_config_sz_ret = target_ce_config_sz;
- *target_service_to_ce_map_ret = target_service_to_ce_map;
- *target_service_to_ce_map_sz_ret = target_service_to_ce_map_sz;
- if (target_shadow_reg_cfg_ret)
- *target_shadow_reg_cfg_ret = target_shadow_reg_cfg;
- if (shadow_cfg_sz_ret)
- *shadow_cfg_sz_ret = shadow_cfg_sz;
- }
- /**
- * hif_wlan_enable(): call the platform driver to enable wlan
- * @scn: HIF Context
- *
- * This function passes the con_mode and CE configuration to
- * platform driver to enable wlan.
- *
- * Return: linux error code
- */
- int hif_wlan_enable(struct hif_softc *scn)
- {
- struct icnss_wlan_enable_cfg cfg;
- enum icnss_driver_mode mode;
- uint32_t con_mode = hif_get_conparam(scn);
- hif_get_target_ce_config((struct CE_pipe_config **)&cfg.ce_tgt_cfg,
- &cfg.num_ce_tgt_cfg,
- (struct service_to_pipe **)&cfg.ce_svc_cfg,
- &cfg.num_ce_svc_pipe_cfg,
- (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
- &cfg.num_shadow_reg_cfg);
- /* translate from structure size to array size */
- cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
- cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
- cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);
- if (QDF_GLOBAL_FTM_MODE == con_mode)
- mode = ICNSS_FTM;
- else if (WLAN_IS_EPPING_ENABLED(con_mode))
- mode = ICNSS_EPPING;
- else
- mode = ICNSS_MISSION;
- if (BYPASS_QMI)
- return 0;
- else
- return icnss_wlan_enable(&cfg, mode, QWLAN_VERSIONSTR);
- }
- /**
- * hif_ce_prepare_config() - load the correct static tables.
- * @scn: hif context
- *
- * Epping uses different static attribute tables than mission mode.
- */
- void hif_ce_prepare_config(struct hif_softc *scn)
- {
- uint32_t mode = hif_get_conparam(scn);
- /* if epping is enabled we need to use the epping configuration. */
- if (WLAN_IS_EPPING_ENABLED(mode)) {
- if (WLAN_IS_EPPING_IRQ(mode))
- host_ce_config = host_ce_config_wlan_epping_irq;
- else
- host_ce_config = host_ce_config_wlan_epping_poll;
- target_ce_config = target_ce_config_wlan_epping;
- target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
- target_service_to_ce_map =
- target_service_to_ce_map_wlan_epping;
- target_service_to_ce_map_sz =
- sizeof(target_service_to_ce_map_wlan_epping);
- }
- }
- /**
- * hif_ce_open() - do ce specific allocations
- * @hif_sc: pointer to hif context
- *
- * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
- */
- QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
- {
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
- qdf_spinlock_create(&hif_state->keep_awake_lock);
- return QDF_STATUS_SUCCESS;
- }
- /**
- * hif_ce_close() - do ce specific free
- * @hif_sc: pointer to hif context
- */
- void hif_ce_close(struct hif_softc *hif_sc)
- {
- }
- #ifdef WLAN_FEATURE_FASTPATH
- /**
- * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
- * @hif_hdl: Handle to HIF context
- *
- * Return: true if fastpath is enabled else false.
- */
- bool ce_is_fastpath_enabled(struct hif_opaque_softc *hif_hdl)
- {
- return HIF_GET_SOFTC(hif_hdl)->fastpath_mode_on;
- }
- /**
- * ce_is_fastpath_handler_registered() - check whether a fastpath handler
- * has been registered for this copy engine
- * @ce_state: handle to copy engine
- *
- * Return: true if fastpath handler is registered for datapath CE.
- */
- bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
- {
- return ce_state->fastpath_handler != NULL;
- }
- /**
- * hif_update_fastpath_recv_bufs_cnt() - increment the Rx buffer count of
- * each fastpath Rx CE by one
- * @hif_hdl: HIF handle
- *
- * Datapath Rx CEs are a special case: all of their message buffers are
- * reused, so every entry in the pipe must be posted, even at start-up,
- * unlike other CE pipes where one less than dest_nentries is filled
- * initially.
- *
- * Return: None
- */
- void hif_update_fastpath_recv_bufs_cnt(struct hif_opaque_softc *hif_hdl)
- {
- int pipe_num;
- struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
- if (!scn->fastpath_mode_on)
- return;
- for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
- struct HIF_CE_pipe_info *pipe_info =
- &hif_state->pipe_info[pipe_num];
- struct CE_state *ce_state =
- scn->ce_id_to_state[pipe_info->pipe_num];
- if (ce_state->htt_rx_data)
- atomic_inc(&pipe_info->recv_bufs_needed);
- }
- }
- #else
- bool ce_is_fastpath_enabled(struct hif_opaque_softc *scn)
- {
- return false;
- }
- void hif_update_fastpath_recv_bufs_cnt(struct hif_opaque_softc *scn)
- {
- }
- bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
- {
- return false;
- }
- #endif /* WLAN_FEATURE_FASTPATH */
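- /*
- * Illustrative sketch, not part of the driver: the initial Rx buffer
- * accounting described above. Ordinary pipes post one less than
- * dest_nentries at startup; fastpath HTT Rx pipes post the full ring
- * (hif_update_fastpath_recv_bufs_cnt() adds back the final slot).
- * "example_initial_recv_bufs" is an invented helper.
- */
- static int example_initial_recv_bufs(struct CE_attr *attr,
- bool is_fastpath_rx)
- {
- int bufs = attr->dest_nentries ? attr->dest_nentries - 1 : 0;
- if (is_fastpath_rx && attr->dest_nentries)
- bufs += 1; /* buffers are reused: post every descriptor slot */
- return bufs;
- }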
- /**
- * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
- * @hif_sc: hif context
- *
- * uses state variables to support cleaning up when hif_config_ce fails.
- */
- void hif_unconfig_ce(struct hif_softc *hif_sc)
- {
- int pipe_num;
- struct HIF_CE_pipe_info *pipe_info;
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
- for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
- pipe_info = &hif_state->pipe_info[pipe_num];
- if (pipe_info->ce_hdl) {
- ce_unregister_irq(hif_state, (1 << pipe_num));
- hif_sc->request_irq_done = false;
- ce_fini(pipe_info->ce_hdl);
- pipe_info->ce_hdl = NULL;
- pipe_info->buf_sz = 0;
- }
- }
- if (hif_sc->athdiag_procfs_inited) {
- athdiag_procfs_remove();
- hif_sc->athdiag_procfs_inited = false;
- }
- }
- #ifdef CONFIG_BYPASS_QMI
- #define FW_SHARED_MEM (2 * 1024 * 1024)
- /**
- * hif_post_static_buf_to_target() - post static buffer to WLAN FW
- * @scn: pointer to HIF structure
- *
- * WLAN FW needs 2MB memory from DDR when QMI is disabled.
- *
- * Return: void
- */
- static void hif_post_static_buf_to_target(struct hif_softc *scn)
- {
- uint32_t CE_data;
- uint8_t *g_fw_mem;
- uint32_t phys_addr;
- g_fw_mem = kzalloc(FW_SHARED_MEM, GFP_KERNEL);
- if (!g_fw_mem) {
- pr_err("%s: 2MB FW buffer allocation failed\n", __func__);
- return;
- }
- CE_data = dma_map_single(scn->cdf_dev->dev, g_fw_mem,
- FW_SHARED_MEM, CDF_DMA_FROM_DEVICE);
- HIF_TRACE("g_fw_mem %p physical 0x%x\n", g_fw_mem, CE_data);
- if (dma_mapping_error(scn->cdf_dev->dev, CE_data)) {
- pr_err("DMA map failed\n");
- return;
- }
- phys_addr = virt_to_phys((scn->mem + BYPASS_QMI_TEMP_REGISTER));
- hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, CE_data);
- HIF_TRACE("Write phy address 0x%x into scratch reg %p phy add 0x%x",
- CE_data, (scn->mem + BYPASS_QMI_TEMP_REGISTER), phys_addr);
- }
- #else
- static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
- {
- return;
- }
- #endif
- /**
- * hif_config_ce() - configure copy engines
- * @scn: hif context
- *
- * Prepares fw, copy engine hardware and host sw according
- * to the attributes selected by hif_ce_prepare_config.
- *
- * also calls athdiag_procfs_init
- *
- * return: 0 for success nonzero for failure.
- */
- int hif_config_ce(struct hif_softc *scn)
- {
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
- struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
- struct HIF_CE_pipe_info *pipe_info;
- int pipe_num;
- #ifdef ADRASTEA_SHADOW_REGISTERS
- int i;
- #endif
- QDF_STATUS rv = QDF_STATUS_SUCCESS;
- scn->notice_send = true;
- hif_post_static_buf_to_target(scn);
- hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;
- hif_config_rri_on_ddr(scn);
- /* During CE initialization */
- scn->ce_count = HOST_CE_COUNT;
- for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
- struct CE_attr *attr;
- pipe_info = &hif_state->pipe_info[pipe_num];
- pipe_info->pipe_num = pipe_num;
- pipe_info->HIF_CE_state = hif_state;
- attr = &host_ce_config[pipe_num];
- pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
- QDF_ASSERT(pipe_info->ce_hdl != NULL);
- if (pipe_info->ce_hdl == NULL) {
- rv = QDF_STATUS_E_FAILURE;
- A_TARGET_ACCESS_UNLIKELY(scn);
- goto err;
- }
- if (pipe_num == DIAG_CE_ID) {
- /* Reserve the last CE for
- * Diagnostic Window support */
- hif_state->ce_diag =
- hif_state->pipe_info[scn->ce_count - 1].ce_hdl;
- continue;
- }
- pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
- qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
- if (attr->dest_nentries > 0) {
- atomic_set(&pipe_info->recv_bufs_needed,
- init_buffer_count(attr->dest_nentries - 1));
- } else {
- atomic_set(&pipe_info->recv_bufs_needed, 0);
- }
- ce_tasklet_init(hif_state, (1 << pipe_num));
- ce_register_irq(hif_state, (1 << pipe_num));
- scn->request_irq_done = true;
- }
- if (athdiag_procfs_init(scn) != 0) {
- rv = QDF_STATUS_E_FAILURE;
- A_TARGET_ACCESS_UNLIKELY(scn);
- goto err;
- }
- scn->athdiag_procfs_inited = true;
- HIF_INFO_MED("%s: ce_init done", __func__);
- init_tasklet_workers(hif_hdl);
- HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
- #ifdef ADRASTEA_SHADOW_REGISTERS
- HIF_ERROR("Using Shadow Registers instead of CE Registers\n");
- for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
- HIF_ERROR("%s Shadow Register%d is mapped to address %x\n",
- __func__, i,
- (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
- }
- #endif
- return rv != QDF_STATUS_SUCCESS;
- err:
- /* Failure, so clean up */
- hif_unconfig_ce(scn);
- HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
- return rv != QDF_STATUS_SUCCESS;
- }
- #ifdef WLAN_FEATURE_FASTPATH
- /**
- * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
- * @handler: Callback function
- * @context: handle for callback function
- *
- * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
- */
- int hif_ce_fastpath_cb_register(fastpath_msg_handler handler, void *context)
- {
- struct hif_softc *scn =
- (struct hif_softc *)cds_get_context(QDF_MODULE_ID_HIF);
- struct CE_state *ce_state;
- int i;
- QDF_ASSERT(scn != NULL);
- if (!scn->fastpath_mode_on) {
- HIF_WARN("Fastpath mode disabled\n");
- return QDF_STATUS_E_FAILURE;
- }
- for (i = 0; i < CE_COUNT_MAX; i++) {
- ce_state = scn->ce_id_to_state[i];
- if (ce_state && ce_state->htt_rx_data) {
- ce_state->fastpath_handler = handler;
- ce_state->context = context;
- }
- }
- return QDF_STATUS_SUCCESS;
- }
- #else
- int hif_ce_fastpath_cb_register(fastpath_msg_handler handler, void *context)
- {
- return QDF_STATUS_SUCCESS;
- }
- #endif
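- /*
- * Illustrative sketch, not part of the driver: registering a fastpath
- * handler from a hypothetical datapath client. "example_enable_fastpath",
- * "my_htt_handler" and "my_htt_pdev" are invented names; the handler's
- * type is the fastpath_msg_handler expected by the API above.
- */
- static void example_enable_fastpath(fastpath_msg_handler my_htt_handler,
- void *my_htt_pdev)
- {
- if (hif_ce_fastpath_cb_register(my_htt_handler, my_htt_pdev) !=
- QDF_STATUS_SUCCESS)
- HIF_WARN("fastpath handler not registered\n");
- }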
- #ifdef IPA_OFFLOAD
- /**
- * hif_ipa_get_ce_resource() - get uc resource on hif
- * @hif_ctx: bus context
- * @ce_sr_base_paddr: copyengine source ring base physical address
- * @ce_sr_ring_size: copyengine source ring size
- * @ce_reg_paddr: copyengine register physical address
- *
- * When the IPA micro controller data path offload feature is enabled,
- * HIF releases the copy engine resource information to the IPA UC,
- * which then accesses the hardware resources using that information.
- *
- * Return: None
- */
- void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
- qdf_dma_addr_t *ce_sr_base_paddr,
- uint32_t *ce_sr_ring_size,
- qdf_dma_addr_t *ce_reg_paddr)
- {
- struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
- struct HIF_CE_pipe_info *pipe_info =
- &(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
- struct CE_handle *ce_hdl = pipe_info->ce_hdl;
- ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
- ce_reg_paddr);
- }
- #endif /* IPA_OFFLOAD */
- #ifdef ADRASTEA_SHADOW_REGISTERS
- /*
- Current shadow register config
- -----------------------------------------------------------
- Shadow Register | CE | src/dst write index
- -----------------------------------------------------------
- 0 | 0 | src
- 1 No Config - Doesn't point to anything
- 2 No Config - Doesn't point to anything
- 3 | 3 | src
- 4 | 4 | src
- 5 | 5 | src
- 6 No Config - Doesn't point to anything
- 7 | 7 | src
- 8 No Config - Doesn't point to anything
- 9 No Config - Doesn't point to anything
- 10 No Config - Doesn't point to anything
- 11 No Config - Doesn't point to anything
- -----------------------------------------------------------
- 12 No Config - Doesn't point to anything
- 13 | 1 | dst
- 14 | 2 | dst
- 15 No Config - Doesn't point to anything
- 16 No Config - Doesn't point to anything
- 17 No Config - Doesn't point to anything
- 18 No Config - Doesn't point to anything
- 19 | 7 | dst
- 20 | 8 | dst
- 21 No Config - Doesn't point to anything
- 22 No Config - Doesn't point to anything
- 23 No Config - Doesn't point to anything
- -----------------------------------------------------------
- ToDo - Move shadow register config to following in the future
- This helps free up a block of shadow registers towards the end.
- Can be used for other purposes
- -----------------------------------------------------------
- Shadow Register | CE | src/dst write index
- -----------------------------------------------------------
- 0 | 0 | src
- 1 | 3 | src
- 2 | 4 | src
- 3 | 5 | src
- 4 | 7 | src
- -----------------------------------------------------------
- 5 | 1 | dst
- 6 | 2 | dst
- 7 | 7 | dst
- 8 | 8 | dst
- -----------------------------------------------------------
- 9 No Config - Doesn't point to anything
- 10 No Config - Doesn't point to anything
- 11 No Config - Doesn't point to anything
- 12 No Config - Doesn't point to anything
- 13 No Config - Doesn't point to anything
- 14 No Config - Doesn't point to anything
- 15 No Config - Doesn't point to anything
- 16 No Config - Doesn't point to anything
- 17 No Config - Doesn't point to anything
- 18 No Config - Doesn't point to anything
- 19 No Config - Doesn't point to anything
- 20 No Config - Doesn't point to anything
- 21 No Config - Doesn't point to anything
- 22 No Config - Doesn't point to anything
- 23 No Config - Doesn't point to anything
- -----------------------------------------------------------
- */
- u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
- {
- u32 addr = 0;
- switch (COPY_ENGINE_ID(ctrl_addr)) {
- case 0:
- addr = SHADOW_VALUE0;
- break;
- case 3:
- addr = SHADOW_VALUE3;
- break;
- case 4:
- addr = SHADOW_VALUE4;
- break;
- case 5:
- addr = SHADOW_VALUE5;
- break;
- case 7:
- addr = SHADOW_VALUE7;
- break;
- default:
- HIF_ERROR("invalid CE ctrl_addr\n");
- QDF_ASSERT(0);
- }
- return addr;
- }
- u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
- {
- u32 addr = 0;
- switch (COPY_ENGINE_ID(ctrl_addr)) {
- case 1:
- addr = SHADOW_VALUE13;
- break;
- case 2:
- addr = SHADOW_VALUE14;
- break;
- case 7:
- addr = SHADOW_VALUE19;
- break;
- case 8:
- addr = SHADOW_VALUE20;
- break;
- default:
- HIF_ERROR("invalid CE ctrl_addr\n");
- QDF_ASSERT(0);
- }
- return addr;
- }
- #endif
- #if defined(FEATURE_LRO)
- /**
- * ce_lro_flush_cb_register() - register the LRO flush
- * callback
- * @hif_hdl: HIF context
- * @handler: callback function
- * @data: opaque data pointer to be passed back
- *
- * Store the LRO flush callback provided
- *
- * Return: none
- */
- void ce_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
- void (handler)(void *), void *data)
- {
- int i;
- struct CE_state *ce_state;
- struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
- QDF_ASSERT(scn != NULL);
- for (i = 0; i < CE_COUNT_MAX; i++) {
- ce_state = scn->ce_id_to_state[i];
- if (ce_state && ce_state->htt_rx_data) {
- ce_state->lro_flush_cb = handler;
- ce_state->lro_data = data;
- }
- }
- }
- /**
- * ce_lro_flush_cb_deregister() - deregister the LRO flush
- * callback
- * @hif_hdl: HIF context
- *
- * Remove the LRO flush callback
- *
- * Return: none
- */
- void ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
- {
- int i;
- struct CE_state *ce_state;
- struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
- QDF_ASSERT(scn != NULL);
- for (i = 0; i < CE_COUNT_MAX; i++) {
- ce_state = scn->ce_id_to_state[i];
- if (ce_state && ce_state->htt_rx_data) {
- ce_state->lro_flush_cb = NULL;
- ce_state->lro_data = NULL;
- }
- }
- }
- #endif
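- /*
- * Illustrative sketch, not part of the driver: typical pairing of the
- * LRO callback APIs above. "my_lro_flush", "my_lro_mgr" and
- * "example_lro_setup" are invented names; the callback signature matches
- * the void (*)(void *) expected by ce_lro_flush_cb_register().
- */
- static void my_lro_flush(void *data)
- {
- /* flush any LRO aggregation state held in 'data' */
- }
- static void example_lro_setup(struct hif_opaque_softc *hif_hdl,
- void *my_lro_mgr)
- {
- ce_lro_flush_cb_register(hif_hdl, my_lro_flush, my_lro_mgr);
- /* ... datapath runs, flush callback fires per Rx CE ... */
- ce_lro_flush_cb_deregister(hif_hdl);
- }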
- /**
- * hif_map_service_to_pipe() - returns the ce ids pertaining to
- * this service
- * @hif_hdl: opaque hif handle.
- * @svc_id: Service ID for which the mapping is needed.
- * @ul_pipe: address of the container in which ul pipe is returned.
- * @dl_pipe: address of the container in which dl pipe is returned.
- * @ul_is_polled: address of the container in which a bool
- * indicating if the UL CE for this service
- * is polled is returned.
- * @dl_is_polled: address of the container in which a bool
- * indicating if the DL CE for this service
- * is polled is returned.
- *
- * Return: Indicates whether this operation was successful.
- */
- int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
- uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
- int *dl_is_polled)
- {
- int status = QDF_STATUS_E_INVAL;
- unsigned int i;
- struct service_to_pipe element;
- struct service_to_pipe *tgt_svc_map_to_use;
- size_t sz_tgt_svc_map_to_use;
- struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
- uint32_t mode = hif_get_conparam(scn);
- if (WLAN_IS_EPPING_ENABLED(mode)) {
- tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
- sz_tgt_svc_map_to_use =
- sizeof(target_service_to_ce_map_wlan_epping);
- } else {
- tgt_svc_map_to_use = target_service_to_ce_map_wlan;
- sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_wlan);
- }
- *dl_is_polled = 0; /* polling for received messages not supported */
- for (i = 0; i < (sz_tgt_svc_map_to_use/sizeof(element)); i++) {
- memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
- if (element.service_id == svc_id) {
- status = QDF_STATUS_SUCCESS;
- if (element.pipedir == PIPEDIR_OUT)
- *ul_pipe = element.pipenum;
- else if (element.pipedir == PIPEDIR_IN)
- *dl_pipe = element.pipenum;
- }
- }
- if (status != QDF_STATUS_SUCCESS)
- return status; /* service not found in the map */
- *ul_is_polled =
- (host_ce_config[*ul_pipe].flags & CE_ATTR_DISABLE_INTR) != 0;
- return status;
- }
- #ifdef SHADOW_REG_DEBUG
- inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
- uint32_t CE_ctrl_addr)
- {
- uint32_t read_from_hw, srri_from_ddr = 0;
- read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);
- srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
- if (read_from_hw != srri_from_ddr) {
- HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
- srri_from_ddr, read_from_hw,
- CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
- QDF_ASSERT(0);
- }
- return srri_from_ddr;
- }
- inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
- uint32_t CE_ctrl_addr)
- {
- uint32_t read_from_hw, drri_from_ddr = 0;
- read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);
- drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));
- if (read_from_hw != drri_from_ddr) {
- HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
- drri_from_ddr, read_from_hw,
- CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
- QDF_ASSERT(0);
- }
- return drri_from_ddr;
- }
- #endif
- #ifdef ADRASTEA_RRI_ON_DDR
- /**
- * hif_get_src_ring_read_index(): Called to get the SRRI
- *
- * @scn: hif_softc pointer
- * @CE_ctrl_addr: base address of the CE whose RRI is to be read
- *
- * This function returns the SRRI to the caller. For CEs that
- * don't have interrupts enabled, we look at the DDR-based SRRI
- *
- * Return: SRRI
- */
- inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
- uint32_t CE_ctrl_addr)
- {
- struct CE_attr attr;
- attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
- if (attr.flags & CE_ATTR_DISABLE_INTR)
- return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
- else
- return A_TARGET_READ(scn,
- (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
- }
- /**
- * hif_get_dst_ring_read_index(): Called to get the DRRI
- *
- * @scn: hif_softc pointer
- * @CE_ctrl_addr: base address of the CE whose RRI is to be read
- *
- * This function returns the DRRI to the caller. For CEs that
- * don't have interrupts enabled, we look at the DDR-based DRRI
- *
- * Return: DRRI
- */
- inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
- uint32_t CE_ctrl_addr)
- {
- struct CE_attr attr;
- attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
- if (attr.flags & CE_ATTR_DISABLE_INTR)
- return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
- else
- return A_TARGET_READ(scn,
- (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
- }
- /**
- * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
- *
- * @scn: hif_softc pointer
- *
- * This function allocates non cached memory on ddr and sends
- * the physical address of this memory to the CE hardware. The
- * hardware updates the RRI on this particular location.
- *
- * Return: None
- */
- static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
- {
- unsigned int i;
- qdf_dma_addr_t paddr_rri_on_ddr;
- uint32_t high_paddr, low_paddr;
- scn->vaddr_rri_on_ddr =
- (uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
- scn->qdf_dev->dev, (CE_COUNT*sizeof(uint32_t)),
- &paddr_rri_on_ddr);
- low_paddr = BITS0_TO_31(paddr_rri_on_ddr);
- high_paddr = BITS32_TO_35(paddr_rri_on_ddr);
- HIF_ERROR("%s using srri and drri from DDR\n", __func__);
- WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
- WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);
- for (i = 0; i < CE_COUNT; i++)
- CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));
- qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT*sizeof(uint32_t));
- }
- #else
- /**
- * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
- *
- * @scn: hif_softc pointer
- *
- * This is a dummy implementation for platforms that don't
- * support this functionality.
- *
- * Return: None
- */
- static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
- {
- return;
- }
- #endif
- /**
- * hif_dump_ce_registers() - dump ce registers
- * @scn: hif_softc pointer.
- *
- * Output the copy engine registers
- *
- * Return: 0 for success or error code
- */
- int hif_dump_ce_registers(struct hif_softc *scn)
- {
- struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
- uint32_t ce_reg_address = CE0_BASE_ADDRESS;
- uint32_t ce_reg_values[CE_COUNT_MAX][CE_USEFUL_SIZE >> 2];
- uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
- uint16_t i;
- QDF_STATUS status;
- for (i = 0; i < CE_COUNT_MAX; i++, ce_reg_address += CE_OFFSET) {
- status = hif_diag_read_mem(hif_hdl, ce_reg_address,
- (uint8_t *) &ce_reg_values[i][0],
- ce_reg_word_size * sizeof(uint32_t));
- if (status != QDF_STATUS_SUCCESS) {
- HIF_ERROR("Dumping CE register failed!");
- return -EACCES;
- }
- HIF_ERROR("CE%d Registers:", i);
- qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
- (uint8_t *) &ce_reg_values[i][0],
- ce_reg_word_size * sizeof(uint32_t));
- }
- return 0;
- }
|