ce_main.c

  1. /*
  2. * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
  3. *
  4. * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  5. *
  6. *
  7. * Permission to use, copy, modify, and/or distribute this software for
  8. * any purpose with or without fee is hereby granted, provided that the
  9. * above copyright notice and this permission notice appear in all
  10. * copies.
  11. *
  12. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  13. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  14. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  15. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  16. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  17. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  18. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  19. * PERFORMANCE OF THIS SOFTWARE.
  20. */
  21. /*
  22. * This file was originally distributed by Qualcomm Atheros, Inc.
  23. * under proprietary terms before Copyright ownership was assigned
  24. * to the Linux Foundation.
  25. */
  26. #include "targcfg.h"
  27. #include "qdf_lock.h"
  28. #include "qdf_status.h"
  30. #include <qdf_atomic.h> /* qdf_atomic_read */
  31. #include <targaddrs.h>
  32. #include <bmi_msg.h>
  33. #include "hif_io32.h"
  34. #include <hif.h>
  35. #include "regtable.h"
  36. #define ATH_MODULE_NAME hif
  37. #include <a_debug.h>
  38. #include "hif_main.h"
  39. #include "ce_api.h"
  40. #include "qdf_trace.h"
  41. #ifdef CONFIG_CNSS
  42. #include <net/cnss.h>
  43. #endif
  44. #include "epping_main.h"
  45. #include "hif_debug.h"
  46. #include "ce_internal.h"
  47. #include "ce_reg.h"
  48. #include "ce_assignment.h"
  49. #include "ce_tasklet.h"
  50. #include "platform_icnss.h"
  51. #include "qwlan_version.h"
  52. #include <cds_api.h>
  53. #define CE_POLL_TIMEOUT 10 /* ms */
  54. /* Forward references */
  55. static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
  56. /*
  57. * Fix for EV118783: poll to check whether a BMI response has arrived,
  58. * rather than relying only on the interrupt, which may be lost.
  59. */
  60. /* #define BMI_RSP_POLLING */
  61. #define BMI_RSP_TO_MILLISEC 1000
  62. #ifdef CONFIG_BYPASS_QMI
  63. #define BYPASS_QMI 1
  64. #else
  65. #define BYPASS_QMI 0
  66. #endif
  67. static int hif_post_recv_buffers(struct hif_softc *scn);
  68. static void hif_config_rri_on_ddr(struct hif_softc *scn);
  69. static void ce_poll_timeout(void *arg)
  70. {
  71. struct CE_state *CE_state = (struct CE_state *)arg;
  72. if (CE_state->timer_inited) {
  73. ce_per_engine_service(CE_state->scn, CE_state->id);
  74. qdf_timer_mod(&CE_state->poll_timer, CE_POLL_TIMEOUT);
  75. }
  76. }
  77. static unsigned int roundup_pwr2(unsigned int n)
  78. {
  79. int i;
  80. unsigned int test_pwr2;
  81. if (!(n & (n - 1)))
  82. return n; /* already a power of 2 */
  83. test_pwr2 = 4;
  84. for (i = 0; i < 29; i++) {
  85. if (test_pwr2 > n)
  86. return test_pwr2;
  87. test_pwr2 = test_pwr2 << 1;
  88. }
  89. QDF_ASSERT(0); /* n too large */
  90. return 0;
  91. }
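/*
 * Illustrative note (editor's sketch, not part of the original driver code):
 * roundup_pwr2() returns its argument unchanged when it is already a power
 * of 2 and otherwise rounds it up to the next power of 2, for example:
 *
 *   roundup_pwr2(512) -> 512
 *   roundup_pwr2(100) -> 128
 *   roundup_pwr2(3)   -> 4
 *
 * ce_init() below depends on this so that (nentries - 1) can be used as a
 * ring index mask (nentries_mask).
 */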
  92. #define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
  93. #define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
  94. static struct shadow_reg_cfg target_shadow_reg_cfg_map[] = {
  95. { 0, ADRASTEA_SRC_WR_INDEX_OFFSET},
  96. { 3, ADRASTEA_SRC_WR_INDEX_OFFSET},
  97. { 4, ADRASTEA_SRC_WR_INDEX_OFFSET},
  98. { 5, ADRASTEA_SRC_WR_INDEX_OFFSET},
  99. { 7, ADRASTEA_SRC_WR_INDEX_OFFSET},
  100. { 1, ADRASTEA_DST_WR_INDEX_OFFSET},
  101. { 2, ADRASTEA_DST_WR_INDEX_OFFSET},
  102. { 7, ADRASTEA_DST_WR_INDEX_OFFSET},
  103. { 8, ADRASTEA_DST_WR_INDEX_OFFSET},
  104. };
  105. /* CE_PCI TABLE */
  106. /*
  107. * NOTE: the table below is out of date, though still a useful reference.
  108. * Refer to target_service_to_ce_map and hif_map_service_to_pipe for the actual
  109. * mapping of HTC services to HIF pipes.
  110. */
  111. /*
  112. * This authoritative table defines Copy Engine configuration and the mapping
  113. * of services/endpoints to CEs. A subset of this information is passed to
  114. * the Target during startup as a prerequisite to entering BMI phase.
  115. * See:
  116. * target_service_to_ce_map - Target-side mapping
  117. * hif_map_service_to_pipe - Host-side mapping
  118. * target_ce_config - Target-side configuration
  119. * host_ce_config - Host-side configuration
  120. ============================================================================
  121. Purpose | Service / Endpoint | CE | Dire | Xfer | Xfer
  122. | | | ctio | Size | Frequency
  123. | | | n | |
  124. ============================================================================
  125. tx | HTT_DATA (downlink) | CE 0 | h->t | medium - | very frequent
  126. descriptor | | | | O(100B) | and regular
  127. download | | | | |
  128. ----------------------------------------------------------------------------
  129. rx | HTT_DATA (uplink) | CE 1 | t->h | small - | frequent and
  130. indication | | | | O(10B) | regular
  131. upload | | | | |
  132. ----------------------------------------------------------------------------
  133. MSDU | DATA_BK (uplink) | CE 2 | t->h | large - | rare
  134. upload | | | | O(1000B) | (frequent
  135. e.g. noise | | | | | during IP1.0
  136. packets | | | | | testing)
  137. ----------------------------------------------------------------------------
  138. MSDU | DATA_BK (downlink) | CE 3 | h->t | large - | very rare
  139. download | | | | O(1000B) | (frequent
  140. e.g. | | | | | during IP1.0
  141. misdirecte | | | | | testing)
  142. d EAPOL | | | | |
  143. packets | | | | |
  144. ----------------------------------------------------------------------------
  145. n/a | DATA_BE, DATA_VI | CE 2 | t->h | | never(?)
  146. | DATA_VO (uplink) | | | |
  147. ----------------------------------------------------------------------------
  148. n/a | DATA_BE, DATA_VI | CE 3 | h->t | | never(?)
  149. | DATA_VO (downlink) | | | |
  150. ----------------------------------------------------------------------------
  151. WMI events | WMI_CONTROL (uplink) | CE 4 | t->h | medium - | infrequent
  152. | | | | O(100B) |
  153. ----------------------------------------------------------------------------
  154. WMI | WMI_CONTROL | CE 5 | h->t | medium - | infrequent
  155. messages | (downlink) | | | O(100B) |
  156. | | | | |
  157. ----------------------------------------------------------------------------
  158. n/a | HTC_CTRL_RSVD, | CE 1 | t->h | | never(?)
  159. | HTC_RAW_STREAMS | | | |
  160. | (uplink) | | | |
  161. ----------------------------------------------------------------------------
  162. n/a | HTC_CTRL_RSVD, | CE 0 | h->t | | never(?)
  163. | HTC_RAW_STREAMS | | | |
  164. | (downlink) | | | |
  165. ----------------------------------------------------------------------------
  166. diag | none (raw CE) | CE 7 | t<>h | 4 | Diag Window
  167. | | | | | infrequent
  168. ============================================================================
  169. */
  170. /*
  171. * Map from service/endpoint to Copy Engine.
  172. * This table is derived from the CE_PCI TABLE, above.
  173. * It is passed to the Target at startup for use by firmware.
  174. */
  175. static struct service_to_pipe target_service_to_ce_map_wlan[] = {
  176. {
  177. WMI_DATA_VO_SVC,
  178. PIPEDIR_OUT, /* out = UL = host -> target */
  179. 3,
  180. },
  181. {
  182. WMI_DATA_VO_SVC,
  183. PIPEDIR_IN, /* in = DL = target -> host */
  184. 2,
  185. },
  186. {
  187. WMI_DATA_BK_SVC,
  188. PIPEDIR_OUT, /* out = UL = host -> target */
  189. 3,
  190. },
  191. {
  192. WMI_DATA_BK_SVC,
  193. PIPEDIR_IN, /* in = DL = target -> host */
  194. 2,
  195. },
  196. {
  197. WMI_DATA_BE_SVC,
  198. PIPEDIR_OUT, /* out = UL = host -> target */
  199. 3,
  200. },
  201. {
  202. WMI_DATA_BE_SVC,
  203. PIPEDIR_IN, /* in = DL = target -> host */
  204. 2,
  205. },
  206. {
  207. WMI_DATA_VI_SVC,
  208. PIPEDIR_OUT, /* out = UL = host -> target */
  209. 3,
  210. },
  211. {
  212. WMI_DATA_VI_SVC,
  213. PIPEDIR_IN, /* in = DL = target -> host */
  214. 2,
  215. },
  216. {
  217. WMI_CONTROL_SVC,
  218. PIPEDIR_OUT, /* out = UL = host -> target */
  219. 3,
  220. },
  221. {
  222. WMI_CONTROL_SVC,
  223. PIPEDIR_IN, /* in = DL = target -> host */
  224. 2,
  225. },
  226. {
  227. HTC_CTRL_RSVD_SVC,
  228. PIPEDIR_OUT, /* out = UL = host -> target */
  229. 0, /* could be moved to 3 (share with WMI) */
  230. },
  231. {
  232. HTC_CTRL_RSVD_SVC,
  233. PIPEDIR_IN, /* in = DL = target -> host */
  234. 2,
  235. },
  236. {
  237. HTC_RAW_STREAMS_SVC, /* not currently used */
  238. PIPEDIR_OUT, /* out = UL = host -> target */
  239. 0,
  240. },
  241. {
  242. HTC_RAW_STREAMS_SVC, /* not currently used */
  243. PIPEDIR_IN, /* in = DL = target -> host */
  244. 2,
  245. },
  246. {
  247. HTT_DATA_MSG_SVC,
  248. PIPEDIR_OUT, /* out = UL = host -> target */
  249. 4,
  250. },
  251. {
  252. HTT_DATA_MSG_SVC,
  253. PIPEDIR_IN, /* in = DL = target -> host */
  254. 1,
  255. },
  256. {
  257. WDI_IPA_TX_SVC,
  258. PIPEDIR_OUT, /* out = UL = host -> target */
  259. 5,
  260. },
  261. /* (Additions here) */
  262. { /* Must be last */
  263. 0,
  264. 0,
  265. 0,
  266. },
  267. };
  268. static struct service_to_pipe *target_service_to_ce_map =
  269. target_service_to_ce_map_wlan;
  270. static int target_service_to_ce_map_sz = sizeof(target_service_to_ce_map_wlan);
  271. static struct shadow_reg_cfg *target_shadow_reg_cfg = target_shadow_reg_cfg_map;
  272. static int shadow_cfg_sz = sizeof(target_shadow_reg_cfg_map);
  273. static struct service_to_pipe target_service_to_ce_map_wlan_epping[] = {
  274. {WMI_DATA_VO_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
  275. {WMI_DATA_VO_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
  276. {WMI_DATA_BK_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */
  277. {WMI_DATA_BK_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */
  278. {WMI_DATA_BE_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
  279. {WMI_DATA_BE_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
  280. {WMI_DATA_VI_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
  281. {WMI_DATA_VI_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
  282. {WMI_CONTROL_SVC, PIPEDIR_OUT, 3,}, /* out = UL = host -> target */
  283. {WMI_CONTROL_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
  284. {HTC_CTRL_RSVD_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
  285. {HTC_CTRL_RSVD_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
  286. {HTC_RAW_STREAMS_SVC, PIPEDIR_OUT, 0,}, /* out = UL = host -> target */
  287. {HTC_RAW_STREAMS_SVC, PIPEDIR_IN, 2,}, /* in = DL = target -> host */
  288. {HTT_DATA_MSG_SVC, PIPEDIR_OUT, 4,}, /* out = UL = host -> target */
  289. {HTT_DATA_MSG_SVC, PIPEDIR_IN, 1,}, /* in = DL = target -> host */
  290. {0, 0, 0,}, /* Must be last */
  291. };
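/*
 * Illustrative sketch (editor's addition): a service-to-pipe lookup scans
 * one of the maps above and selects the entry whose service_id and pipedir
 * match; see hif_map_service_to_pipe() for the real host-side implementation.
 * Roughly (hypothetical local variables):
 *
 *   for (i = 0; i < map_sz; i++) {
 *       if (svc_map[i].service_id == svc_id &&
 *           svc_map[i].pipedir == PIPEDIR_OUT)
 *           *ul_pipe = svc_map[i].pipenum;
 *   }
 */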
  292. /**
  293. * ce_mark_datapath() - mark the CE as an HTT Rx or Tx datapath CE
  294. * @ce_state : pointer to the state context of the CE
  295. *
  296. * Description:
  297. * Sets htt_rx_data attribute of the state structure if the
  298. * CE serves one of the HTT DATA services.
  299. *
  300. * Return:
  301. * true if the CE serves an HTT DATA service (htt_rx_data or
  302. * htt_tx_data set), false otherwise.
  303. */
  304. bool ce_mark_datapath(struct CE_state *ce_state)
  305. {
  306. struct service_to_pipe *svc_map;
  307. size_t map_sz;
  308. int i;
  309. bool rc = false;
  310. if (ce_state != NULL) {
  311. if (WLAN_IS_EPPING_ENABLED(hif_get_conparam(ce_state->scn))) {
  312. svc_map = target_service_to_ce_map_wlan_epping;
  313. map_sz = sizeof(target_service_to_ce_map_wlan_epping) /
  314. sizeof(struct service_to_pipe);
  315. } else {
  316. svc_map = target_service_to_ce_map_wlan;
  317. map_sz = sizeof(target_service_to_ce_map_wlan) /
  318. sizeof(struct service_to_pipe);
  319. }
  320. for (i = 0; i < map_sz; i++) {
  321. if ((svc_map[i].pipenum == ce_state->id) &&
  322. ((svc_map[i].service_id == HTT_DATA_MSG_SVC) ||
  323. (svc_map[i].service_id == HTT_DATA2_MSG_SVC) ||
  324. (svc_map[i].service_id == HTT_DATA3_MSG_SVC))) {
  325. /* HTT CEs are unidirectional */
  326. if (svc_map[i].pipedir == PIPEDIR_IN)
  327. ce_state->htt_rx_data = true;
  328. else
  329. ce_state->htt_tx_data = true;
  330. rc = true;
  331. }
  332. }
  333. }
  334. return rc;
  335. }
  336. /*
  337. * Initialize a Copy Engine based on caller-supplied attributes.
  338. * This may be called once to initialize both source and destination
  339. * rings or it may be called twice for separate source and destination
  340. * initialization. It may be that only one side or the other is
  341. * initialized by software/firmware.
  342. *
  343. * This should be called during the initialization sequence before
  344. * interrupts are enabled, so we don't have to worry about thread safety.
  345. */
  346. struct CE_handle *ce_init(struct hif_softc *scn,
  347. unsigned int CE_id, struct CE_attr *attr)
  348. {
  349. struct CE_state *CE_state;
  350. uint32_t ctrl_addr;
  351. unsigned int nentries;
  352. qdf_dma_addr_t base_addr;
  353. bool malloc_CE_state = false;
  354. bool malloc_src_ring = false;
  355. QDF_ASSERT(CE_id < scn->ce_count);
  356. ctrl_addr = CE_BASE_ADDRESS(CE_id);
  357. CE_state = scn->ce_id_to_state[CE_id];
  358. if (!CE_state) {
  359. CE_state =
  360. (struct CE_state *)qdf_mem_malloc(sizeof(*CE_state));
  361. if (!CE_state) {
  362. HIF_ERROR("%s: CE_state has no mem", __func__);
  363. return NULL;
  364. }
  365. malloc_CE_state = true;
  366. qdf_mem_zero(CE_state, sizeof(*CE_state));
  367. scn->ce_id_to_state[CE_id] = CE_state;
  368. qdf_spinlock_create(&CE_state->ce_index_lock);
  369. CE_state->id = CE_id;
  370. CE_state->ctrl_addr = ctrl_addr;
  371. CE_state->state = CE_RUNNING;
  372. CE_state->attr_flags = attr->flags;
  373. }
  374. CE_state->scn = scn;
  375. qdf_atomic_init(&CE_state->rx_pending);
  376. if (attr == NULL) {
  377. /* Already initialized; caller wants the handle */
  378. return (struct CE_handle *)CE_state;
  379. }
  380. #ifdef ADRASTEA_SHADOW_REGISTERS
  381. HIF_ERROR("%s: Using Shadow Registers instead of CE Registers\n",
  382. __func__);
  383. #endif
  384. if (CE_state->src_sz_max)
  385. QDF_ASSERT(CE_state->src_sz_max == attr->src_sz_max);
  386. else
  387. CE_state->src_sz_max = attr->src_sz_max;
  388. ce_init_ce_desc_event_log(CE_id,
  389. attr->src_nentries + attr->dest_nentries);
  390. /* source ring setup */
  391. nentries = attr->src_nentries;
  392. if (nentries) {
  393. struct CE_ring_state *src_ring;
  394. unsigned CE_nbytes;
  395. char *ptr;
  396. uint64_t dma_addr;
  397. nentries = roundup_pwr2(nentries);
  398. if (CE_state->src_ring) {
  399. QDF_ASSERT(CE_state->src_ring->nentries == nentries);
  400. } else {
  401. CE_nbytes = sizeof(struct CE_ring_state)
  402. + (nentries * sizeof(void *));
  403. ptr = qdf_mem_malloc(CE_nbytes);
  404. if (!ptr) {
  405. /* cannot allocate src ring. If the
  406. * CE_state is allocated locally free
  407. * CE_State and return error.
  408. */
  409. HIF_ERROR("%s: src ring has no mem", __func__);
  410. if (malloc_CE_state) {
  411. /* allocated CE_state locally */
  412. scn->ce_id_to_state[CE_id] = NULL;
  413. qdf_mem_free(CE_state);
  414. malloc_CE_state = false;
  415. }
  416. return NULL;
  417. } else {
  418. /* we can allocate src ring.
  419. * Mark that the src ring is
  420. * allocated locally
  421. */
  422. malloc_src_ring = true;
  423. }
  424. qdf_mem_zero(ptr, CE_nbytes);
  425. src_ring = CE_state->src_ring =
  426. (struct CE_ring_state *)ptr;
  427. ptr += sizeof(struct CE_ring_state);
  428. src_ring->nentries = nentries;
  429. src_ring->nentries_mask = nentries - 1;
  430. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  431. goto error_target_access;
  432. src_ring->hw_index =
  433. CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);
  434. src_ring->sw_index = src_ring->hw_index;
  435. src_ring->write_index =
  436. CE_SRC_RING_WRITE_IDX_GET(scn, ctrl_addr);
  437. if (Q_TARGET_ACCESS_END(scn) < 0)
  438. goto error_target_access;
  439. src_ring->low_water_mark_nentries = 0;
  440. src_ring->high_water_mark_nentries = nentries;
  441. src_ring->per_transfer_context = (void **)ptr;
  442. /* Legacy platforms that do not support cache
  443. * coherent DMA are unsupported
  444. */
  445. src_ring->base_addr_owner_space_unaligned =
  446. qdf_mem_alloc_consistent(scn->qdf_dev,
  447. scn->qdf_dev->dev,
  448. (nentries *
  449. sizeof(struct CE_src_desc) +
  450. CE_DESC_RING_ALIGN),
  451. &base_addr);
  452. if (src_ring->base_addr_owner_space_unaligned
  453. == NULL) {
  454. HIF_ERROR("%s: src ring has no DMA mem",
  455. __func__);
  456. goto error_no_dma_mem;
  457. }
  458. src_ring->base_addr_CE_space_unaligned = base_addr;
  459. if (src_ring->
  460. base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN
  461. - 1)) {
  462. src_ring->base_addr_CE_space =
  463. (src_ring->base_addr_CE_space_unaligned
  464. + CE_DESC_RING_ALIGN -
  465. 1) & ~(CE_DESC_RING_ALIGN - 1);
  466. src_ring->base_addr_owner_space =
  467. (void
  468. *)(((size_t) src_ring->
  469. base_addr_owner_space_unaligned +
  470. CE_DESC_RING_ALIGN -
  471. 1) & ~(CE_DESC_RING_ALIGN - 1));
  472. } else {
  473. src_ring->base_addr_CE_space =
  474. src_ring->base_addr_CE_space_unaligned;
  475. src_ring->base_addr_owner_space =
  476. src_ring->
  477. base_addr_owner_space_unaligned;
  478. }
  479. /*
  480. * Also allocate a shadow src ring in
  481. * regular mem to use for faster access.
  482. */
  483. src_ring->shadow_base_unaligned =
  484. qdf_mem_malloc(nentries *
  485. sizeof(struct CE_src_desc) +
  486. CE_DESC_RING_ALIGN);
  487. if (src_ring->shadow_base_unaligned == NULL) {
  488. HIF_ERROR("%s: src ring no shadow_base mem",
  489. __func__);
  490. goto error_no_dma_mem;
  491. }
  492. src_ring->shadow_base = (struct CE_src_desc *)
  493. (((size_t) src_ring->shadow_base_unaligned +
  494. CE_DESC_RING_ALIGN - 1) &
  495. ~(CE_DESC_RING_ALIGN - 1));
  496. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  497. goto error_target_access;
  498. dma_addr = src_ring->base_addr_CE_space;
  499. CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
  500. (uint32_t)(dma_addr & 0xFFFFFFFF));
  501. #ifdef WLAN_ENABLE_QCA6180
  502. {
  503. uint32_t tmp;
  504. tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(
  505. scn, ctrl_addr);
  506. tmp &= ~0x1F;
  507. dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
  508. CE_SRC_RING_BASE_ADDR_HIGH_SET(scn,
  509. ctrl_addr, (uint32_t)dma_addr);
  510. }
  511. #endif
  512. CE_SRC_RING_SZ_SET(scn, ctrl_addr, nentries);
  513. CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
  514. #ifdef BIG_ENDIAN_HOST
  515. /* Enable source ring byte swap for big endian host */
  516. CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
  517. #endif
  518. CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
  519. CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
  520. if (Q_TARGET_ACCESS_END(scn) < 0)
  521. goto error_target_access;
  522. }
  523. }
  524. /* destination ring setup */
  525. nentries = attr->dest_nentries;
  526. if (nentries) {
  527. struct CE_ring_state *dest_ring;
  528. unsigned CE_nbytes;
  529. char *ptr;
  530. uint64_t dma_addr;
  531. nentries = roundup_pwr2(nentries);
  532. if (CE_state->dest_ring) {
  533. QDF_ASSERT(CE_state->dest_ring->nentries == nentries);
  534. } else {
  535. CE_nbytes = sizeof(struct CE_ring_state)
  536. + (nentries * sizeof(void *));
  537. ptr = qdf_mem_malloc(CE_nbytes);
  538. if (!ptr) {
  539. /* cannot allocate dst ring. If the CE_state
  540. * or src ring is allocated locally free
  541. * CE_State and src ring and return error.
  542. */
  543. HIF_ERROR("%s: dest ring has no mem",
  544. __func__);
  545. if (malloc_src_ring) {
  546. qdf_mem_free(CE_state->src_ring);
  547. CE_state->src_ring = NULL;
  548. malloc_src_ring = false;
  549. }
  550. if (malloc_CE_state) {
  551. /* allocated CE_state locally */
  552. scn->ce_id_to_state[CE_id] = NULL;
  553. qdf_mem_free(CE_state);
  554. malloc_CE_state = false;
  555. }
  556. return NULL;
  557. }
  558. qdf_mem_zero(ptr, CE_nbytes);
  559. dest_ring = CE_state->dest_ring =
  560. (struct CE_ring_state *)ptr;
  561. ptr += sizeof(struct CE_ring_state);
  562. dest_ring->nentries = nentries;
  563. dest_ring->nentries_mask = nentries - 1;
  564. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  565. goto error_target_access;
  566. dest_ring->sw_index =
  567. CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);
  568. dest_ring->write_index =
  569. CE_DEST_RING_WRITE_IDX_GET(scn, ctrl_addr);
  570. if (Q_TARGET_ACCESS_END(scn) < 0)
  571. goto error_target_access;
  572. dest_ring->low_water_mark_nentries = 0;
  573. dest_ring->high_water_mark_nentries = nentries;
  574. dest_ring->per_transfer_context = (void **)ptr;
  575. /* Legacy platforms that do not support cache
  576. * coherent DMA are unsupported */
  577. dest_ring->base_addr_owner_space_unaligned =
  578. qdf_mem_alloc_consistent(scn->qdf_dev,
  579. scn->qdf_dev->dev,
  580. (nentries *
  581. sizeof(struct CE_dest_desc) +
  582. CE_DESC_RING_ALIGN),
  583. &base_addr);
  584. if (dest_ring->base_addr_owner_space_unaligned
  585. == NULL) {
  586. HIF_ERROR("%s: dest ring has no DMA mem",
  587. __func__);
  588. goto error_no_dma_mem;
  589. }
  590. dest_ring->base_addr_CE_space_unaligned = base_addr;
  591. /* Correctly initialize memory to 0 to
  592. * prevent garbage data from crashing the system
  593. * when downloading firmware
  594. */
  595. qdf_mem_zero(dest_ring->base_addr_owner_space_unaligned,
  596. nentries * sizeof(struct CE_dest_desc) +
  597. CE_DESC_RING_ALIGN);
  598. if (dest_ring->
  599. base_addr_CE_space_unaligned & (CE_DESC_RING_ALIGN -
  600. 1)) {
  601. dest_ring->base_addr_CE_space =
  602. (dest_ring->
  603. base_addr_CE_space_unaligned +
  604. CE_DESC_RING_ALIGN -
  605. 1) & ~(CE_DESC_RING_ALIGN - 1);
  606. dest_ring->base_addr_owner_space =
  607. (void
  608. *)(((size_t) dest_ring->
  609. base_addr_owner_space_unaligned +
  610. CE_DESC_RING_ALIGN -
  611. 1) & ~(CE_DESC_RING_ALIGN - 1));
  612. } else {
  613. dest_ring->base_addr_CE_space =
  614. dest_ring->base_addr_CE_space_unaligned;
  615. dest_ring->base_addr_owner_space =
  616. dest_ring->
  617. base_addr_owner_space_unaligned;
  618. }
  619. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  620. goto error_target_access;
  621. dma_addr = dest_ring->base_addr_CE_space;
  622. CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
  623. (uint32_t)(dma_addr & 0xFFFFFFFF));
  624. #ifdef WLAN_ENABLE_QCA6180
  625. {
  626. uint32_t tmp;
  627. tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn,
  628. ctrl_addr);
  629. tmp &= ~0x1F;
  630. dma_addr = ((dma_addr >> 32) & 0x1F)|tmp;
  631. CE_DEST_RING_BASE_ADDR_HIGH_SET(scn,
  632. ctrl_addr, (uint32_t)dma_addr);
  633. }
  634. #endif
  635. CE_DEST_RING_SZ_SET(scn, ctrl_addr, nentries);
  636. #ifdef BIG_ENDIAN_HOST
  637. /* Enable Dest ring byte swap for big endian host */
  638. CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
  639. #endif
  640. CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
  641. CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, nentries);
  642. if (Q_TARGET_ACCESS_END(scn) < 0)
  643. goto error_target_access;
  644. /* epping */
  645. /* poll timer */
  646. if ((CE_state->attr_flags & CE_ATTR_ENABLE_POLL)) {
  647. qdf_timer_init(scn->qdf_dev,
  648. &CE_state->poll_timer,
  649. ce_poll_timeout,
  650. CE_state,
  651. QDF_TIMER_TYPE_SW);
  652. CE_state->timer_inited = true;
  653. qdf_timer_mod(&CE_state->poll_timer,
  654. CE_POLL_TIMEOUT);
  655. }
  656. }
  657. }
  658. /* Enable CE error interrupts */
  659. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  660. goto error_target_access;
  661. CE_ERROR_INTR_ENABLE(scn, ctrl_addr);
  662. if (Q_TARGET_ACCESS_END(scn) < 0)
  663. goto error_target_access;
  664. /* update the htt_data attribute */
  665. ce_mark_datapath(CE_state);
  666. return (struct CE_handle *)CE_state;
  667. error_target_access:
  668. error_no_dma_mem:
  669. ce_fini((struct CE_handle *)CE_state);
  670. return NULL;
  671. }
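/*
 * Illustrative usage sketch (editor's addition, hypothetical local names):
 * ce_init() is normally driven from HIF configuration code using the
 * per-pipe attributes in host_ce_config, roughly:
 *
 *   struct CE_attr *attr = &host_ce_config[CE_id];
 *   struct CE_handle *ce_hdl = ce_init(scn, CE_id, attr);
 *
 *   if (ce_hdl == NULL)
 *       return -ENOMEM;  // ce_init() frees whatever it allocated on failure
 *
 * Calling ce_init() again with attr == NULL for an already-initialized CE
 * simply returns the existing handle.
 */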
  672. #ifdef WLAN_FEATURE_FASTPATH
  673. /**
  674. * ce_h2t_tx_ce_cleanup() - placeholder function for H2T CE cleanup.
  675. * No processing is required inside this function.
  676. * @ce_hdl: Copy engine handle
  677. * Using an assert, this function makes sure that
  678. * the TX CE has been processed completely.
  679. *
  680. * This is called while dismantling CE structures. No other thread
  681. * should be using these structures while dismantling is occurring,
  682. * therefore no locking is needed.
  683. *
  684. * Return: none
  685. */
  686. void
  687. ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
  688. {
  689. struct CE_state *ce_state = (struct CE_state *)ce_hdl;
  690. struct CE_ring_state *src_ring = ce_state->src_ring;
  691. struct hif_softc *sc = ce_state->scn;
  692. uint32_t sw_index, write_index;
  693. if (sc->fastpath_mode_on && ce_state->htt_tx_data) {
  694. HIF_INFO("%s %d Fastpath mode ON, Cleaning up HTT Tx CE\n",
  695. __func__, __LINE__);
  696. sw_index = src_ring->sw_index;
  697. write_index = src_ring->write_index;
  698. /* At this point Tx CE should be clean */
  699. qdf_assert_always(sw_index == write_index);
  700. }
  701. }
  702. /**
  703. * ce_t2h_msg_ce_cleanup() - Cleanup buffers on the t2h datapath msg queue.
  704. * @ce_hdl: Handle to CE
  705. *
  706. * These buffers are never allocated on the fly, but
  707. * are allocated only once during HIF start and freed
  708. * only once during HIF stop.
  709. * NOTE:
  710. * The assumption here is there is no in-flight DMA in progress
  711. * currently, so that buffers can be freed up safely.
  712. *
  713. * Return: NONE
  714. */
  715. void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
  716. {
  717. struct CE_state *ce_state = (struct CE_state *)ce_hdl;
  718. struct CE_ring_state *dst_ring = ce_state->dest_ring;
  719. qdf_nbuf_t nbuf;
  720. int i;
  721. if (!ce_state->fastpath_handler)
  722. return;
  723. /*
  724. * When fastpath_mode is on and this is a datapath CE, the ring is kept
  725. * completely full: unlike other CEs, it does not leave one blank slot to
  726. * distinguish an empty queue from a full queue. So free all the
  727. * entries.
  728. */
  729. for (i = 0; i < dst_ring->nentries; i++) {
  730. nbuf = dst_ring->per_transfer_context[i];
  731. /*
  732. * The reasons for doing this check are:
  733. * 1) Protect against calling cleanup before allocating buffers
  734. * 2) In a corner case, FASTPATH_mode_on may be set, but we
  735. * could have a partially filled ring, because of a memory
  736. * allocation failure in the middle of allocating ring.
  737. * This check accounts for that case, checking
  738. * fastpath_mode_on flag or started flag would not have
  739. * covered that case. This is not in performance path,
  740. * so OK to do this.
  741. */
  742. if (nbuf)
  743. qdf_nbuf_free(nbuf);
  744. }
  745. }
  746. #else
  747. void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
  748. {
  749. }
  750. void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
  751. {
  752. }
  753. #endif /* WLAN_FEATURE_FASTPATH */
  754. void ce_fini(struct CE_handle *copyeng)
  755. {
  756. struct CE_state *CE_state = (struct CE_state *)copyeng;
  757. unsigned int CE_id = CE_state->id;
  758. struct hif_softc *scn = CE_state->scn;
  759. CE_state->state = CE_UNUSED;
  760. scn->ce_id_to_state[CE_id] = NULL;
  761. if (CE_state->src_ring) {
  762. /* Cleanup the datapath Tx ring */
  763. ce_h2t_tx_ce_cleanup(copyeng);
  764. if (CE_state->src_ring->shadow_base_unaligned)
  765. qdf_mem_free(CE_state->src_ring->shadow_base_unaligned);
  766. if (CE_state->src_ring->base_addr_owner_space_unaligned)
  767. qdf_mem_free_consistent(scn->qdf_dev,
  768. scn->qdf_dev->dev,
  769. (CE_state->src_ring->nentries *
  770. sizeof(struct CE_src_desc) +
  771. CE_DESC_RING_ALIGN),
  772. CE_state->src_ring->
  773. base_addr_owner_space_unaligned,
  774. CE_state->src_ring->
  775. base_addr_CE_space, 0);
  776. qdf_mem_free(CE_state->src_ring);
  777. }
  778. if (CE_state->dest_ring) {
  779. /* Cleanup the datapath Rx ring */
  780. ce_t2h_msg_ce_cleanup(copyeng);
  781. if (CE_state->dest_ring->base_addr_owner_space_unaligned)
  782. qdf_mem_free_consistent(scn->qdf_dev,
  783. scn->qdf_dev->dev,
  784. (CE_state->dest_ring->nentries *
  785. sizeof(struct CE_dest_desc) +
  786. CE_DESC_RING_ALIGN),
  787. CE_state->dest_ring->
  788. base_addr_owner_space_unaligned,
  789. CE_state->dest_ring->
  790. base_addr_CE_space, 0);
  791. qdf_mem_free(CE_state->dest_ring);
  792. /* epping */
  793. if (CE_state->timer_inited) {
  794. CE_state->timer_inited = false;
  795. qdf_timer_free(&CE_state->poll_timer);
  796. }
  797. }
  798. qdf_mem_free(CE_state);
  799. }
  800. void hif_detach_htc(struct hif_opaque_softc *hif_ctx)
  801. {
  802. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
  803. qdf_mem_zero(&hif_state->msg_callbacks_pending,
  804. sizeof(hif_state->msg_callbacks_pending));
  805. qdf_mem_zero(&hif_state->msg_callbacks_current,
  806. sizeof(hif_state->msg_callbacks_current));
  807. }
  808. /* Send the first nbytes bytes of the buffer */
  809. QDF_STATUS
  810. hif_send_head(struct hif_opaque_softc *hif_ctx,
  811. uint8_t pipe, unsigned int transfer_id, unsigned int nbytes,
  812. qdf_nbuf_t nbuf, unsigned int data_attr)
  813. {
  814. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  815. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
  816. struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
  817. struct CE_handle *ce_hdl = pipe_info->ce_hdl;
  818. int bytes = nbytes, nfrags = 0;
  819. struct ce_sendlist sendlist;
  820. int status, i = 0;
  821. unsigned int mux_id = 0;
  822. QDF_ASSERT(nbytes <= qdf_nbuf_len(nbuf));
  823. transfer_id =
  824. (mux_id & MUX_ID_MASK) |
  825. (transfer_id & TRANSACTION_ID_MASK);
  826. data_attr &= DESC_DATA_FLAG_MASK;
  827. /*
  828. * The common case involves sending multiple fragments within a
  829. * single download (the tx descriptor and the tx frame header).
  830. * So, optimize for the case of multiple fragments by not even
  831. * checking whether it's necessary to use a sendlist.
  832. * The overhead of using a sendlist for a single buffer download
  833. * is not a big deal, since it happens rarely (for WMI messages).
  834. */
  835. ce_sendlist_init(&sendlist);
  836. do {
  837. qdf_dma_addr_t frag_paddr;
  838. int frag_bytes;
  839. frag_paddr = qdf_nbuf_get_frag_paddr(nbuf, nfrags);
  840. frag_bytes = qdf_nbuf_get_frag_len(nbuf, nfrags);
  841. /*
  842. * Clear the packet offset for all but the first CE desc.
  843. */
  844. if (i++ > 0)
  845. data_attr &= ~QDF_CE_TX_PKT_OFFSET_BIT_M;
  846. status = ce_sendlist_buf_add(&sendlist, frag_paddr,
  847. frag_bytes >
  848. bytes ? bytes : frag_bytes,
  849. qdf_nbuf_get_frag_is_wordstream
  850. (nbuf,
  851. nfrags) ? 0 :
  852. CE_SEND_FLAG_SWAP_DISABLE,
  853. data_attr);
  854. if (status != QDF_STATUS_SUCCESS) {
  855. HIF_ERROR("%s: error, frag_num %d larger than limit",
  856. __func__, nfrags);
  857. return status;
  858. }
  859. bytes -= frag_bytes;
  860. nfrags++;
  861. } while (bytes > 0);
  862. /* Make sure we have resources to handle this request */
  863. qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
  864. if (pipe_info->num_sends_allowed < nfrags) {
  865. qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
  866. ce_pkt_error_count_incr(hif_state, HIF_PIPE_NO_RESOURCE);
  867. return QDF_STATUS_E_RESOURCES;
  868. }
  869. pipe_info->num_sends_allowed -= nfrags;
  870. qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
  871. if (qdf_unlikely(ce_hdl == NULL)) {
  872. HIF_ERROR("%s: error CE handle is null", __func__);
  873. return A_ERROR;
  874. }
  875. QDF_NBUF_UPDATE_TX_PKT_COUNT(nbuf, QDF_NBUF_TX_PKT_HIF);
  876. DPTRACE(qdf_dp_trace(nbuf, QDF_DP_TRACE_HIF_PACKET_PTR_RECORD,
  877. (uint8_t *)(qdf_nbuf_data(nbuf)),
  878. sizeof(qdf_nbuf_data(nbuf))));
  879. status = ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
  880. QDF_ASSERT(status == QDF_STATUS_SUCCESS);
  881. return status;
  882. }
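/*
 * Illustrative usage sketch (editor's addition, hypothetical values): the
 * caller hands hif_send_head() a pipe id, a transfer id and an nbuf whose
 * fragments are already DMA-mapped, e.g.:
 *
 *   status = hif_send_head(hif_ctx, ul_pipe, transfer_id,
 *                          qdf_nbuf_len(nbuf), nbuf, data_attr);
 *   if (status != QDF_STATUS_SUCCESS)
 *       ;  // e.g. QDF_STATUS_E_RESOURCES when num_sends_allowed is exhausted
 */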
  883. void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
  884. int force)
  885. {
  886. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  887. if (!force) {
  888. int resources;
  889. /*
  890. * Decide whether to actually poll for completions, or just
  891. * wait for a later chance. If there seem to be plenty of
  892. * resources left, then just wait, since checking involves
  893. * reading a CE register, which is a relatively expensive
  894. * operation.
  895. */
  896. resources = hif_get_free_queue_number(hif_ctx, pipe);
  897. /*
  898. * If at least 50% of the total resources are still available,
  899. * don't bother checking again yet.
  900. */
  901. if (resources > (host_ce_config[pipe].src_nentries >> 1)) {
  902. return;
  903. }
  904. }
  905. #ifdef ATH_11AC_TXCOMPACT
  906. ce_per_engine_servicereap(scn, pipe);
  907. #else
  908. ce_per_engine_service(scn, pipe);
  909. #endif
  910. }
  911. uint16_t
  912. hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t pipe)
  913. {
  914. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
  915. struct HIF_CE_pipe_info *pipe_info = &(hif_state->pipe_info[pipe]);
  916. uint16_t rv;
  917. qdf_spin_lock_bh(&pipe_info->completion_freeq_lock);
  918. rv = pipe_info->num_sends_allowed;
  919. qdf_spin_unlock_bh(&pipe_info->completion_freeq_lock);
  920. return rv;
  921. }
  922. /* Called by lower (CE) layer when a send to Target completes. */
  923. void
  924. hif_pci_ce_send_done(struct CE_handle *copyeng, void *ce_context,
  925. void *transfer_context, qdf_dma_addr_t CE_data,
  926. unsigned int nbytes, unsigned int transfer_id,
  927. unsigned int sw_index, unsigned int hw_index,
  928. unsigned int toeplitz_hash_result)
  929. {
  930. struct HIF_CE_pipe_info *pipe_info =
  931. (struct HIF_CE_pipe_info *)ce_context;
  932. struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
  933. struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
  934. unsigned int sw_idx = sw_index, hw_idx = hw_index;
  935. struct hif_msg_callbacks *msg_callbacks =
  936. &hif_state->msg_callbacks_current;
  937. do {
  938. /*
  939. * The upper layer callback will be triggered
  940. * when the last fragment is completed.
  941. */
  942. if (transfer_context != CE_SENDLIST_ITEM_CTXT) {
  943. if (scn->target_status
  944. == OL_TRGET_STATUS_RESET)
  945. qdf_nbuf_free(transfer_context);
  946. else
  947. msg_callbacks->txCompletionHandler(
  948. msg_callbacks->Context,
  949. transfer_context, transfer_id,
  950. toeplitz_hash_result);
  951. }
  952. qdf_spin_lock(&pipe_info->completion_freeq_lock);
  953. pipe_info->num_sends_allowed++;
  954. qdf_spin_unlock(&pipe_info->completion_freeq_lock);
  955. } while (ce_completed_send_next(copyeng,
  956. &ce_context, &transfer_context,
  957. &CE_data, &nbytes, &transfer_id,
  958. &sw_idx, &hw_idx,
  959. &toeplitz_hash_result) == QDF_STATUS_SUCCESS);
  960. }
  961. /**
  962. * hif_ce_do_recv(): send message from copy engine to upper layers
  963. * @msg_callbacks: structure containing callback and callback context
  964. * @netbuf: skb containing message
  965. * @nbytes: number of bytes in the message
  966. * @pipe_info: used for the pipe_number info
  967. *
  968. * Checks the packet length, sets the length in the netbuf,
  969. * and calls the upper layer callback.
  970. *
  971. * return: None
  972. */
  973. static inline void hif_ce_do_recv(struct hif_msg_callbacks *msg_callbacks,
  974. qdf_nbuf_t netbuf, int nbytes,
  975. struct HIF_CE_pipe_info *pipe_info) {
  976. if (nbytes <= pipe_info->buf_sz) {
  977. qdf_nbuf_set_pktlen(netbuf, nbytes);
  978. msg_callbacks->
  979. rxCompletionHandler(msg_callbacks->Context,
  980. netbuf, pipe_info->pipe_num);
  981. } else {
  982. HIF_ERROR("%s: Invalid Rx msg buf:%p nbytes:%d",
  983. __func__, netbuf, nbytes);
  984. qdf_nbuf_free(netbuf);
  985. }
  986. }
  987. /* Called by lower (CE) layer when data is received from the Target. */
  988. void
  989. hif_pci_ce_recv_data(struct CE_handle *copyeng, void *ce_context,
  990. void *transfer_context, qdf_dma_addr_t CE_data,
  991. unsigned int nbytes, unsigned int transfer_id,
  992. unsigned int flags)
  993. {
  994. struct HIF_CE_pipe_info *pipe_info =
  995. (struct HIF_CE_pipe_info *)ce_context;
  996. struct HIF_CE_state *hif_state = pipe_info->HIF_CE_state;
  997. struct CE_state *ce_state = (struct CE_state *) copyeng;
  998. struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
  999. #ifdef HIF_PCI
  1000. struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_state);
  1001. #endif
  1002. struct hif_msg_callbacks *msg_callbacks =
  1003. &hif_state->msg_callbacks_current;
  1004. uint32_t count;
  1005. do {
  1006. #ifdef HIF_PCI
  1007. hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
  1008. #endif
  1009. qdf_nbuf_unmap_single(scn->qdf_dev,
  1010. (qdf_nbuf_t) transfer_context,
  1011. QDF_DMA_FROM_DEVICE);
  1012. atomic_inc(&pipe_info->recv_bufs_needed);
  1013. hif_post_recv_buffers_for_pipe(pipe_info);
  1014. if (scn->target_status == OL_TRGET_STATUS_RESET)
  1015. qdf_nbuf_free(transfer_context);
  1016. else
  1017. hif_ce_do_recv(msg_callbacks, transfer_context,
  1018. nbytes, pipe_info);
  1019. /* Set up force_break flag if num of receives reaches
  1020. * MAX_NUM_OF_RECEIVES */
  1021. ce_state->receive_count++;
  1022. count = ce_state->receive_count;
  1023. if (qdf_unlikely(hif_max_num_receives_reached(scn, count))) {
  1024. ce_state->force_break = 1;
  1025. break;
  1026. }
  1027. } while (ce_completed_recv_next(copyeng, &ce_context, &transfer_context,
  1028. &CE_data, &nbytes, &transfer_id,
  1029. &flags) == QDF_STATUS_SUCCESS);
  1030. }
  1031. /* TBDXXX: Set CE High Watermark; invoke txResourceAvailHandler in response */
  1032. void
  1033. hif_post_init(struct hif_opaque_softc *hif_ctx, void *unused,
  1034. struct hif_msg_callbacks *callbacks)
  1035. {
  1036. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
  1037. #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
  1038. spin_lock_init(&pcie_access_log_lock);
  1039. #endif
  1040. /* Save callbacks for later installation */
  1041. qdf_mem_copy(&hif_state->msg_callbacks_pending, callbacks,
  1042. sizeof(hif_state->msg_callbacks_pending));
  1043. }
  1044. int hif_completion_thread_startup(struct HIF_CE_state *hif_state)
  1045. {
  1046. struct CE_handle *ce_diag = hif_state->ce_diag;
  1047. int pipe_num;
  1048. struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
  1049. struct hif_msg_callbacks *hif_msg_callbacks =
  1050. &hif_state->msg_callbacks_current;
  1051. /* daemonize("hif_compl_thread"); */
  1052. if (scn->ce_count == 0) {
  1053. HIF_ERROR("%s: Invalid ce_count\n", __func__);
  1054. return -EINVAL;
  1055. }
  1056. if (!hif_msg_callbacks ||
  1057. !hif_msg_callbacks->rxCompletionHandler ||
  1058. !hif_msg_callbacks->txCompletionHandler) {
  1059. HIF_ERROR("%s: no completion handler registered", __func__);
  1060. return -EFAULT;
  1061. }
  1062. A_TARGET_ACCESS_LIKELY(scn);
  1063. for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
  1064. struct CE_attr attr;
  1065. struct HIF_CE_pipe_info *pipe_info;
  1066. pipe_info = &hif_state->pipe_info[pipe_num];
  1067. if (pipe_info->ce_hdl == ce_diag) {
  1068. continue; /* Handle Diagnostic CE specially */
  1069. }
  1070. attr = host_ce_config[pipe_num];
  1071. if (attr.src_nentries) {
  1072. /* pipe used to send to target */
  1073. HIF_INFO_MED("%s: pipe_num:%d pipe_info:0x%p",
  1074. __func__, pipe_num, pipe_info);
  1075. ce_send_cb_register(pipe_info->ce_hdl,
  1076. hif_pci_ce_send_done, pipe_info,
  1077. attr.flags & CE_ATTR_DISABLE_INTR);
  1078. pipe_info->num_sends_allowed = attr.src_nentries - 1;
  1079. }
  1080. if (attr.dest_nentries) {
  1081. /* pipe used to receive from target */
  1082. ce_recv_cb_register(pipe_info->ce_hdl,
  1083. hif_pci_ce_recv_data, pipe_info,
  1084. attr.flags & CE_ATTR_DISABLE_INTR);
  1085. }
  1086. if (attr.src_nentries)
  1087. qdf_spinlock_create(&pipe_info->completion_freeq_lock);
  1088. }
  1089. A_TARGET_ACCESS_UNLIKELY(scn);
  1090. return 0;
  1091. }
  1092. /*
  1093. * Install pending msg callbacks.
  1094. *
  1095. * TBDXXX: This hack is needed because upper layers install msg callbacks
  1096. * for use with HTC before BMI is done; yet this HIF implementation
  1097. * needs to continue to use BMI msg callbacks. Really, upper layers
  1098. * should not register HTC callbacks until AFTER BMI phase.
  1099. */
  1100. static void hif_msg_callbacks_install(struct hif_softc *scn)
  1101. {
  1102. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  1103. qdf_mem_copy(&hif_state->msg_callbacks_current,
  1104. &hif_state->msg_callbacks_pending,
  1105. sizeof(hif_state->msg_callbacks_pending));
  1106. }
  1107. void hif_get_default_pipe(struct hif_opaque_softc *hif_hdl, uint8_t *ULPipe,
  1108. uint8_t *DLPipe)
  1109. {
  1110. int ul_is_polled, dl_is_polled;
  1111. (void)hif_map_service_to_pipe(hif_hdl, HTC_CTRL_RSVD_SVC,
  1112. ULPipe, DLPipe, &ul_is_polled, &dl_is_polled);
  1113. }
  1114. /**
  1115. * hif_dump_pipe_debug_count() - Log error count
  1116. * @scn: hif_softc pointer.
  1117. *
  1118. * Output the pipe error counts of each pipe to log file
  1119. *
  1120. * Return: N/A
  1121. */
  1122. void hif_dump_pipe_debug_count(struct hif_softc *scn)
  1123. {
  1124. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  1125. int pipe_num;
  1126. if (hif_state == NULL) {
  1127. HIF_ERROR("%s hif_state is NULL", __func__);
  1128. return;
  1129. }
  1130. for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
  1131. struct HIF_CE_pipe_info *pipe_info;
  1132. pipe_info = &hif_state->pipe_info[pipe_num];
  1133. if (pipe_info->nbuf_alloc_err_count > 0 ||
  1134. pipe_info->nbuf_dma_err_count > 0 ||
  1135. pipe_info->nbuf_ce_enqueue_err_count)
  1136. HIF_ERROR(
  1137. "%s: pipe_id = %d, recv_bufs_needed = %d, nbuf_alloc_err_count = %u, nbuf_dma_err_count = %u, nbuf_ce_enqueue_err_count = %u",
  1138. __func__, pipe_info->pipe_num,
  1139. atomic_read(&pipe_info->recv_bufs_needed),
  1140. pipe_info->nbuf_alloc_err_count,
  1141. pipe_info->nbuf_dma_err_count,
  1142. pipe_info->nbuf_ce_enqueue_err_count);
  1143. }
  1144. }
  1145. static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
  1146. {
  1147. struct CE_handle *ce_hdl;
  1148. qdf_size_t buf_sz;
  1149. struct hif_softc *scn = HIF_GET_SOFTC(pipe_info->HIF_CE_state);
  1150. QDF_STATUS ret;
  1151. uint32_t bufs_posted = 0;
  1152. buf_sz = pipe_info->buf_sz;
  1153. if (buf_sz == 0) {
  1154. /* Unused Copy Engine */
  1155. return 0;
  1156. }
  1157. ce_hdl = pipe_info->ce_hdl;
  1158. qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
  1159. while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
  1160. qdf_dma_addr_t CE_data; /* CE space buffer address */
  1161. qdf_nbuf_t nbuf;
  1162. int status;
  1163. atomic_dec(&pipe_info->recv_bufs_needed);
  1164. qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
  1165. nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
  1166. if (!nbuf) {
  1167. qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
  1168. pipe_info->nbuf_alloc_err_count++;
  1169. qdf_spin_unlock_bh(
  1170. &pipe_info->recv_bufs_needed_lock);
  1171. HIF_ERROR(
  1172. "%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
  1173. __func__, pipe_info->pipe_num,
  1174. atomic_read(&pipe_info->recv_bufs_needed),
  1175. pipe_info->nbuf_alloc_err_count);
  1176. atomic_inc(&pipe_info->recv_bufs_needed);
  1177. return 1;
  1178. }
  1179. /*
  1180. * qdf_nbuf_peek_header(nbuf, &data, &unused);
  1181. * CE_data = dma_map_single(dev, data, buf_sz, );
  1182. * DMA_FROM_DEVICE);
  1183. */
  1184. ret =
  1185. qdf_nbuf_map_single(scn->qdf_dev, nbuf,
  1186. QDF_DMA_FROM_DEVICE);
  1187. if (unlikely(ret != QDF_STATUS_SUCCESS)) {
  1188. qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
  1189. pipe_info->nbuf_dma_err_count++;
  1190. qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
  1191. HIF_ERROR(
  1192. "%s buf alloc error [%d] needed %d, nbuf_dma_err_count = %u",
  1193. __func__, pipe_info->pipe_num,
  1194. atomic_read(&pipe_info->recv_bufs_needed),
  1195. pipe_info->nbuf_dma_err_count);
  1196. qdf_nbuf_free(nbuf);
  1197. atomic_inc(&pipe_info->recv_bufs_needed);
  1198. return 1;
  1199. }
  1200. CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
  1201. qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
  1202. buf_sz, DMA_FROM_DEVICE);
  1203. status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
  1204. QDF_ASSERT(status == QDF_STATUS_SUCCESS);
  1205. if (status != EOK) {
  1206. qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
  1207. pipe_info->nbuf_ce_enqueue_err_count++;
  1208. qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
  1209. HIF_ERROR(
  1210. "%s buf alloc error [%d] needed %d, nbuf_alloc_err_count = %u",
  1211. __func__, pipe_info->pipe_num,
  1212. atomic_read(&pipe_info->recv_bufs_needed),
  1213. pipe_info->nbuf_ce_enqueue_err_count);
  1214. atomic_inc(&pipe_info->recv_bufs_needed);
  1215. qdf_nbuf_free(nbuf);
  1216. return 1;
  1217. }
  1218. qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
  1219. bufs_posted++;
  1220. }
  1221. pipe_info->nbuf_alloc_err_count =
  1222. (pipe_info->nbuf_alloc_err_count > bufs_posted) ?
  1223. pipe_info->nbuf_alloc_err_count - bufs_posted : 0;
  1224. pipe_info->nbuf_dma_err_count =
  1225. (pipe_info->nbuf_dma_err_count > bufs_posted) ?
  1226. pipe_info->nbuf_dma_err_count - bufs_posted : 0;
  1227. pipe_info->nbuf_ce_enqueue_err_count =
  1228. (pipe_info->nbuf_ce_enqueue_err_count > bufs_posted) ?
  1229. pipe_info->nbuf_ce_enqueue_err_count - bufs_posted : 0;
  1230. qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
  1231. return 0;
  1232. }
  1233. /*
  1234. * Try to post all desired receive buffers for all pipes.
  1235. * Returns 0 if all desired buffers are posted,
  1236. * non-zero if we were unable to completely
  1237. * replenish receive buffers.
  1238. */
  1239. static int hif_post_recv_buffers(struct hif_softc *scn)
  1240. {
  1241. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  1242. int pipe_num, rv = 0;
  1243. A_TARGET_ACCESS_LIKELY(scn);
  1244. for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
  1245. struct HIF_CE_pipe_info *pipe_info;
  1246. pipe_info = &hif_state->pipe_info[pipe_num];
  1247. if (hif_post_recv_buffers_for_pipe(pipe_info)) {
  1248. rv = 1;
  1249. goto done;
  1250. }
  1251. }
  1252. done:
  1253. A_TARGET_ACCESS_UNLIKELY(scn);
  1254. return rv;
  1255. }
  1256. QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
  1257. {
  1258. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1259. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  1260. hif_msg_callbacks_install(scn);
  1261. if (hif_completion_thread_startup(hif_state))
  1262. return QDF_STATUS_E_FAILURE;
  1263. /* Post buffers once to start things off. */
  1264. (void)hif_post_recv_buffers(scn);
  1265. hif_state->started = true;
  1266. return QDF_STATUS_SUCCESS;
  1267. }
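/*
 * Illustrative bring-up sketch (editor's addition): callers are expected to
 * register their message callbacks before starting HIF, roughly:
 *
 *   hif_post_init(hif_ctx, NULL, &callbacks);  // save pending callbacks
 *   if (hif_start(hif_ctx) != QDF_STATUS_SUCCESS)
 *       ;  // completion handlers missing or no CEs configured
 *
 * hif_start() installs the pending callbacks, registers the per-pipe CE
 * send/recv completion handlers and posts the initial Rx buffers.
 */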
  1268. #ifdef WLAN_FEATURE_FASTPATH
  1269. /**
  1270. * hif_enable_fastpath() - flag that fastpath mode is enabled
  1271. * @hif_ctx: HIF context
  1272. *
  1273. * For use in data path
  1274. *
  1275. * Return: void
  1276. */
  1277. void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx)
  1278. {
  1279. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1280. HIF_INFO("Enabling fastpath mode\n");
  1281. scn->fastpath_mode_on = true;
  1282. }
  1283. /**
  1284. * hif_is_fastpath_mode_enabled - API to query if fasthpath mode is enabled
  1285. * @hif_ctx: HIF Context
  1286. *
  1287. * For use in data path to skip HTC
  1288. *
  1289. * Return: bool
  1290. */
  1291. bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx)
  1292. {
  1293. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1294. return scn->fastpath_mode_on;
  1295. }
  1296. /**
  1297. * hif_get_ce_handle - API to get CE handle for FastPath mode
  1298. * @hif_ctx: HIF Context
  1299. * @id: CopyEngine Id
  1300. *
  1301. * API to return CE handle for fastpath mode
  1302. *
  1303. * Return: void
  1304. */
  1305. void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int id)
  1306. {
  1307. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1308. return scn->ce_id_to_state[id];
  1309. }
  1310. #endif /* WLAN_FEATURE_FASTPATH */
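/*
 * Illustrative use of the fastpath accessors above. The caller context and
 * the CE id are assumptions, not fixed by this file:
 *
 *	if (hif_is_fastpath_mode_enabled(hif_ctx)) {
 *		void *ce_tx_hdl = hif_get_ce_handle(hif_ctx, CE_HTT_TX_CE);
 *		// hand ce_tx_hdl to the HTT fastpath send routine
 *	}
 *
 * CE_HTT_TX_CE is a hypothetical placeholder for whichever CE id the
 * datapath uses for HTT transmit.
 */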
void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct hif_softc *scn;
	struct CE_handle *ce_hdl;
	uint32_t buf_sz;
	struct HIF_CE_state *hif_state;
	qdf_nbuf_t netbuf;
	qdf_dma_addr_t CE_data;
	void *per_CE_context;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);
	ce_hdl = pipe_info->ce_hdl;

	if (scn->qdf_dev == NULL) {
		return;
	}
	while (ce_revoke_recv_next
		       (ce_hdl, &per_CE_context, (void **)&netbuf,
			&CE_data) == QDF_STATUS_SUCCESS) {
		qdf_nbuf_unmap_single(scn->qdf_dev, netbuf,
				      QDF_DMA_FROM_DEVICE);
		qdf_nbuf_free(netbuf);
	}
}
void hif_send_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
{
	struct CE_handle *ce_hdl;
	struct HIF_CE_state *hif_state;
	struct hif_softc *scn;
	qdf_nbuf_t netbuf;
	void *per_CE_context;
	qdf_dma_addr_t CE_data;
	unsigned int nbytes;
	unsigned int id;
	uint32_t buf_sz;
	uint32_t toeplitz_hash_result;

	buf_sz = pipe_info->buf_sz;
	if (buf_sz == 0) {
		/* Unused Copy Engine */
		return;
	}

	hif_state = pipe_info->HIF_CE_state;
	if (!hif_state->started) {
		return;
	}

	scn = HIF_GET_SOFTC(hif_state);
	ce_hdl = pipe_info->ce_hdl;

	while (ce_cancel_send_next
		       (ce_hdl, &per_CE_context,
			(void **)&netbuf, &CE_data, &nbytes,
			&id, &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Packets enqueued by htt_h2t_ver_req_msg() and
			 * htt_h2t_rx_ring_cfg_msg_ll() have already been
			 * freed in htt_htc_misc_pkt_pool_free() in
			 * wlantl_close(), so do not free them here again
			 * by checking whether it's the endpoint
			 * which they are queued in.
			 */
			if (id == scn->htc_endpoint)
				return;
			/* Indicate the completion to higher
			 * layer to free the buffer
			 */
			hif_state->msg_callbacks_current.
			txCompletionHandler(hif_state->
				msg_callbacks_current.Context,
				netbuf, id, toeplitz_hash_result);
		}
	}
}
/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
void hif_buffer_cleanup(struct HIF_CE_state *hif_state)
{
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		hif_recv_buffer_cleanup_on_pipe(pipe_info);
		hif_send_buffer_cleanup_on_pipe(pipe_info);
	}
}
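/*
 * hif_buffer_cleanup() is shared by the graceful shutdown path (hif_stop(),
 * below) and the surprise-removal path (hif_flush_surprise_remove()); both
 * walk the same per-pipe receive/send cleanup helpers defined above.
 */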
void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	hif_buffer_cleanup(hif_state);
}

void hif_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
	int pipe_num;

	scn->hif_init_done = false;

	/*
	 * At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt, and host code may not initiate anything
	 * more. So we just need to clean up host-side state.
	 */
	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	hif_buffer_cleanup(hif_state);

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info;

		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}

	if (hif_state->sleep_timer_init) {
		qdf_timer_stop(&hif_state->sleep_timer);
		qdf_timer_free(&hif_state->sleep_timer);
		hif_state->sleep_timer_init = false;
	}

	hif_state->started = false;
}
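/*
 * Note the teardown order in hif_stop(): outstanding receive/send buffers
 * are reclaimed via hif_buffer_cleanup() before each pipe's copy engine is
 * torn down with ce_fini(), and hif_state->started is cleared last so that
 * the per-pipe cleanup helpers (which bail out when !started) still run.
 */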
/**
 * hif_get_target_ce_config() - get copy engine configuration
 * @target_ce_config_ret: basic copy engine configuration
 * @target_ce_config_sz_ret: size of the basic configuration in bytes
 * @target_service_to_ce_map_ret: service mapping for the copy engines
 * @target_service_to_ce_map_sz_ret: size of the mapping in bytes
 * @target_shadow_reg_cfg_ret: shadow register configuration
 * @shadow_cfg_sz_ret: size of the shadow register configuration in bytes
 *
 * Provides an accessor to these values outside of this file.
 * Currently these are stored in static pointers to const sections.
 * There are multiple configurations that are selected from at compile time.
 * Runtime selection would need to consider mode, target type and bus type.
 *
 * Return: return by parameter.
 */
void hif_get_target_ce_config(struct CE_pipe_config **target_ce_config_ret,
		int *target_ce_config_sz_ret,
		struct service_to_pipe **target_service_to_ce_map_ret,
		int *target_service_to_ce_map_sz_ret,
		struct shadow_reg_cfg **target_shadow_reg_cfg_ret,
		int *shadow_cfg_sz_ret)
{
	*target_ce_config_ret = target_ce_config;
	*target_ce_config_sz_ret = target_ce_config_sz;
	*target_service_to_ce_map_ret = target_service_to_ce_map;
	*target_service_to_ce_map_sz_ret = target_service_to_ce_map_sz;

	if (target_shadow_reg_cfg_ret)
		*target_shadow_reg_cfg_ret = target_shadow_reg_cfg;

	if (shadow_cfg_sz_ret)
		*shadow_cfg_sz_ret = shadow_cfg_sz;
}
/**
 * hif_wlan_enable(): call the platform driver to enable wlan
 * @scn: HIF Context
 *
 * This function passes the con_mode and CE configuration to
 * platform driver to enable wlan.
 *
 * Return: linux error code
 */
int hif_wlan_enable(struct hif_softc *scn)
{
	struct icnss_wlan_enable_cfg cfg;
	enum icnss_driver_mode mode;
	uint32_t con_mode = hif_get_conparam(scn);

	hif_get_target_ce_config((struct CE_pipe_config **)&cfg.ce_tgt_cfg,
				 &cfg.num_ce_tgt_cfg,
				 (struct service_to_pipe **)&cfg.ce_svc_cfg,
				 &cfg.num_ce_svc_pipe_cfg,
				 (struct shadow_reg_cfg **)&cfg.shadow_reg_cfg,
				 &cfg.num_shadow_reg_cfg);

	/* translate from structure size to array size */
	cfg.num_ce_tgt_cfg /= sizeof(struct CE_pipe_config);
	cfg.num_ce_svc_pipe_cfg /= sizeof(struct service_to_pipe);
	cfg.num_shadow_reg_cfg /= sizeof(struct shadow_reg_cfg);

	if (QDF_GLOBAL_FTM_MODE == con_mode)
		mode = ICNSS_FTM;
	else if (WLAN_IS_EPPING_ENABLED(con_mode))
		mode = ICNSS_EPPING;
	else
		mode = ICNSS_MISSION;

	if (BYPASS_QMI)
		return 0;
	else
		return icnss_wlan_enable(&cfg, mode, QWLAN_VERSIONSTR);
}
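/*
 * The divisions above convert the byte sizes reported by
 * hif_get_target_ce_config() into element counts, since target_ce_config_sz
 * and friends hold sizeof() values for whole arrays. For example, if
 * target_ce_config_sz == 8 * sizeof(struct CE_pipe_config), then
 * cfg.num_ce_tgt_cfg ends up as 8.
 */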
/**
 * hif_ce_prepare_config() - load the correct static tables.
 * @scn: hif context
 *
 * Epping uses different static attribute tables than mission mode.
 */
void hif_ce_prepare_config(struct hif_softc *scn)
{
	uint32_t mode = hif_get_conparam(scn);

	/* if epping is enabled we need to use the epping configuration. */
	if (WLAN_IS_EPPING_ENABLED(mode)) {
		if (WLAN_IS_EPPING_IRQ(mode))
			host_ce_config = host_ce_config_wlan_epping_irq;
		else
			host_ce_config = host_ce_config_wlan_epping_poll;
		target_ce_config = target_ce_config_wlan_epping;
		target_ce_config_sz = sizeof(target_ce_config_wlan_epping);
		target_service_to_ce_map =
			target_service_to_ce_map_wlan_epping;
		target_service_to_ce_map_sz =
			sizeof(target_service_to_ce_map_wlan_epping);
	}
}
/**
 * hif_ce_open() - do ce specific allocations
 * @hif_sc: pointer to hif context
 *
 * Return: QDF_STATUS_SUCCESS for success or QDF_STATUS_E_NOMEM
 */
QDF_STATUS hif_ce_open(struct hif_softc *hif_sc)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	qdf_spinlock_create(&hif_state->keep_awake_lock);
	return QDF_STATUS_SUCCESS;
}

/**
 * hif_ce_close() - do ce specific free
 * @hif_sc: pointer to hif context
 */
void hif_ce_close(struct hif_softc *hif_sc)
{
}
#ifdef WLAN_FEATURE_FASTPATH
/**
 * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
 * @hif_hdl: Handle to HIF context
 *
 * Return: true if fastpath is enabled else false.
 */
bool ce_is_fastpath_enabled(struct hif_opaque_softc *hif_hdl)
{
	return HIF_GET_SOFTC(hif_hdl)->fastpath_mode_on;
}

/**
 * ce_is_fastpath_handler_registered() - return true for datapath CEs and if
 * fastpath is enabled.
 * @ce_state: handle to copy engine
 *
 * Return: true if fastpath handler is registered for datapath CE.
 */
bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	if (ce_state->fastpath_handler)
		return true;
	else
		return false;
}

/**
 * hif_update_fastpath_recv_bufs_cnt() - increments the Rx buf count by 1
 * @hif_hdl: HIF handle
 *
 * Datapath Rx CEs are a special case, where we reuse all the message buffers.
 * Hence we have to post all the entries in the pipe from the beginning,
 * unlike other CE pipes where one less than dest_nentries is filled
 * initially.
 *
 * Return: None
 */
void hif_update_fastpath_recv_bufs_cnt(struct hif_opaque_softc *hif_hdl)
{
	int pipe_num;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	if (scn->fastpath_mode_on == false)
		return;

	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct HIF_CE_pipe_info *pipe_info =
			&hif_state->pipe_info[pipe_num];
		struct CE_state *ce_state =
			scn->ce_id_to_state[pipe_info->pipe_num];

		if (ce_state->htt_rx_data)
			atomic_inc(&pipe_info->recv_bufs_needed);
	}
}
#else
bool ce_is_fastpath_enabled(struct hif_opaque_softc *scn)
{
	return false;
}

void hif_update_fastpath_recv_bufs_cnt(struct hif_opaque_softc *scn)
{
}

bool ce_is_fastpath_handler_registered(struct CE_state *ce_state)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */
/**
 * hif_unconfig_ce() - ensure resources from hif_config_ce are freed
 * @hif_sc: hif context
 *
 * Uses state variables to support cleaning up when hif_config_ce fails.
 */
void hif_unconfig_ce(struct hif_softc *hif_sc)
{
	int pipe_num;
	struct HIF_CE_pipe_info *pipe_info;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);

	for (pipe_num = 0; pipe_num < hif_sc->ce_count; pipe_num++) {
		pipe_info = &hif_state->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ce_unregister_irq(hif_state, (1 << pipe_num));
			hif_sc->request_irq_done = false;
			ce_fini(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
	if (hif_sc->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		hif_sc->athdiag_procfs_inited = false;
	}
}
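/*
 * Because hif_unconfig_ce() checks pipe_info->ce_hdl and
 * athdiag_procfs_inited before freeing anything, it is safe to call on a
 * partially configured HIF instance, which is exactly how the error path in
 * hif_config_ce() below uses it.
 */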
#ifdef CONFIG_BYPASS_QMI
#define FW_SHARED_MEM (2 * 1024 * 1024)

/**
 * hif_post_static_buf_to_target() - post static buffer to WLAN FW
 * @scn: pointer to HIF structure
 *
 * WLAN FW needs 2MB memory from DDR when QMI is disabled.
 *
 * Return: void
 */
static void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	uint32_t CE_data;
	uint8_t *g_fw_mem;
	uint32_t phys_addr;

	g_fw_mem = kzalloc(FW_SHARED_MEM, GFP_KERNEL);

	CE_data = dma_map_single(scn->cdf_dev->dev, g_fw_mem,
				 FW_SHARED_MEM, CDF_DMA_FROM_DEVICE);
	HIF_TRACE("g_fw_mem %p physical 0x%x\n", g_fw_mem, CE_data);

	if (dma_mapping_error(scn->cdf_dev->dev, CE_data)) {
		pr_err("DMA map failed\n");
		return;
	}

	phys_addr = virt_to_phys((scn->mem + BYPASS_QMI_TEMP_REGISTER));
	hif_write32_mb(scn->mem + BYPASS_QMI_TEMP_REGISTER, CE_data);
	HIF_TRACE("Write phy address 0x%x into scratch reg %p phy add 0x%x",
		  CE_data, (scn->mem + BYPASS_QMI_TEMP_REGISTER), phys_addr);
}
#else
static inline void hif_post_static_buf_to_target(struct hif_softc *scn)
{
	return;
}
#endif
/**
 * hif_config_ce() - configure copy engines
 * @scn: hif context
 *
 * Prepares fw, copy engine hardware and host sw according
 * to the attributes selected by hif_ce_prepare_config.
 *
 * Also calls athdiag_procfs_init.
 *
 * Return: 0 for success, nonzero for failure.
 */
int hif_config_ce(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct HIF_CE_pipe_info *pipe_info;
	int pipe_num;
#ifdef ADRASTEA_SHADOW_REGISTERS
	int i;
#endif
	QDF_STATUS rv = QDF_STATUS_SUCCESS;

	scn->notice_send = true;

	hif_post_static_buf_to_target(scn);

	hif_state->fw_indicator_address = FW_INDICATOR_ADDRESS;

	hif_config_rri_on_ddr(scn);

	/* During CE initialization */
	scn->ce_count = HOST_CE_COUNT;
	for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
		struct CE_attr *attr;

		pipe_info = &hif_state->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->HIF_CE_state = hif_state;
		attr = &host_ce_config[pipe_num];
		pipe_info->ce_hdl = ce_init(scn, pipe_num, attr);
		QDF_ASSERT(pipe_info->ce_hdl != NULL);
		if (pipe_info->ce_hdl == NULL) {
			rv = QDF_STATUS_E_FAILURE;
			A_TARGET_ACCESS_UNLIKELY(scn);
			goto err;
		}

		if (pipe_num == DIAG_CE_ID) {
			/* Reserve the ultimate CE for
			 * Diagnostic Window support
			 */
			hif_state->ce_diag =
				hif_state->pipe_info[scn->ce_count - 1].ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (qdf_size_t) (attr->src_sz_max);
		qdf_spinlock_create(&pipe_info->recv_bufs_needed_lock);
		if (attr->dest_nentries > 0) {
			atomic_set(&pipe_info->recv_bufs_needed,
				   init_buffer_count(attr->dest_nentries - 1));
		} else {
			atomic_set(&pipe_info->recv_bufs_needed, 0);
		}
		ce_tasklet_init(hif_state, (1 << pipe_num));
		ce_register_irq(hif_state, (1 << pipe_num));
		scn->request_irq_done = true;
	}

	if (athdiag_procfs_init(scn) != 0) {
		A_TARGET_ACCESS_UNLIKELY(scn);
		goto err;
	}
	scn->athdiag_procfs_inited = true;

	HIF_INFO_MED("%s: ce_init done", __func__);

	init_tasklet_workers(hif_hdl);

	HIF_TRACE("%s: X, ret = %d\n", __func__, rv);

#ifdef ADRASTEA_SHADOW_REGISTERS
	HIF_ERROR("Using Shadow Registers instead of CE Registers\n");
	for (i = 0; i < NUM_SHADOW_REGISTERS; i++) {
		HIF_ERROR("%s Shadow Register%d is mapped to address %x\n",
			  __func__, i,
			  (A_TARGET_READ(scn, (SHADOW_ADDRESS(i))) << 2));
	}
#endif

	return rv != QDF_STATUS_SUCCESS;

err:
	/* Failure, so clean up */
	hif_unconfig_ce(scn);
	HIF_TRACE("%s: X, ret = %d\n", __func__, rv);
	return QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE;
}
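/*
 * Return convention for hif_config_ce(): both return expressions evaluate
 * to plain integers rather than QDF_STATUS values. "rv != QDF_STATUS_SUCCESS"
 * is 0 on the success path, and "QDF_STATUS_SUCCESS != QDF_STATUS_E_FAILURE"
 * is always 1, so callers see 0 for success and 1 for failure.
 */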
#ifdef WLAN_FEATURE_FASTPATH
/**
 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
 * @handler: Callback function
 * @context: handle for callback function
 *
 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
 */
int hif_ce_fastpath_cb_register(fastpath_msg_handler handler, void *context)
{
	struct hif_softc *scn =
	    (struct hif_softc *)cds_get_context(QDF_MODULE_ID_HIF);
	struct CE_state *ce_state;
	int i;

	QDF_ASSERT(scn != NULL);

	if (!scn->fastpath_mode_on) {
		HIF_WARN("Fastpath mode disabled\n");
		return QDF_STATUS_E_FAILURE;
	}

	for (i = 0; i < CE_COUNT_MAX; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state->htt_rx_data) {
			ce_state->fastpath_handler = handler;
			ce_state->context = context;
		}
	}

	return QDF_STATUS_SUCCESS;
}
#else
int hif_ce_fastpath_cb_register(fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#ifdef IPA_OFFLOAD
/**
 * hif_ipa_get_ce_resource() - get uc resource on hif
 * @hif_ctx: bus context
 * @ce_sr_base_paddr: copy engine source ring base physical address
 * @ce_sr_ring_size: copy engine source ring size
 * @ce_reg_paddr: copy engine register physical address
 *
 * When the IPA micro controller data path offload feature is enabled,
 * HIF should release copy engine related resource information to IPA UC.
 * IPA UC will access the hardware resource with the released information.
 *
 * Return: None
 */
void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
			     qdf_dma_addr_t *ce_sr_base_paddr,
			     uint32_t *ce_sr_ring_size,
			     qdf_dma_addr_t *ce_reg_paddr)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info =
		&(hif_state->pipe_info[HIF_PCI_IPA_UC_ASSIGNED_CE]);
	struct CE_handle *ce_hdl = pipe_info->ce_hdl;

	ce_ipa_get_resource(ce_hdl, ce_sr_base_paddr, ce_sr_ring_size,
			    ce_reg_paddr);
	return;
}
#endif /* IPA_OFFLOAD */
#ifdef ADRASTEA_SHADOW_REGISTERS

/*
 * Current shadow register config
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *         0            |     0    |           src
 *         1     No Config - Doesn't point to anything
 *         2     No Config - Doesn't point to anything
 *         3            |     3    |           src
 *         4            |     4    |           src
 *         5            |     5    |           src
 *         6     No Config - Doesn't point to anything
 *         7            |     7    |           src
 *         8     No Config - Doesn't point to anything
 *         9     No Config - Doesn't point to anything
 *         10    No Config - Doesn't point to anything
 *         11    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *         12    No Config - Doesn't point to anything
 *         13           |     1    |           dst
 *         14           |     2    |           dst
 *         15    No Config - Doesn't point to anything
 *         16    No Config - Doesn't point to anything
 *         17    No Config - Doesn't point to anything
 *         18    No Config - Doesn't point to anything
 *         19           |     7    |           dst
 *         20           |     8    |           dst
 *         21    No Config - Doesn't point to anything
 *         22    No Config - Doesn't point to anything
 *         23    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 *
 * ToDo - Move shadow register config to following in the future
 * This helps free up a block of shadow registers towards the end.
 * Can be used for other purposes
 *
 * -----------------------------------------------------------
 * Shadow Register      |     CE   |    src/dst write index
 * -----------------------------------------------------------
 *         0            |     0    |           src
 *         1            |     3    |           src
 *         2            |     4    |           src
 *         3            |     5    |           src
 *         4            |     7    |           src
 * -----------------------------------------------------------
 *         5            |     1    |           dst
 *         6            |     2    |           dst
 *         7            |     7    |           dst
 *         8            |     8    |           dst
 * -----------------------------------------------------------
 *         9     No Config - Doesn't point to anything
 *         12    No Config - Doesn't point to anything
 *         13    No Config - Doesn't point to anything
 *         14    No Config - Doesn't point to anything
 *         15    No Config - Doesn't point to anything
 *         16    No Config - Doesn't point to anything
 *         17    No Config - Doesn't point to anything
 *         18    No Config - Doesn't point to anything
 *         19    No Config - Doesn't point to anything
 *         20    No Config - Doesn't point to anything
 *         21    No Config - Doesn't point to anything
 *         22    No Config - Doesn't point to anything
 *         23    No Config - Doesn't point to anything
 * -----------------------------------------------------------
 */
u32 shadow_sr_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;

	switch (COPY_ENGINE_ID(ctrl_addr)) {
	case 0:
		addr = SHADOW_VALUE0;
		break;
	case 3:
		addr = SHADOW_VALUE3;
		break;
	case 4:
		addr = SHADOW_VALUE4;
		break;
	case 5:
		addr = SHADOW_VALUE5;
		break;
	case 7:
		addr = SHADOW_VALUE7;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr\n");
		QDF_ASSERT(0);
	}
	return addr;
}

u32 shadow_dst_wr_ind_addr(struct hif_softc *scn, u32 ctrl_addr)
{
	u32 addr = 0;

	switch (COPY_ENGINE_ID(ctrl_addr)) {
	case 1:
		addr = SHADOW_VALUE13;
		break;
	case 2:
		addr = SHADOW_VALUE14;
		break;
	case 7:
		addr = SHADOW_VALUE19;
		break;
	case 8:
		addr = SHADOW_VALUE20;
		break;
	default:
		HIF_ERROR("invalid CE ctrl_addr\n");
		QDF_ASSERT(0);
	}
	return addr;
}

#endif
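/*
 * The two lookup functions above implement the "current" table from the
 * comment block: for example, the source write index of CE 3 lives in
 * shadow register 3 (SHADOW_VALUE3), while the destination write index of
 * CE 1 lives in shadow register 13 (SHADOW_VALUE13).
 */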
#if defined(FEATURE_LRO)
/**
 * ce_lro_flush_cb_register() - register the LRO flush
 * callback
 * @hif_hdl: HIF context
 * @handler: callback function
 * @data: opaque data pointer to be passed back
 *
 * Store the LRO flush callback provided
 *
 * Return: none
 */
void ce_lro_flush_cb_register(struct hif_opaque_softc *hif_hdl,
	 void (handler)(void *), void *data)
{
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	QDF_ASSERT(scn != NULL);

	for (i = 0; i < CE_COUNT_MAX; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state->htt_rx_data) {
			ce_state->lro_flush_cb = handler;
			ce_state->lro_data = data;
		}
	}
}

/**
 * ce_lro_flush_cb_deregister() - deregister the LRO flush
 * callback
 * @hif_hdl: HIF context
 *
 * Remove the LRO flush callback
 *
 * Return: none
 */
void ce_lro_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
{
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	QDF_ASSERT(scn != NULL);
	for (i = 0; i < CE_COUNT_MAX; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state->htt_rx_data) {
			ce_state->lro_flush_cb = NULL;
			ce_state->lro_data = NULL;
		}
	}
}
#endif
/**
 * hif_map_service_to_pipe() - returns the ce ids pertaining to
 * this service
 * @hif_hdl: opaque HIF handle.
 * @svc_id: Service ID for which the mapping is needed.
 * @ul_pipe: address of the container in which ul pipe is returned.
 * @dl_pipe: address of the container in which dl pipe is returned.
 * @ul_is_polled: address of the container in which a bool
 *		indicating if the UL CE for this service
 *		is polled is returned.
 * @dl_is_polled: address of the container in which a bool
 *		indicating if the DL CE for this service
 *		is polled is returned.
 *
 * Return: Indicates whether this operation was successful.
 */
int hif_map_service_to_pipe(struct hif_opaque_softc *hif_hdl, uint16_t svc_id,
			    uint8_t *ul_pipe, uint8_t *dl_pipe,
			    int *ul_is_polled, int *dl_is_polled)
{
	int status = QDF_STATUS_SUCCESS;
	unsigned int i;
	struct service_to_pipe element;
	struct service_to_pipe *tgt_svc_map_to_use;
	size_t sz_tgt_svc_map_to_use;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	uint32_t mode = hif_get_conparam(scn);

	if (WLAN_IS_EPPING_ENABLED(mode)) {
		tgt_svc_map_to_use = target_service_to_ce_map_wlan_epping;
		sz_tgt_svc_map_to_use =
			sizeof(target_service_to_ce_map_wlan_epping);
	} else {
		tgt_svc_map_to_use = target_service_to_ce_map_wlan;
		sz_tgt_svc_map_to_use = sizeof(target_service_to_ce_map_wlan);
	}

	*dl_is_polled = 0;  /* polling for received messages not supported */

	for (i = 0; i < (sz_tgt_svc_map_to_use / sizeof(element)); i++) {
		memcpy(&element, &tgt_svc_map_to_use[i], sizeof(element));
		if (element.service_id == svc_id) {
			if (element.pipedir == PIPEDIR_OUT)
				*ul_pipe = element.pipenum;
			else if (element.pipedir == PIPEDIR_IN)
				*dl_pipe = element.pipenum;
		}
	}

	*ul_is_polled =
		(host_ce_config[*ul_pipe].flags & CE_ATTR_DISABLE_INTR) != 0;

	return status;
}
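/*
 * Illustrative call into hif_map_service_to_pipe(); the service id used is
 * only an example of a value defined elsewhere in this codebase:
 *
 *	uint8_t ul_pipe, dl_pipe;
 *	int ul_polled, dl_polled;
 *
 *	if (hif_map_service_to_pipe(hif_hdl, HTT_DATA_MSG_SVC,
 *				    &ul_pipe, &dl_pipe,
 *				    &ul_polled, &dl_polled) ==
 *	    QDF_STATUS_SUCCESS) {
 *		// ul_pipe/dl_pipe now hold the CE ids for the HTT service
 *	}
 */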
#ifdef SHADOW_REG_DEBUG
inline uint32_t DEBUG_CE_SRC_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, srri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_SRRI_ADDRESS);

	srri_from_ddr = SRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != srri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
			  srri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return srri_from_ddr;
}

inline uint32_t DEBUG_CE_DEST_RING_READ_IDX_GET(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	uint32_t read_from_hw, drri_from_ddr = 0;

	read_from_hw = A_TARGET_READ(scn, CE_ctrl_addr + CURRENT_DRRI_ADDRESS);

	drri_from_ddr = DRRI_FROM_DDR_ADDR(VADDR_FOR_CE(scn, CE_ctrl_addr));

	if (read_from_hw != drri_from_ddr) {
		HIF_ERROR("error: read from ddr = %d actual read from register = %d, CE_MISC_INT_STATUS_GET = 0x%x\n",
			  drri_from_ddr, read_from_hw,
			  CE_MISC_INT_STATUS_GET(scn, CE_ctrl_addr));
		QDF_ASSERT(0);
	}
	return drri_from_ddr;
}
#endif
#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_get_src_ring_read_index(): Called to get the SRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the SRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based SRRI.
 *
 * Return: SRRI
 */
inline unsigned int hif_get_src_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;

	attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				(CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
}

/**
 * hif_get_dst_ring_read_index(): Called to get the DRRI
 *
 * @scn: hif_softc pointer
 * @CE_ctrl_addr: base address of the CE whose RRI is to be read
 *
 * This function returns the DRRI to the caller. For CEs that
 * don't have interrupts enabled, we look at the DDR based DRRI.
 *
 * Return: DRRI
 */
inline unsigned int hif_get_dst_ring_read_index(struct hif_softc *scn,
		uint32_t CE_ctrl_addr)
{
	struct CE_attr attr;

	attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
	if (attr.flags & CE_ATTR_DISABLE_INTR)
		return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
	else
		return A_TARGET_READ(scn,
				(CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
}

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This function allocates non cached memory on ddr and sends
 * the physical address of this memory to the CE hardware. The
 * hardware updates the RRI on this particular location.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	unsigned int i;
	qdf_dma_addr_t paddr_rri_on_ddr;
	uint32_t high_paddr, low_paddr;

	scn->vaddr_rri_on_ddr =
		(uint32_t *)qdf_mem_alloc_consistent(scn->qdf_dev,
		scn->qdf_dev->dev, (CE_COUNT * sizeof(uint32_t)),
		&paddr_rri_on_ddr);

	low_paddr  = BITS0_TO_31(paddr_rri_on_ddr);
	high_paddr = BITS32_TO_35(paddr_rri_on_ddr);

	HIF_ERROR("%s using srri and drri from DDR\n", __func__);

	WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW(scn, low_paddr);
	WRITE_CE_DDR_ADDRESS_FOR_RRI_HIGH(scn, high_paddr);

	for (i = 0; i < CE_COUNT; i++)
		CE_IDX_UPD_EN_SET(scn, CE_BASE_ADDRESS(i));

	qdf_mem_zero(scn->vaddr_rri_on_ddr, CE_COUNT * sizeof(uint32_t));

	return;
}
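/*
 * Note on the address split above: BITS0_TO_31()/BITS32_TO_35() break the
 * DMA address of the RRI array into its low 32 bits and the remaining high
 * bits so the two halves can be programmed through the separate
 * WRITE_CE_DDR_ADDRESS_FOR_RRI_LOW/HIGH register writes.
 */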
#else

/**
 * hif_config_rri_on_ddr(): Configure the RRI on DDR mechanism
 *
 * @scn: hif_softc pointer
 *
 * This is a dummy implementation for platforms that don't
 * support this functionality.
 *
 * Return: None
 */
static inline void hif_config_rri_on_ddr(struct hif_softc *scn)
{
	return;
}
#endif
/**
 * hif_dump_ce_registers() - dump ce registers
 * @scn: hif_softc pointer.
 *
 * Output the copy engine registers
 *
 * Return: 0 for success or error code
 */
int hif_dump_ce_registers(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	uint32_t ce_reg_address = CE0_BASE_ADDRESS;
	uint32_t ce_reg_values[CE_COUNT_MAX][CE_USEFUL_SIZE >> 2];
	uint32_t ce_reg_word_size = CE_USEFUL_SIZE >> 2;
	uint16_t i;
	QDF_STATUS status;

	for (i = 0; i < CE_COUNT_MAX; i++, ce_reg_address += CE_OFFSET) {
		status = hif_diag_read_mem(hif_hdl, ce_reg_address,
					   (uint8_t *) &ce_reg_values[i][0],
					   ce_reg_word_size *
					   sizeof(uint32_t));

		if (status != QDF_STATUS_SUCCESS) {
			HIF_ERROR("Dumping CE register failed!");
			return -EACCES;
		}
		HIF_ERROR("CE%d Registers:", i);
		qdf_trace_hex_dump(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG,
				   (uint8_t *) &ce_reg_values[i][0],
				   ce_reg_word_size * sizeof(uint32_t));
	}
	return 0;
}