hif_main.c 51 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172
  1. /*
  2. * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include "targcfg.h"
  19. #include "qdf_lock.h"
  20. #include "qdf_status.h"
  21. #include "qdf_status.h"
  22. #include <qdf_atomic.h> /* qdf_atomic_read */
  23. #include <targaddrs.h>
  24. #include "hif_io32.h"
  25. #include <hif.h>
  26. #include <target_type.h>
  27. #include "regtable.h"
  28. #define ATH_MODULE_NAME hif
  29. #include <a_debug.h>
  30. #include "hif_main.h"
  31. #include "hif_hw_version.h"
  32. #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
  33. defined(HIF_IPCI))
  34. #include "ce_tasklet.h"
  35. #include "ce_api.h"
  36. #endif
  37. #include "qdf_trace.h"
  38. #include "qdf_status.h"
  39. #include "hif_debug.h"
  40. #include "mp_dev.h"
  41. #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
  42. defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574)
  43. #include "hal_api.h"
  44. #endif
  45. #include "hif_napi.h"
  46. #include "hif_unit_test_suspend_i.h"
  47. #include "qdf_module.h"
  48. #ifdef HIF_CE_LOG_INFO
  49. #include <qdf_notifier.h>
  50. #include <qdf_hang_event_notifier.h>
  51. #endif
  52. #include <linux/cpumask.h>
  53. #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
  54. #include <pld_common.h>
  55. #endif
  56. void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
  57. {
  58. hif_trigger_dump(hif_ctx, cmd_id, start);
  59. }
  60. /**
  61. * hif_get_target_id(): hif_get_target_id
  62. *
  63. * Return the virtual memory base address to the caller
  64. *
  65. * @scn: hif_softc
  66. *
  67. * Return: A_target_id_t
  68. */
  69. A_target_id_t hif_get_target_id(struct hif_softc *scn)
  70. {
  71. return scn->mem;
  72. }
  73. /**
  74. * hif_get_targetdef(): hif_get_targetdef
  75. * @scn: scn
  76. *
  77. * Return: void *
  78. */
  79. void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
  80. {
  81. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  82. return scn->targetdef;
  83. }
  84. #ifdef FORCE_WAKE
  85. void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
  86. bool init_phase)
  87. {
  88. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  89. if (ce_srng_based(scn))
  90. hal_set_init_phase(scn->hal_soc, init_phase);
  91. }
  92. #endif /* FORCE_WAKE */
  93. #ifdef HIF_IPCI
  94. void hif_shutdown_notifier_cb(void *hif_ctx)
  95. {
  96. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  97. scn->recovery = true;
  98. }
  99. #endif
  100. /**
  101. * hif_vote_link_down(): unvote for link up
  102. *
  103. * Call hif_vote_link_down to release a previous request made using
  104. * hif_vote_link_up. A hif_vote_link_down call should only be made
  105. * after a corresponding hif_vote_link_up, otherwise you could be
  106. * negating a vote from another source. When no votes are present
  107. * hif will not guarantee the linkstate after hif_bus_suspend.
  108. *
  109. * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
  110. * and initialization deinitialization sequencences.
  111. *
  112. * Return: n/a
  113. */
  114. void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
  115. {
  116. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  117. QDF_BUG(scn);
  118. scn->linkstate_vote--;
  119. hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
  120. if (scn->linkstate_vote == 0)
  121. hif_bus_prevent_linkdown(scn, false);
  122. }
  123. /**
  124. * hif_vote_link_up(): vote to prevent bus from suspending
  125. *
  126. * Makes hif guarantee that fw can message the host normally
  127. * durring suspend.
  128. *
  129. * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
  130. * and initialization deinitialization sequencences.
  131. *
  132. * Return: n/a
  133. */
  134. void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
  135. {
  136. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  137. QDF_BUG(scn);
  138. scn->linkstate_vote++;
  139. hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
  140. if (scn->linkstate_vote == 1)
  141. hif_bus_prevent_linkdown(scn, true);
  142. }
  143. /**
  144. * hif_can_suspend_link(): query if hif is permitted to suspend the link
  145. *
  146. * Hif will ensure that the link won't be suspended if the upperlayers
  147. * don't want it to.
  148. *
  149. * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
  150. * we don't need extra locking to ensure votes dont change while
  151. * we are in the process of suspending or resuming.
  152. *
  153. * Return: false if hif will guarantee link up durring suspend.
  154. */
  155. bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
  156. {
  157. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  158. QDF_BUG(scn);
  159. return scn->linkstate_vote == 0;
  160. }
  161. /**
  162. * hif_hia_item_address(): hif_hia_item_address
  163. * @target_type: target_type
  164. * @item_offset: item_offset
  165. *
  166. * Return: n/a
  167. */
  168. uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
  169. {
  170. switch (target_type) {
  171. case TARGET_TYPE_AR6002:
  172. return AR6002_HOST_INTEREST_ADDRESS + item_offset;
  173. case TARGET_TYPE_AR6003:
  174. return AR6003_HOST_INTEREST_ADDRESS + item_offset;
  175. case TARGET_TYPE_AR6004:
  176. return AR6004_HOST_INTEREST_ADDRESS + item_offset;
  177. case TARGET_TYPE_AR6006:
  178. return AR6006_HOST_INTEREST_ADDRESS + item_offset;
  179. case TARGET_TYPE_AR9888:
  180. return AR9888_HOST_INTEREST_ADDRESS + item_offset;
  181. case TARGET_TYPE_AR6320:
  182. case TARGET_TYPE_AR6320V2:
  183. return AR6320_HOST_INTEREST_ADDRESS + item_offset;
  184. case TARGET_TYPE_ADRASTEA:
  185. /* ADRASTEA doesn't have a host interest address */
  186. ASSERT(0);
  187. return 0;
  188. case TARGET_TYPE_AR900B:
  189. return AR900B_HOST_INTEREST_ADDRESS + item_offset;
  190. case TARGET_TYPE_QCA9984:
  191. return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
  192. case TARGET_TYPE_QCA9888:
  193. return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
  194. case TARGET_TYPE_IPQ4019:
  195. return IPQ4019_HOST_INTEREST_ADDRESS + item_offset;
  196. default:
  197. ASSERT(0);
  198. return 0;
  199. }
  200. }
  201. /**
  202. * hif_max_num_receives_reached() - check max receive is reached
  203. * @scn: HIF Context
  204. * @count: unsigned int.
  205. *
  206. * Output check status as bool
  207. *
  208. * Return: bool
  209. */
  210. bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
  211. {
  212. if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
  213. return count > 120;
  214. else
  215. return count > MAX_NUM_OF_RECEIVES;
  216. }
  217. /**
  218. * init_buffer_count() - initial buffer count
  219. * @maxSize: qdf_size_t
  220. *
  221. * routine to modify the initial buffer count to be allocated on an os
  222. * platform basis. Platform owner will need to modify this as needed
  223. *
  224. * Return: qdf_size_t
  225. */
  226. qdf_size_t init_buffer_count(qdf_size_t maxSize)
  227. {
  228. return maxSize;
  229. }
  230. /**
  231. * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
  232. * @hif_ctx: hif context
  233. * @htc_htt_tx_endpoint: htt_tx_endpoint
  234. *
  235. * Return: void
  236. */
  237. void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
  238. int htc_htt_tx_endpoint)
  239. {
  240. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  241. if (!scn) {
  242. hif_err("scn or scn->hif_sc is NULL!");
  243. return;
  244. }
  245. scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
  246. }
  247. qdf_export_symbol(hif_save_htc_htt_config_endpoint);
  248. static const struct qwlan_hw qwlan_hw_list[] = {
  249. {
  250. .id = AR6320_REV1_VERSION,
  251. .subid = 0,
  252. .name = "QCA6174_REV1",
  253. },
  254. {
  255. .id = AR6320_REV1_1_VERSION,
  256. .subid = 0x1,
  257. .name = "QCA6174_REV1_1",
  258. },
  259. {
  260. .id = AR6320_REV1_3_VERSION,
  261. .subid = 0x2,
  262. .name = "QCA6174_REV1_3",
  263. },
  264. {
  265. .id = AR6320_REV2_1_VERSION,
  266. .subid = 0x4,
  267. .name = "QCA6174_REV2_1",
  268. },
  269. {
  270. .id = AR6320_REV2_1_VERSION,
  271. .subid = 0x5,
  272. .name = "QCA6174_REV2_2",
  273. },
  274. {
  275. .id = AR6320_REV3_VERSION,
  276. .subid = 0x6,
  277. .name = "QCA6174_REV2.3",
  278. },
  279. {
  280. .id = AR6320_REV3_VERSION,
  281. .subid = 0x8,
  282. .name = "QCA6174_REV3",
  283. },
  284. {
  285. .id = AR6320_REV3_VERSION,
  286. .subid = 0x9,
  287. .name = "QCA6174_REV3_1",
  288. },
  289. {
  290. .id = AR6320_REV3_2_VERSION,
  291. .subid = 0xA,
  292. .name = "AR6320_REV3_2_VERSION",
  293. },
  294. {
  295. .id = QCA6390_V1,
  296. .subid = 0x0,
  297. .name = "QCA6390_V1",
  298. },
  299. {
  300. .id = QCA6490_V1,
  301. .subid = 0x0,
  302. .name = "QCA6490_V1",
  303. },
  304. {
  305. .id = WCN3990_v1,
  306. .subid = 0x0,
  307. .name = "WCN3990_V1",
  308. },
  309. {
  310. .id = WCN3990_v2,
  311. .subid = 0x0,
  312. .name = "WCN3990_V2",
  313. },
  314. {
  315. .id = WCN3990_v2_1,
  316. .subid = 0x0,
  317. .name = "WCN3990_V2.1",
  318. },
  319. {
  320. .id = WCN3998,
  321. .subid = 0x0,
  322. .name = "WCN3998",
  323. },
  324. {
  325. .id = QCA9379_REV1_VERSION,
  326. .subid = 0xC,
  327. .name = "QCA9379_REV1",
  328. },
  329. {
  330. .id = QCA9379_REV1_VERSION,
  331. .subid = 0xD,
  332. .name = "QCA9379_REV1_1",
  333. },
  334. {
  335. .id = WCN7850_V1,
  336. .subid = 0xE,
  337. .name = "WCN7850_V1",
  338. }
  339. };
  340. /**
  341. * hif_get_hw_name(): get a human readable name for the hardware
  342. * @info: Target Info
  343. *
  344. * Return: human readable name for the underlying wifi hardware.
  345. */
  346. static const char *hif_get_hw_name(struct hif_target_info *info)
  347. {
  348. int i;
  349. if (info->hw_name)
  350. return info->hw_name;
  351. for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
  352. if (info->target_version == qwlan_hw_list[i].id &&
  353. info->target_revision == qwlan_hw_list[i].subid) {
  354. return qwlan_hw_list[i].name;
  355. }
  356. }
  357. info->hw_name = qdf_mem_malloc(64);
  358. if (!info->hw_name)
  359. return "Unknown Device (nomem)";
  360. i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
  361. info->target_version);
  362. if (i < 0)
  363. return "Unknown Device (snprintf failure)";
  364. else
  365. return info->hw_name;
  366. }
  367. /**
  368. * hif_get_hw_info(): hif_get_hw_info
  369. * @scn: scn
  370. * @version: version
  371. * @revision: revision
  372. *
  373. * Return: n/a
  374. */
  375. void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
  376. const char **target_name)
  377. {
  378. struct hif_target_info *info = hif_get_target_info_handle(scn);
  379. struct hif_softc *sc = HIF_GET_SOFTC(scn);
  380. if (sc->bus_type == QDF_BUS_TYPE_USB)
  381. hif_usb_get_hw_info(sc);
  382. *version = info->target_version;
  383. *revision = info->target_revision;
  384. *target_name = hif_get_hw_name(info);
  385. }
  386. /**
  387. * hif_get_dev_ba(): API to get device base address.
  388. * @scn: scn
  389. * @version: version
  390. * @revision: revision
  391. *
  392. * Return: n/a
  393. */
  394. void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
  395. {
  396. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  397. return scn->mem;
  398. }
  399. qdf_export_symbol(hif_get_dev_ba);
  400. /**
  401. * hif_get_dev_ba_ce(): API to get device ce base address.
  402. * @scn: scn
  403. *
  404. * Return: dev mem base address for CE
  405. */
  406. void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
  407. {
  408. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  409. return scn->mem_ce;
  410. }
  411. qdf_export_symbol(hif_get_dev_ba_ce);
  412. #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
  413. /**
  414. * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
  415. * @scn: hif context
  416. * @psoc: psoc objmgr handle
  417. *
  418. * Return: None
  419. */
  420. static inline
  421. void hif_get_cfg_from_psoc(struct hif_softc *scn,
  422. struct wlan_objmgr_psoc *psoc)
  423. {
  424. if (psoc) {
  425. scn->ini_cfg.ce_status_ring_timer_threshold =
  426. cfg_get(psoc,
  427. CFG_CE_STATUS_RING_TIMER_THRESHOLD);
  428. scn->ini_cfg.ce_status_ring_batch_count_threshold =
  429. cfg_get(psoc,
  430. CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
  431. }
  432. }
  433. #else
  434. static inline
  435. void hif_get_cfg_from_psoc(struct hif_softc *scn,
  436. struct wlan_objmgr_psoc *psoc)
  437. {
  438. }
  439. #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
  440. #if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
  441. /**
  442. * hif_recovery_notifier_cb - Recovery notifier callback to log
  443. * hang event data
  444. * @block: notifier block
  445. * @state: state
  446. * @data: notifier data
  447. *
  448. * Return: status
  449. */
  450. static
  451. int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
  452. void *data)
  453. {
  454. struct qdf_notifer_data *notif_data = data;
  455. qdf_notif_block *notif_block;
  456. struct hif_softc *hif_handle;
  457. bool bus_id_invalid;
  458. if (!data || !block)
  459. return -EINVAL;
  460. notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
  461. hif_handle = notif_block->priv_data;
  462. if (!hif_handle)
  463. return -EINVAL;
  464. bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
  465. &notif_data->offset);
  466. if (bus_id_invalid)
  467. return NOTIFY_STOP_MASK;
  468. hif_log_ce_info(hif_handle, notif_data->hang_data,
  469. &notif_data->offset);
  470. return 0;
  471. }
  472. /**
  473. * hif_register_recovery_notifier - Register hif recovery notifier
  474. * @hif_handle: hif handle
  475. *
  476. * Return: status
  477. */
  478. static
  479. QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
  480. {
  481. qdf_notif_block *hif_notifier;
  482. if (!hif_handle)
  483. return QDF_STATUS_E_FAILURE;
  484. hif_notifier = &hif_handle->hif_recovery_notifier;
  485. hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
  486. hif_notifier->priv_data = hif_handle;
  487. return qdf_hang_event_register_notifier(hif_notifier);
  488. }
  489. /**
  490. * hif_unregister_recovery_notifier - Un-register hif recovery notifier
  491. * @hif_handle: hif handle
  492. *
  493. * Return: status
  494. */
  495. static
  496. QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
  497. {
  498. qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;
  499. return qdf_hang_event_unregister_notifier(hif_notifier);
  500. }
  501. #else
  502. static inline
  503. QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
  504. {
  505. return QDF_STATUS_SUCCESS;
  506. }
  507. static inline
  508. QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
  509. {
  510. return QDF_STATUS_SUCCESS;
  511. }
  512. #endif
  513. #ifdef HIF_CPU_PERF_AFFINE_MASK
  514. /**
  515. * __hif_cpu_hotplug_notify() - CPU hotplug event handler
  516. * @cpu: CPU Id of the CPU generating the event
  517. * @cpu_up: true if the CPU is online
  518. *
  519. * Return: None
  520. */
  521. static void __hif_cpu_hotplug_notify(void *context,
  522. uint32_t cpu, bool cpu_up)
  523. {
  524. struct hif_softc *scn = context;
  525. if (!scn)
  526. return;
  527. if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
  528. return;
  529. if (cpu_up) {
  530. hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
  531. hif_debug("Setting affinity for online CPU: %d", cpu);
  532. } else {
  533. hif_debug("Skip setting affinity for offline CPU: %d", cpu);
  534. }
  535. }
  536. /**
  537. * hif_cpu_hotplug_notify - cpu core up/down notification
  538. * handler
  539. * @cpu: CPU generating the event
  540. * @cpu_up: true if the CPU is online
  541. *
  542. * Return: None
  543. */
  544. static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
  545. {
  546. struct qdf_op_sync *op_sync;
  547. if (qdf_op_protect(&op_sync))
  548. return;
  549. __hif_cpu_hotplug_notify(context, cpu, cpu_up);
  550. qdf_op_unprotect(op_sync);
  551. }
  552. static void hif_cpu_online_cb(void *context, uint32_t cpu)
  553. {
  554. hif_cpu_hotplug_notify(context, cpu, true);
  555. }
  556. static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
  557. {
  558. hif_cpu_hotplug_notify(context, cpu, false);
  559. }
  560. static void hif_cpuhp_register(struct hif_softc *scn)
  561. {
  562. if (!scn) {
  563. hif_info_high("cannot register hotplug notifiers");
  564. return;
  565. }
  566. qdf_cpuhp_register(&scn->cpuhp_event_handle,
  567. scn,
  568. hif_cpu_online_cb,
  569. hif_cpu_before_offline_cb);
  570. }
  571. static void hif_cpuhp_unregister(struct hif_softc *scn)
  572. {
  573. if (!scn) {
  574. hif_info_high("cannot unregister hotplug notifiers");
  575. return;
  576. }
  577. qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
  578. }
  579. #else
  580. static void hif_cpuhp_register(struct hif_softc *scn)
  581. {
  582. }
  583. static void hif_cpuhp_unregister(struct hif_softc *scn)
  584. {
  585. }
  586. #endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
  587. #ifdef HIF_DETECTION_LATENCY_ENABLE
  588. /**
  589. * hif_check_detection_latency(): to check if latency for tasklet/credit
  590. *
  591. * @scn: hif context
  592. * @from_timer: if called from timer handler
  593. * @bitmap_type: indicate if check tasklet or credit
  594. *
  595. * Return: none
  596. */
  597. void hif_check_detection_latency(struct hif_softc *scn,
  598. bool from_timer,
  599. uint32_t bitmap_type)
  600. {
  601. qdf_time_t ce2_tasklet_sched_time =
  602. scn->latency_detect.ce2_tasklet_sched_time;
  603. qdf_time_t ce2_tasklet_exec_time =
  604. scn->latency_detect.ce2_tasklet_exec_time;
  605. qdf_time_t credit_request_time =
  606. scn->latency_detect.credit_request_time;
  607. qdf_time_t credit_report_time =
  608. scn->latency_detect.credit_report_time;
  609. qdf_time_t curr_jiffies = qdf_system_ticks();
  610. uint32_t detect_latency_threshold =
  611. scn->latency_detect.detect_latency_threshold;
  612. int cpu_id = qdf_get_cpu();
  613. if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
  614. return;
  615. if (!scn->latency_detect.enable_detection)
  616. return;
  617. /* 2 kinds of check here.
  618. * from_timer==true: check if tasklet or credit report stall
  619. * from_timer==false: check tasklet execute or credit report comes late
  620. */
  621. if (bitmap_type & BIT(HIF_DETECT_TASKLET) &&
  622. (from_timer ?
  623. qdf_system_time_after(ce2_tasklet_sched_time,
  624. ce2_tasklet_exec_time) :
  625. qdf_system_time_after(ce2_tasklet_exec_time,
  626. ce2_tasklet_sched_time)) &&
  627. qdf_system_time_after(
  628. curr_jiffies,
  629. ce2_tasklet_sched_time +
  630. qdf_system_msecs_to_ticks(detect_latency_threshold))) {
  631. hif_err("tasklet ce2 latency: from_timer %d, curr_jiffies %lu, ce2_tasklet_sched_time %lu,ce2_tasklet_exec_time %lu, detect_latency_threshold %ums detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
  632. from_timer, curr_jiffies, ce2_tasklet_sched_time,
  633. ce2_tasklet_exec_time, detect_latency_threshold,
  634. scn->latency_detect.detect_latency_timer_timeout,
  635. cpu_id, (void *)_RET_IP_);
  636. goto latency;
  637. }
  638. if (bitmap_type & BIT(HIF_DETECT_CREDIT) &&
  639. (from_timer ?
  640. qdf_system_time_after(credit_request_time,
  641. credit_report_time) :
  642. qdf_system_time_after(credit_report_time,
  643. credit_request_time)) &&
  644. qdf_system_time_after(
  645. curr_jiffies,
  646. credit_request_time +
  647. qdf_system_msecs_to_ticks(detect_latency_threshold))) {
  648. hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu,credit_report_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
  649. from_timer, curr_jiffies, credit_request_time,
  650. credit_report_time, detect_latency_threshold,
  651. scn->latency_detect.detect_latency_timer_timeout,
  652. cpu_id, (void *)_RET_IP_);
  653. goto latency;
  654. }
  655. return;
  656. latency:
  657. qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
  658. }
  659. static void hif_latency_detect_timeout_handler(void *arg)
  660. {
  661. struct hif_softc *scn = (struct hif_softc *)arg;
  662. int next_cpu;
  663. hif_check_detection_latency(scn, true,
  664. BIT(HIF_DETECT_TASKLET) |
  665. BIT(HIF_DETECT_CREDIT));
  666. /* it need to make sure timer start on a differnt cpu,
  667. * so it can detect the tasklet schedule stall, but there
  668. * is still chance that, after timer has been started, then
  669. * irq/tasklet happens on the same cpu, then tasklet will
  670. * execute before softirq timer, if this tasklet stall, the
  671. * timer can't detect it, we can accept this as a limition,
  672. * if tasklet stall, anyway other place will detect it, just
  673. * a little later.
  674. */
  675. next_cpu = cpumask_any_but(
  676. cpu_active_mask,
  677. scn->latency_detect.ce2_tasklet_sched_cpuid);
  678. if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
  679. hif_debug("start timer on local");
  680. /* it doesn't found a available cpu, start on local cpu*/
  681. qdf_timer_mod(
  682. &scn->latency_detect.detect_latency_timer,
  683. scn->latency_detect.detect_latency_timer_timeout);
  684. } else {
  685. qdf_timer_start_on(
  686. &scn->latency_detect.detect_latency_timer,
  687. scn->latency_detect.detect_latency_timer_timeout,
  688. next_cpu);
  689. }
  690. }
  691. static void hif_latency_detect_timer_init(struct hif_softc *scn)
  692. {
  693. if (!scn) {
  694. hif_info_high("scn is null");
  695. return;
  696. }
  697. if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
  698. return;
  699. scn->latency_detect.detect_latency_timer_timeout =
  700. DETECTION_TIMER_TIMEOUT;
  701. scn->latency_detect.detect_latency_threshold =
  702. DETECTION_LATENCY_THRESHOLD;
  703. hif_info("timer timeout %u, latency threshold %u",
  704. scn->latency_detect.detect_latency_timer_timeout,
  705. scn->latency_detect.detect_latency_threshold);
  706. scn->latency_detect.is_timer_started = false;
  707. qdf_timer_init(NULL,
  708. &scn->latency_detect.detect_latency_timer,
  709. &hif_latency_detect_timeout_handler,
  710. scn,
  711. QDF_TIMER_TYPE_SW_SPIN);
  712. }
  713. static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
  714. {
  715. if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
  716. return;
  717. hif_info("deinit timer");
  718. qdf_timer_free(&scn->latency_detect.detect_latency_timer);
  719. }
/**
 * hif_latency_detect_timer_start() - arm the latency-detection timer
 * @hif_ctx: HIF opaque context
 *
 * Mission mode only. No-op (with a log) if the timer is already
 * running, as tracked by the is_timer_started flag.
 */
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_info_rl("start timer");
	if (scn->latency_detect.is_timer_started) {
		hif_info("timer has been started");
		return;
	}

	qdf_timer_start(&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout);
	scn->latency_detect.is_timer_started = true;
}
/**
 * hif_latency_detect_timer_stop() - synchronously cancel the timer
 * @hif_ctx: HIF opaque context
 *
 * Mission mode only. Uses the sync cancel variant, so any in-flight
 * timer handler completes before this returns; then clears the
 * is_timer_started flag.
 */
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_info_rl("stop timer");
	qdf_timer_sync_cancel(&scn->latency_detect.detect_latency_timer);
	scn->latency_detect.is_timer_started = false;
}
  743. void hif_latency_detect_credit_record_time(
  744. enum hif_credit_exchange_type type,
  745. struct hif_opaque_softc *hif_ctx)
  746. {
  747. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  748. if (!scn) {
  749. hif_err("Could not do runtime put, scn is null");
  750. return;
  751. }
  752. if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
  753. return;
  754. if (HIF_REQUEST_CREDIT == type)
  755. scn->latency_detect.credit_request_time = qdf_system_ticks();
  756. else if (HIF_PROCESS_CREDIT_REPORT == type)
  757. scn->latency_detect.credit_report_time = qdf_system_ticks();
  758. hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
  759. }
  760. void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
  761. {
  762. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  763. if (!scn) {
  764. hif_err("Could not do runtime put, scn is null");
  765. return;
  766. }
  767. if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
  768. return;
  769. scn->latency_detect.enable_detection = value;
  770. }
  771. #else
/* Latency detection compiled out in this configuration: no-op stub. */
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{}
/* Latency detection compiled out in this configuration: no-op stub. */
static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{}
  776. #endif
/**
 * hif_open() - allocate and initialize a HIF context
 * @qdf_ctx: qdf device handle stored for later DMA/timer use
 * @mode: driver mode of operation (con param)
 * @bus_type: bus type; also determines the allocation size
 * @cbk: driver-state callbacks, copied by value into the context
 * @psoc: psoc object used to fetch configuration
 *
 * Return: opaque HIF handle, or NULL on allocation/bus-open failure.
 */
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
				  uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk,
				  struct wlan_objmgr_psoc *psoc)
{
	struct hif_softc *scn;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	/* bus-specific softc is larger than struct hif_softc itself */
	int bus_context_size = hif_bus_get_context_size(bus_type);

	if (bus_context_size == 0) {
		hif_err("context size 0 not allowed");
		return NULL;
	}

	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
	if (!scn)
		return GET_HIF_OPAQUE_HDL(scn); /* i.e. NULL */

	scn->qdf_dev = qdf_ctx;
	scn->hif_con_param = mode;
	qdf_atomic_init(&scn->active_tasklet_cnt);
	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
	qdf_atomic_init(&scn->link_suspended);
	qdf_atomic_init(&scn->tasklet_from_intr);
	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
	/* callbacks are copied by value; caller's struct may go away */
	qdf_mem_copy(&scn->callbacks, cbk,
		     sizeof(struct hif_driver_state_callbacks));
	scn->bus_type = bus_type;

	/* link is down until hif_enable() brings it up */
	hif_pm_set_link_state(GET_HIF_OPAQUE_HDL(scn), HIF_PM_LINK_STATE_DOWN);
	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
	hif_get_cfg_from_psoc(scn, psoc);
	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));

	status = hif_bus_open(scn, bus_type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_bus_open error = %d, bus_type = %d",
			status, bus_type);
		/* free and fall through to return NULL */
		qdf_mem_free(scn);
		scn = NULL;
		goto out;
	}

	hif_cpuhp_register(scn);
	hif_latency_detect_timer_init(scn);

out:
	return GET_HIF_OPAQUE_HDL(scn);
}
  820. #ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
 * @scn: hif context
 *
 * Frees the CE_COUNT * sizeof(uint32_t) consistent buffer if one was
 * allocated, then clears the cached virtual address unconditionally.
 *
 * Return: none
 */
void hif_uninit_rri_on_ddr(struct hif_softc *scn)
{
	if (scn->vaddr_rri_on_ddr)
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					(CE_COUNT * sizeof(uint32_t)),
					scn->vaddr_rri_on_ddr,
					scn->paddr_rri_on_ddr, 0);

	scn->vaddr_rri_on_ddr = NULL;
}
  836. #endif
/**
 * hif_close(): hif_close
 * @hif_ctx: hif_ctx
 *
 * Tears down everything hif_open() set up, then frees the context.
 *
 * Return: n/a
 */
void hif_close(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_opaque_softc is NULL");
		return;
	}

	hif_latency_detect_timer_deinit(scn);

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	if (scn->target_info.hw_name) {
		char *hw_name = scn->target_info.hw_name;
		/* Replace the pointer with a static placeholder before
		 * freeing, presumably so a racing reader never sees the
		 * freed buffer -- NOTE(review): confirm against callers.
		 */
		scn->target_info.hw_name = "ErrUnloading";
		qdf_mem_free(hw_name);
	}

	hif_uninit_rri_on_ddr(scn);
	hif_cleanup_static_buf_to_target(scn);
	hif_cpuhp_unregister(scn);

	hif_bus_close(scn);

	qdf_mem_free(scn);
}
/**
 * hif_get_num_active_grp_tasklets() - get the number of active
 * datapath group tasklets pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of datapath group tasklets which are active
 */
static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
}
  877. #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
  878. defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
  879. defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
  880. defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
  881. defined(QCA_WIFI_WCN7850) || defined(QCA_WIFI_QCN9224) || \
  882. defined(QCA_WIFI_QCA9574))
/**
 * hif_get_num_pending_work() - get the number of entries in
 * the workqueue pending to be completed.
 * @scn: HIF context
 *
 * Delegates to the HAL delayed-register-write pending-work counter.
 *
 * Returns: the number of pending work entries
 */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return hal_get_reg_write_pending_work(scn->hal_soc);
}
  894. #else
/* Targets without HAL delayed register writes have no pending work. */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return 0;
}
  899. #endif
/**
 * hif_try_complete_tasks() - wait for tasklets/group tasklets/pending
 * work to drain
 * @scn: HIF context
 *
 * Polls in 10 ms steps up to HIF_TASK_DRAIN_WAIT_CNT iterations.
 * Note the short-circuit ||: grp_tasklet is only refreshed when
 * tasklet is 0, and work only when both are 0, so the values printed
 * for the later terms can be stale (initially 0).
 *
 * Return: QDF_STATUS_SUCCESS once drained, QDF_STATUS_E_FAULT on timeout.
 */
QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
{
	uint32_t task_drain_wait_cnt = 0;
	int tasklet = 0, grp_tasklet = 0, work = 0;

	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
	       (work = hif_get_num_pending_work(scn))) {
		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
			hif_err("pending tasklets %d grp tasklets %d work %d",
				tasklet, grp_tasklet, work);
			return QDF_STATUS_E_FAULT;
		}
		hif_info("waiting for tasklets %d grp tasklets %d work %d",
			 tasklet, grp_tasklet, work);
		msleep(10);
	}

	return QDF_STATUS_SUCCESS;
}
  918. #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
/**
 * hif_try_prevent_ep_vote_access() - disable EP vote access and wait
 * for outstanding work and the PCI EP to go idle
 * @hif_ctx: HIF opaque context
 *
 * Disables both vote-access flags, then waits (10 ms steps) for the
 * delayed-register-write workqueue to drain and (5 ms steps) for the
 * PCI EP to report not-awake. On workqueue-drain timeout the access
 * flags are restored to ENABLE; on EP-awake timeout they are NOT
 * restored -- NOTE(review): asymmetry looks intentional, confirm.
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAULT on either timeout.
 */
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint32_t work_drain_wait_cnt = 0;
	uint32_t wait_cnt = 0;
	int work = 0;

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);

	while ((work = hif_get_num_pending_work(scn))) {
		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
			/* give access back so the system is not left stuck */
			qdf_atomic_set(&scn->dp_ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			qdf_atomic_set(&scn->ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			hif_err("timeout wait for pending work %d ", work);
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(10);
	}

	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
			hif_err("Release EP vote is not proceed by Fw");
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(5);
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * hif_set_ep_intermediate_vote_access() - move EP vote access to the
 * intermediate state
 * @hif_ctx: HIF opaque context
 *
 * First prevents further EP vote access (asserting QDF_BUG on
 * failure), then marks the generic vote as INTERMEDIATE.
 */
void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint8_t vote_access;

	vote_access = qdf_atomic_read(&scn->ep_vote_access);

	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
		hif_info("EP vote changed from:%u to intermediate state",
			 vote_access);

	/* fatal if access could not be revoked */
	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
		QDF_BUG(0);

	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
}
/**
 * hif_allow_ep_vote_access() - enable both EP vote-access flags
 * @hif_ctx: HIF opaque context
 */
void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_ENABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_ENABLE);
}
  970. void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
  971. uint8_t type, uint8_t access)
  972. {
  973. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  974. if (type == HIF_EP_VOTE_DP_ACCESS)
  975. qdf_atomic_set(&scn->dp_ep_vote_access, access);
  976. else
  977. qdf_atomic_set(&scn->ep_vote_access, access);
  978. }
  979. uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
  980. uint8_t type)
  981. {
  982. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  983. if (type == HIF_EP_VOTE_DP_ACCESS)
  984. return qdf_atomic_read(&scn->dp_ep_vote_access);
  985. else
  986. return qdf_atomic_read(&scn->ep_vote_access);
  987. }
  988. #endif
  989. #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
  990. defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
  991. defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
  992. defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
  993. defined(QCA_WIFI_WCN7850) || defined(QCA_WIFI_QCN9224) || \
  994. defined(QCA_WIFI_QCA9574))
  995. static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
  996. {
  997. if (ce_srng_based(scn)) {
  998. scn->hal_soc = hal_attach(
  999. hif_softc_to_hif_opaque_softc(scn),
  1000. scn->qdf_dev);
  1001. if (!scn->hal_soc)
  1002. return QDF_STATUS_E_FAILURE;
  1003. }
  1004. return QDF_STATUS_SUCCESS;
  1005. }
  1006. static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
  1007. {
  1008. if (ce_srng_based(scn)) {
  1009. hal_detach(scn->hal_soc);
  1010. scn->hal_soc = NULL;
  1011. }
  1012. return QDF_STATUS_SUCCESS;
  1013. }
  1014. #else
/* No HAL on these targets: attach is a successful no-op. */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
/* No HAL on these targets: detach is a successful no-op. */
static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
  1023. #endif
  1024. int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
  1025. {
  1026. int ret;
  1027. switch (bus_type) {
  1028. case QDF_BUS_TYPE_IPCI:
  1029. ret = qdf_set_dma_coherent_mask(dev,
  1030. DMA_COHERENT_MASK_DEFAULT);
  1031. if (ret) {
  1032. hif_err("Failed to set dma mask error = %d", ret);
  1033. return ret;
  1034. }
  1035. break;
  1036. default:
  1037. /* Follow the existing sequence for other targets */
  1038. break;
  1039. }
  1040. return 0;
  1041. }
/**
 * hif_enable(): hif_enable
 * @hif_ctx: hif_ctx
 * @dev: dev
 * @bdev: bus dev
 * @bid: bus ID
 * @bus_type: bus type
 * @type: enable type
 *
 * Brings the bus up, attaches HAL, configures the target, and starts
 * latency detection. Uses goto-based unwind on failure.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
		      void *bdev,
		      const struct hif_bus_id *bid,
		      enum qdf_bus_type bus_type,
		      enum hif_enable_type type)
{
	QDF_STATUS status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_ctx = NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = hif_enable_bus(scn, dev, bdev, bid, type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_enable_bus error = %d", status);
		return status;
	}

	hif_pm_set_link_state(GET_HIF_OPAQUE_HDL(scn), HIF_PM_LINK_STATE_UP);

	status = hif_hal_attach(scn);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hal attach failed");
		goto disable_bus;
	}

	if (hif_bus_configure(scn)) {
		hif_err("Target probe failed");
		status = QDF_STATUS_E_FAILURE;
		goto hal_detach;
	}

	hif_ut_suspend_init(scn);
	hif_register_recovery_notifier(scn);
	hif_latency_detect_timer_start(hif_ctx);

	/*
	 * Flag to avoid potential unallocated memory access from MSI
	 * interrupt handler which could get scheduled as soon as MSI
	 * is enabled, i.e to take care of the race due to the order
	 * in where MSI is enabled before the memory, that will be
	 * in interrupt handlers, is allocated.
	 */
	scn->hif_init_done = true;

	hif_debug("OK");

	return QDF_STATUS_SUCCESS;

hal_detach:
	hif_hal_detach(scn);
disable_bus:
	hif_disable_bus(scn);
	return status;
}
/**
 * hif_disable() - reverse of hif_enable()
 * @hif_ctx: HIF opaque context
 * @type: disable type (currently unused in this body)
 *
 * Stops latency detection, quiesces interrupts, shuts down or stops
 * the device depending on whether init completed, then detaches HAL
 * and disables the bus.
 */
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	hif_set_enable_detection(hif_ctx, false);
	hif_latency_detect_timer_stop(hif_ctx);

	hif_unregister_recovery_notifier(scn);

	hif_nointrs(scn);
	/* full shutdown if init never completed, orderly stop otherwise */
	if (scn->hif_init_done == false)
		hif_shutdown_device(hif_ctx);
	else
		hif_stop(hif_ctx);

	hif_hal_detach(scn);

	hif_pm_set_link_state(hif_ctx, HIF_PM_LINK_STATE_DOWN);
	hif_disable_bus(scn);

	hif_wlan_disable(scn);

	scn->notice_send = false;

	hif_debug("X");
}
  1120. #ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_enable_ce_latency_stats() - enable/disable CE latency statistics
 * @hif_ctx: HIF opaque context
 * @val: non-zero to enable CE latency stats collection
 */
void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	scn->ce_latency_stats = val;
}
  1128. #endif
/**
 * hif_display_stats() - print bus-level statistics
 * @hif_ctx: HIF opaque context
 */
void hif_display_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_display_bus_stats(hif_ctx);
}
qdf_export_symbol(hif_display_stats);
/**
 * hif_clear_stats() - reset bus-level statistics
 * @hif_ctx: HIF opaque context
 */
void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_clear_bus_stats(hif_ctx);
}
/**
 * hif_crash_shutdown_dump_bus_register() - dump bus registers
 * @hif_ctx: hif_ctx
 *
 * Skips the dump when the SoC status check fails; logs on dump failure.
 *
 * Return: n/a
 */
#if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
	struct hif_opaque_softc *scn = hif_ctx;

	if (hif_check_soc_status(scn))
		return;

	if (hif_dump_registers(scn))
		hif_err("Failed to dump bus registers!");
}
  1153. /**
  1154. * hif_crash_shutdown(): hif_crash_shutdown
  1155. *
  1156. * This function is called by the platform driver to dump CE registers
  1157. *
  1158. * @hif_ctx: hif_ctx
  1159. *
  1160. * Return: n/a
  1161. */
  1162. void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
  1163. {
  1164. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1165. if (!hif_ctx)
  1166. return;
  1167. if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
  1168. hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
  1169. return;
  1170. }
  1171. if (TARGET_STATUS_RESET == scn->target_status) {
  1172. hif_warn("Target is already asserted, ignore!");
  1173. return;
  1174. }
  1175. if (hif_is_load_or_unload_in_progress(scn)) {
  1176. hif_err("Load/unload is in progress, ignore!");
  1177. return;
  1178. }
  1179. hif_crash_shutdown_dump_bus_register(hif_ctx);
  1180. hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
  1181. if (ol_copy_ramdump(hif_ctx))
  1182. goto out;
  1183. hif_info("RAM dump collecting completed!");
  1184. out:
  1185. return;
  1186. }
  1187. #else
/* RAM dump collection compiled out: log and do nothing. */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	hif_debug("Collecting target RAM dump disabled");
}
  1192. #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
  1193. #ifdef QCA_WIFI_3_0
/**
 * hif_check_fw_reg(): hif_check_fw_reg
 * @scn: scn
 *
 * QCA_WIFI_3_0 stub: no firmware register check is needed.
 *
 * Return: int (always 0)
 */
int hif_check_fw_reg(struct hif_opaque_softc *scn)
{
	return 0;
}
  1205. #endif
/**
 * hif_read_phy_mem_base(): hif_read_phy_mem_base
 * @scn: scn
 * @phy_mem_base: out-param receiving the target's physical memory base
 *
 * Return: n/a
 */
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
{
	*phy_mem_base = scn->mem_pa;
}
qdf_export_symbol(hif_read_phy_mem_base);
  1218. /**
  1219. * hif_get_device_type(): hif_get_device_type
  1220. * @device_id: device_id
  1221. * @revision_id: revision_id
  1222. * @hif_type: returned hif_type
  1223. * @target_type: returned target_type
  1224. *
  1225. * Return: int
  1226. */
  1227. int hif_get_device_type(uint32_t device_id,
  1228. uint32_t revision_id,
  1229. uint32_t *hif_type, uint32_t *target_type)
  1230. {
  1231. int ret = 0;
  1232. switch (device_id) {
  1233. case ADRASTEA_DEVICE_ID_P2_E12:
  1234. *hif_type = HIF_TYPE_ADRASTEA;
  1235. *target_type = TARGET_TYPE_ADRASTEA;
  1236. break;
  1237. case AR9888_DEVICE_ID:
  1238. *hif_type = HIF_TYPE_AR9888;
  1239. *target_type = TARGET_TYPE_AR9888;
  1240. break;
  1241. case AR6320_DEVICE_ID:
  1242. switch (revision_id) {
  1243. case AR6320_FW_1_1:
  1244. case AR6320_FW_1_3:
  1245. *hif_type = HIF_TYPE_AR6320;
  1246. *target_type = TARGET_TYPE_AR6320;
  1247. break;
  1248. case AR6320_FW_2_0:
  1249. case AR6320_FW_3_0:
  1250. case AR6320_FW_3_2:
  1251. *hif_type = HIF_TYPE_AR6320V2;
  1252. *target_type = TARGET_TYPE_AR6320V2;
  1253. break;
  1254. default:
  1255. hif_err("dev_id = 0x%x, rev_id = 0x%x",
  1256. device_id, revision_id);
  1257. ret = -ENODEV;
  1258. goto end;
  1259. }
  1260. break;
  1261. case AR9887_DEVICE_ID:
  1262. *hif_type = HIF_TYPE_AR9888;
  1263. *target_type = TARGET_TYPE_AR9888;
  1264. hif_info(" *********** AR9887 **************");
  1265. break;
  1266. case QCA9984_DEVICE_ID:
  1267. *hif_type = HIF_TYPE_QCA9984;
  1268. *target_type = TARGET_TYPE_QCA9984;
  1269. hif_info(" *********** QCA9984 *************");
  1270. break;
  1271. case QCA9888_DEVICE_ID:
  1272. *hif_type = HIF_TYPE_QCA9888;
  1273. *target_type = TARGET_TYPE_QCA9888;
  1274. hif_info(" *********** QCA9888 *************");
  1275. break;
  1276. case AR900B_DEVICE_ID:
  1277. *hif_type = HIF_TYPE_AR900B;
  1278. *target_type = TARGET_TYPE_AR900B;
  1279. hif_info(" *********** AR900B *************");
  1280. break;
  1281. case IPQ4019_DEVICE_ID:
  1282. *hif_type = HIF_TYPE_IPQ4019;
  1283. *target_type = TARGET_TYPE_IPQ4019;
  1284. hif_info(" *********** IPQ4019 *************");
  1285. break;
  1286. case QCA8074_DEVICE_ID:
  1287. *hif_type = HIF_TYPE_QCA8074;
  1288. *target_type = TARGET_TYPE_QCA8074;
  1289. hif_info(" *********** QCA8074 *************");
  1290. break;
  1291. case QCA6290_EMULATION_DEVICE_ID:
  1292. case QCA6290_DEVICE_ID:
  1293. *hif_type = HIF_TYPE_QCA6290;
  1294. *target_type = TARGET_TYPE_QCA6290;
  1295. hif_info(" *********** QCA6290EMU *************");
  1296. break;
  1297. case QCN9000_DEVICE_ID:
  1298. *hif_type = HIF_TYPE_QCN9000;
  1299. *target_type = TARGET_TYPE_QCN9000;
  1300. hif_info(" *********** QCN9000 *************");
  1301. break;
  1302. case QCN9224_DEVICE_ID:
  1303. *hif_type = HIF_TYPE_QCN9224;
  1304. *target_type = TARGET_TYPE_QCN9224;
  1305. hif_info(" *********** QCN9224 *************");
  1306. break;
  1307. case QCN6122_DEVICE_ID:
  1308. *hif_type = HIF_TYPE_QCN6122;
  1309. *target_type = TARGET_TYPE_QCN6122;
  1310. hif_info(" *********** QCN6122 *************");
  1311. break;
  1312. case QCN7605_DEVICE_ID:
  1313. case QCN7605_COMPOSITE:
  1314. case QCN7605_STANDALONE:
  1315. case QCN7605_STANDALONE_V2:
  1316. case QCN7605_COMPOSITE_V2:
  1317. *hif_type = HIF_TYPE_QCN7605;
  1318. *target_type = TARGET_TYPE_QCN7605;
  1319. hif_info(" *********** QCN7605 *************");
  1320. break;
  1321. case QCA6390_DEVICE_ID:
  1322. case QCA6390_EMULATION_DEVICE_ID:
  1323. *hif_type = HIF_TYPE_QCA6390;
  1324. *target_type = TARGET_TYPE_QCA6390;
  1325. hif_info(" *********** QCA6390 *************");
  1326. break;
  1327. case QCA6490_DEVICE_ID:
  1328. case QCA6490_EMULATION_DEVICE_ID:
  1329. *hif_type = HIF_TYPE_QCA6490;
  1330. *target_type = TARGET_TYPE_QCA6490;
  1331. hif_info(" *********** QCA6490 *************");
  1332. break;
  1333. case QCA6750_DEVICE_ID:
  1334. case QCA6750_EMULATION_DEVICE_ID:
  1335. *hif_type = HIF_TYPE_QCA6750;
  1336. *target_type = TARGET_TYPE_QCA6750;
  1337. hif_info(" *********** QCA6750 *************");
  1338. break;
  1339. case WCN7850_DEVICE_ID:
  1340. *hif_type = HIF_TYPE_WCN7850;
  1341. *target_type = TARGET_TYPE_WCN7850;
  1342. hif_info(" *********** WCN7850 *************");
  1343. break;
  1344. case QCA8074V2_DEVICE_ID:
  1345. *hif_type = HIF_TYPE_QCA8074V2;
  1346. *target_type = TARGET_TYPE_QCA8074V2;
  1347. hif_info(" *********** QCA8074V2 *************");
  1348. break;
  1349. case QCA6018_DEVICE_ID:
  1350. case RUMIM2M_DEVICE_ID_NODE0:
  1351. case RUMIM2M_DEVICE_ID_NODE1:
  1352. case RUMIM2M_DEVICE_ID_NODE2:
  1353. case RUMIM2M_DEVICE_ID_NODE3:
  1354. case RUMIM2M_DEVICE_ID_NODE4:
  1355. case RUMIM2M_DEVICE_ID_NODE5:
  1356. *hif_type = HIF_TYPE_QCA6018;
  1357. *target_type = TARGET_TYPE_QCA6018;
  1358. hif_info(" *********** QCA6018 *************");
  1359. break;
  1360. case QCA5018_DEVICE_ID:
  1361. *hif_type = HIF_TYPE_QCA5018;
  1362. *target_type = TARGET_TYPE_QCA5018;
  1363. hif_info(" *********** qca5018 *************");
  1364. break;
  1365. case QCA9574_DEVICE_ID:
  1366. *hif_type = HIF_TYPE_QCA9574;
  1367. *target_type = TARGET_TYPE_QCA9574;
  1368. hif_info(" *********** QCA9574 *************");
  1369. break;
  1370. default:
  1371. hif_err("Unsupported device ID = 0x%x!", device_id);
  1372. ret = -ENODEV;
  1373. break;
  1374. }
  1375. if (*target_type == TARGET_TYPE_UNKNOWN) {
  1376. hif_err("Unsupported target_type!");
  1377. ret = -ENODEV;
  1378. }
  1379. end:
  1380. return ret;
  1381. }
/**
 * hif_get_bus_type() - return the bus type
 * @hif_hdl: HIF opaque context
 *
 * Return: enum qdf_bus_type
 */
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	return scn->bus_type;
}
/**
 * Target info and ini parameters are global to the driver.
 * These structures are therefore exposed to all modules in the driver,
 * so modules do not need to maintain multiple copies of the same info;
 * instead they get the handle from HIF and modify the data through HIF.
 */
/**
 * hif_get_ini_handle() - API to get hif_config_param handle
 * @hif_ctx: HIF Context
 *
 * Return: pointer to hif_config_info
 */
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);

	return &sc->hif_config;
}
/**
 * hif_get_target_info_handle() - API to get hif_target_info handle
 * @hif_ctx: HIF context
 *
 * Return: Pointer to hif_target_info
 */
struct hif_target_info *hif_get_target_info_handle(
					struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);

	return &sc->target_info;
}
qdf_export_symbol(hif_get_target_info_handle);
  1423. #ifdef RECEIVE_OFFLOAD
  1424. void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
  1425. void (offld_flush_handler)(void *))
  1426. {
  1427. if (hif_napi_enabled(scn, -1))
  1428. hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
  1429. else
  1430. hif_err("NAPI not enabled");
  1431. }
  1432. qdf_export_symbol(hif_offld_flush_cb_register);
  1433. void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
  1434. {
  1435. if (hif_napi_enabled(scn, -1))
  1436. hif_napi_rx_offld_flush_cb_deregister(scn);
  1437. else
  1438. hif_err("NAPI not enabled");
  1439. }
  1440. qdf_export_symbol(hif_offld_flush_cb_deregister);
  1441. int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
  1442. {
  1443. if (hif_napi_enabled(hif_hdl, -1))
  1444. return NAPI_PIPE2ID(ctx_id);
  1445. else
  1446. return ctx_id;
  1447. }
  1448. #else /* RECEIVE_OFFLOAD */
/* Without RECEIVE_OFFLOAD, the rx context id is always 0. */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}
qdf_export_symbol(hif_get_rx_ctx_id);
  1454. #endif /* RECEIVE_OFFLOAD */
  1455. #if defined(FEATURE_LRO)
  1456. /**
  1457. * hif_get_lro_info - Returns LRO instance for instance ID
  1458. * @ctx_id: LRO instance ID
  1459. * @hif_hdl: HIF Context
  1460. *
  1461. * Return: Pointer to LRO instance.
  1462. */
  1463. void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
  1464. {
  1465. void *data;
  1466. if (hif_napi_enabled(hif_hdl, -1))
  1467. data = hif_napi_get_lro_info(hif_hdl, ctx_id);
  1468. else
  1469. data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
  1470. return data;
  1471. }
  1472. #endif
/**
 * hif_get_target_status - API to get target status
 * @hif_ctx: HIF Context
 *
 * Return: enum hif_target_status
 */
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->target_status;
}
qdf_export_symbol(hif_get_target_status);
/**
 * hif_set_target_status() - API to set target status
 * @hif_ctx: HIF Context
 * @status: Target Status
 *
 * Return: void
 */
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status status)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->target_status = status;
}
/**
 * hif_init_ini_config() - API to initialize HIF configuration parameters
 * @hif_ctx: HIF Context
 * @cfg: HIF Configuration (copied by value into the context)
 *
 * Return: void
 */
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
}
  1511. /**
  1512. * hif_get_conparam() - API to get driver mode in HIF
  1513. * @scn: HIF Context
  1514. *
  1515. * Return: driver mode of operation
  1516. */
  1517. uint32_t hif_get_conparam(struct hif_softc *scn)
  1518. {
  1519. if (!scn)
  1520. return 0;
  1521. return scn->hif_con_param;
  1522. }
/**
 * hif_get_callbacks_handle() - API to get callbacks Handle
 * @scn: HIF Context
 *
 * Return: pointer to HIF Callbacks
 */
struct hif_driver_state_callbacks *hif_get_callbacks_handle(
							struct hif_softc *scn)
{
	return &scn->callbacks;
}
  1534. /**
  1535. * hif_is_driver_unloading() - API to query upper layers if driver is unloading
  1536. * @scn: HIF Context
  1537. *
  1538. * Return: True/False
  1539. */
  1540. bool hif_is_driver_unloading(struct hif_softc *scn)
  1541. {
  1542. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  1543. if (cbk && cbk->is_driver_unloading)
  1544. return cbk->is_driver_unloading(cbk->context);
  1545. return false;
  1546. }
  1547. /**
  1548. * hif_is_load_or_unload_in_progress() - API to query upper layers if
  1549. * load/unload in progress
  1550. * @scn: HIF Context
  1551. *
  1552. * Return: True/False
  1553. */
  1554. bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
  1555. {
  1556. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  1557. if (cbk && cbk->is_load_unload_in_progress)
  1558. return cbk->is_load_unload_in_progress(cbk->context);
  1559. return false;
  1560. }
  1561. /**
  1562. * hif_is_recovery_in_progress() - API to query upper layers if recovery in
  1563. * progress
  1564. * @scn: HIF Context
  1565. *
  1566. * Return: True/False
  1567. */
  1568. bool hif_is_recovery_in_progress(struct hif_softc *scn)
  1569. {
  1570. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  1571. if (cbk && cbk->is_recovery_in_progress)
  1572. return cbk->is_recovery_in_progress(cbk->context);
  1573. return false;
  1574. }
  1575. #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
  1576. defined(HIF_IPCI)
/**
 * hif_update_pipe_callback() - API to register pipe specific callbacks
 * @osc: Opaque softc
 * @pipeid: pipe id
 * @callbacks: callbacks to register (copied by value)
 *
 * Return: void
 */
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
			      u_int8_t pipeid,
			      struct hif_msg_callbacks *callbacks)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;

	/* out-of-range pipe id is a programming error */
	QDF_BUG(pipeid < CE_COUNT_MAX);

	hif_debug("pipeid: %d", pipeid);

	pipe_info = &hif_state->pipe_info[pipeid];

	qdf_mem_copy(&pipe_info->pipe_callbacks,
		     callbacks, sizeof(pipe_info->pipe_callbacks));
}
qdf_export_symbol(hif_update_pipe_callback);
  1599. /**
  1600. * hif_is_target_ready() - API to query if target is in ready state
  1601. * progress
  1602. * @scn: HIF Context
  1603. *
  1604. * Return: True/False
  1605. */
  1606. bool hif_is_target_ready(struct hif_softc *scn)
  1607. {
  1608. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  1609. if (cbk && cbk->is_target_ready)
  1610. return cbk->is_target_ready(cbk->context);
  1611. /*
  1612. * if callback is not registered then there is no way to determine
  1613. * if target is ready. In-such case return true to indicate that
  1614. * target is ready.
  1615. */
  1616. return true;
  1617. }
  1618. qdf_export_symbol(hif_is_target_ready);
  1619. int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
  1620. {
  1621. struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
  1622. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  1623. if (cbk && cbk->get_bandwidth_level)
  1624. return cbk->get_bandwidth_level(cbk->context);
  1625. return 0;
  1626. }
  1627. qdf_export_symbol(hif_get_bandwidth_level);
  1628. #ifdef DP_MEM_PRE_ALLOC
/**
 * hif_mem_alloc_consistent_unaligned() - allocate consistent memory,
 * preferring the pre-allocation pool
 * @scn: HIF context
 * @size: allocation size in bytes
 * @paddr: out-param receiving the physical address
 * @ring_type: ring type the allocation is for
 * @is_mem_prealloc: out-param, set true when served from the pool
 *
 * Falls back to a dynamic qdf consistent allocation when no prealloc
 * callback is registered or the pool cannot satisfy the request.
 *
 * Return: virtual address of the allocation, or NULL on failure
 */
void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
					 qdf_size_t size,
					 qdf_dma_addr_t *paddr,
					 uint32_t ring_type,
					 uint8_t *is_mem_prealloc)
{
	void *vaddr = NULL;
	struct hif_driver_state_callbacks *cbk =
				hif_get_callbacks_handle(scn);

	*is_mem_prealloc = false;
	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
								   paddr,
								   ring_type);
		if (vaddr) {
			*is_mem_prealloc = true;
			goto end;
		}
	}

	/* pool miss or no pool: allocate dynamically */
	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
					 scn->qdf_dev->dev,
					 size,
					 paddr);
end:
	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
		(void *)*paddr, (int)size, ring_type);

	return vaddr;
}
  1658. void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
  1659. qdf_size_t size,
  1660. void *vaddr,
  1661. qdf_dma_addr_t paddr,
  1662. qdf_dma_context_t memctx,
  1663. uint8_t is_mem_prealloc)
  1664. {
  1665. struct hif_driver_state_callbacks *cbk =
  1666. hif_get_callbacks_handle(scn);
  1667. if (is_mem_prealloc) {
  1668. if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
  1669. cbk->prealloc_put_consistent_mem_unaligned(vaddr);
  1670. } else {
  1671. dp_warn("dp_prealloc_put_consistent_unligned NULL");
  1672. QDF_BUG(0);
  1673. }
  1674. } else {
  1675. qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
  1676. size, vaddr, paddr, memctx);
  1677. }
  1678. }
  1679. #endif
  1680. /**
  1681. * hif_batch_send() - API to access hif specific function
  1682. * ce_batch_send.
  1683. * @osc: HIF Context
  1684. * @msdu : list of msdus to be sent
  1685. * @transfer_id : transfer id
  1686. * @len : donwloaded length
  1687. *
  1688. * Return: list of msds not sent
  1689. */
  1690. qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
  1691. uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
  1692. {
  1693. void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
  1694. if (!ce_tx_hdl)
  1695. return NULL;
  1696. return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
  1697. len, sendhead);
  1698. }
  1699. qdf_export_symbol(hif_batch_send);
  1700. /**
  1701. * hif_update_tx_ring() - API to access hif specific function
  1702. * ce_update_tx_ring.
  1703. * @osc: HIF Context
  1704. * @num_htt_cmpls : number of htt compl received.
  1705. *
  1706. * Return: void
  1707. */
  1708. void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
  1709. {
  1710. void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
  1711. ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
  1712. }
  1713. qdf_export_symbol(hif_update_tx_ring);
  1714. /**
  1715. * hif_send_single() - API to access hif specific function
  1716. * ce_send_single.
  1717. * @osc: HIF Context
  1718. * @msdu : msdu to be sent
  1719. * @transfer_id: transfer id
  1720. * @len : downloaded length
  1721. *
  1722. * Return: msdu sent status
  1723. */
  1724. QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
  1725. uint32_t transfer_id, u_int32_t len)
  1726. {
  1727. void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
  1728. if (!ce_tx_hdl)
  1729. return QDF_STATUS_E_NULL_VALUE;
  1730. return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
  1731. len);
  1732. }
  1733. qdf_export_symbol(hif_send_single);
  1734. #endif
  1735. /**
  1736. * hif_reg_write() - API to access hif specific function
  1737. * hif_write32_mb.
  1738. * @hif_ctx : HIF Context
  1739. * @offset : offset on which value has to be written
  1740. * @value : value to be written
  1741. *
  1742. * Return: None
  1743. */
  1744. void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
  1745. uint32_t value)
  1746. {
  1747. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1748. hif_write32_mb(scn, scn->mem + offset, value);
  1749. }
  1750. qdf_export_symbol(hif_reg_write);
  1751. /**
  1752. * hif_reg_read() - API to access hif specific function
  1753. * hif_read32_mb.
  1754. * @hif_ctx : HIF Context
  1755. * @offset : offset from which value has to be read
  1756. *
  1757. * Return: Read value
  1758. */
  1759. uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
  1760. {
  1761. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1762. return hif_read32_mb(scn, scn->mem + offset);
  1763. }
  1764. qdf_export_symbol(hif_reg_read);
  1765. /**
  1766. * hif_ramdump_handler(): generic ramdump handler
  1767. * @scn: struct hif_opaque_softc
  1768. *
  1769. * Return: None
  1770. */
  1771. void hif_ramdump_handler(struct hif_opaque_softc *scn)
  1772. {
  1773. if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
  1774. hif_usb_ramdump_handler(scn);
  1775. }
  1776. hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
  1777. {
  1778. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1779. return scn->wake_irq_type;
  1780. }
  1781. irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
  1782. {
  1783. struct hif_softc *scn = context;
  1784. struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(scn);
  1785. hif_info("wake interrupt received on irq %d", irq);
  1786. if (hif_pm_runtime_get_monitor_wake_intr(hif_ctx)) {
  1787. hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
  1788. hif_pm_runtime_request_resume(hif_ctx);
  1789. }
  1790. if (scn->initial_wakeup_cb)
  1791. scn->initial_wakeup_cb(scn->initial_wakeup_priv);
  1792. if (hif_is_ut_suspended(scn))
  1793. hif_ut_fw_resume(scn);
  1794. qdf_pm_system_wakeup();
  1795. return IRQ_HANDLED;
  1796. }
  1797. void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
  1798. void (*callback)(void *),
  1799. void *priv)
  1800. {
  1801. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1802. scn->initial_wakeup_cb = callback;
  1803. scn->initial_wakeup_priv = priv;
  1804. }
  1805. void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
  1806. uint32_t ce_service_max_yield_time)
  1807. {
  1808. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  1809. hif_ctx->ce_service_max_yield_time =
  1810. ce_service_max_yield_time * 1000;
  1811. }
  1812. unsigned long long
  1813. hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
  1814. {
  1815. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  1816. return hif_ctx->ce_service_max_yield_time;
  1817. }
  1818. void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
  1819. uint8_t ce_service_max_rx_ind_flush)
  1820. {
  1821. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  1822. if (ce_service_max_rx_ind_flush == 0 ||
  1823. ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
  1824. hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
  1825. else
  1826. hif_ctx->ce_service_max_rx_ind_flush =
  1827. ce_service_max_rx_ind_flush;
  1828. }
  1829. #ifdef SYSTEM_PM_CHECK
  1830. void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
  1831. enum hif_system_pm_state state)
  1832. {
  1833. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  1834. qdf_atomic_set(&hif_ctx->sys_pm_state, state);
  1835. }
  1836. int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
  1837. {
  1838. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  1839. return qdf_atomic_read(&hif_ctx->sys_pm_state);
  1840. }
  1841. int hif_system_pm_state_check(struct hif_opaque_softc *hif)
  1842. {
  1843. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  1844. int32_t sys_pm_state;
  1845. if (!hif_ctx) {
  1846. hif_err("hif context is null");
  1847. return -EFAULT;
  1848. }
  1849. sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
  1850. if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
  1851. sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
  1852. hif_info("Triggering system wakeup");
  1853. qdf_pm_system_wakeup();
  1854. return -EAGAIN;
  1855. }
  1856. return 0;
  1857. }
  1858. #endif