main.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/delay.h>
  7. #include <linux/jiffies.h>
  8. #include <linux/module.h>
  9. #include <linux/of.h>
  10. #include <linux/of_device.h>
  11. #include <linux/pm_wakeup.h>
  12. #include <linux/reboot.h>
  13. #include <linux/rwsem.h>
  14. #include <linux/suspend.h>
  15. #include <linux/timer.h>
  16. #include <linux/version.h>
  17. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0))
  18. #include <linux/panic_notifier.h>
  19. #endif
  20. #if IS_ENABLED(CONFIG_QCOM_MINIDUMP)
  21. #include <soc/qcom/minidump.h>
  22. #endif
  23. #include "cnss_plat_ipc_qmi.h"
  24. #include "main.h"
  25. #include "bus.h"
  26. #include "debug.h"
  27. #include "genl.h"
  28. #define CNSS_DUMP_FORMAT_VER 0x11
  29. #define CNSS_DUMP_FORMAT_VER_V2 0x22
  30. #define CNSS_DUMP_MAGIC_VER_V2 0x42445953
  31. #define CNSS_DUMP_NAME "CNSS_WLAN"
  32. #define CNSS_DUMP_DESC_SIZE 0x1000
  33. #define CNSS_DUMP_SEG_VER 0x1
  34. #define FILE_SYSTEM_READY 1
  35. #define FW_READY_TIMEOUT 20000
  36. #define FW_ASSERT_TIMEOUT 5000
  37. #define CNSS_EVENT_PENDING 2989
  38. #define POWER_RESET_MIN_DELAY_MS 100
  39. #define CNSS_QUIRKS_DEFAULT 0
  40. #ifdef CONFIG_CNSS_EMULATION
  41. #define CNSS_MHI_TIMEOUT_DEFAULT 90000
  42. #define CNSS_MHI_M2_TIMEOUT_DEFAULT 2000
  43. #define CNSS_QMI_TIMEOUT_DEFAULT 90000
  44. #else
  45. #define CNSS_MHI_TIMEOUT_DEFAULT 0
  46. #define CNSS_MHI_M2_TIMEOUT_DEFAULT 25
  47. #define CNSS_QMI_TIMEOUT_DEFAULT 10000
  48. #endif
  49. #define CNSS_BDF_TYPE_DEFAULT CNSS_BDF_ELF
  50. #define CNSS_TIME_SYNC_PERIOD_DEFAULT 900000
  51. #define CNSS_DMS_QMI_CONNECTION_WAIT_MS 50
  52. #define CNSS_DMS_QMI_CONNECTION_WAIT_RETRY 200
  53. #define CNSS_DAEMON_CONNECT_TIMEOUT_MS 30000
  54. #define CNSS_CAL_DB_FILE_NAME "wlfw_cal_db.bin"
  55. #define CNSS_CAL_START_PROBE_WAIT_RETRY_MAX 100
  56. #define CNSS_CAL_START_PROBE_WAIT_MS 500
  57. enum cnss_cal_db_op {
  58. CNSS_CAL_DB_UPLOAD,
  59. CNSS_CAL_DB_DOWNLOAD,
  60. CNSS_CAL_DB_INVALID_OP,
  61. };
  62. static struct cnss_plat_data *plat_env;
  63. static DECLARE_RWSEM(cnss_pm_sem);
  64. static struct cnss_fw_files FW_FILES_QCA6174_FW_3_0 = {
  65. "qwlan30.bin", "bdwlan30.bin", "otp30.bin", "utf30.bin",
  66. "utfbd30.bin", "epping30.bin", "evicted30.bin"
  67. };
  68. static struct cnss_fw_files FW_FILES_DEFAULT = {
  69. "qwlan.bin", "bdwlan.bin", "otp.bin", "utf.bin",
  70. "utfbd.bin", "epping.bin", "evicted.bin"
  71. };
  72. struct cnss_driver_event {
  73. struct list_head list;
  74. enum cnss_driver_event_type type;
  75. bool sync;
  76. struct completion complete;
  77. int ret;
  78. void *data;
  79. };
  80. static void cnss_set_plat_priv(struct platform_device *plat_dev,
  81. struct cnss_plat_data *plat_priv)
  82. {
  83. plat_env = plat_priv;
  84. }
  85. struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev)
  86. {
  87. return plat_env;
  88. }
  89. /**
  90. * cnss_get_mem_seg_count - Get the segment count for a memory type
  91. * @type: memory type
  92. * @seg: pointer to return the segment count
  93. *
  94. * Return: 0 on success, negative value on failure
  95. */
  96. int cnss_get_mem_seg_count(enum cnss_remote_mem_type type, u32 *seg)
  97. {
  98. struct cnss_plat_data *plat_priv;
  99. plat_priv = cnss_get_plat_priv(NULL);
  100. if (!plat_priv)
  101. return -ENODEV;
  102. switch (type) {
  103. case CNSS_REMOTE_MEM_TYPE_FW:
  104. *seg = plat_priv->fw_mem_seg_len;
  105. break;
  106. case CNSS_REMOTE_MEM_TYPE_QDSS:
  107. *seg = plat_priv->qdss_mem_seg_len;
  108. break;
  109. default:
  110. return -EINVAL;
  111. }
  112. return 0;
  113. }
  114. EXPORT_SYMBOL(cnss_get_mem_seg_count);
  115. /**
  116. * cnss_get_mem_segment_info - Get segment info for a memory type
  117. * @type: memory type
  118. * @segment: array to save the segment info
  119. * @segment_count: number of entries in @segment
  120. *
  121. * Return: 0 on success, negative value on failure
  122. */
  123. int cnss_get_mem_segment_info(enum cnss_remote_mem_type type,
  124. struct cnss_mem_segment segment[],
  125. u32 segment_count)
  126. {
  127. struct cnss_plat_data *plat_priv;
  128. u32 i;
  129. plat_priv = cnss_get_plat_priv(NULL);
  130. if (!plat_priv)
  131. return -ENODEV;
  132. switch (type) {
  133. case CNSS_REMOTE_MEM_TYPE_FW:
  134. if (segment_count > plat_priv->fw_mem_seg_len)
  135. segment_count = plat_priv->fw_mem_seg_len;
  136. for (i = 0; i < segment_count; i++) {
  137. segment[i].size = plat_priv->fw_mem[i].size;
  138. segment[i].va = plat_priv->fw_mem[i].va;
  139. segment[i].pa = plat_priv->fw_mem[i].pa;
  140. }
  141. break;
  142. case CNSS_REMOTE_MEM_TYPE_QDSS:
  143. if (segment_count > plat_priv->qdss_mem_seg_len)
  144. segment_count = plat_priv->qdss_mem_seg_len;
  145. for (i = 0; i < segment_count; i++) {
  146. segment[i].size = plat_priv->qdss_mem[i].size;
  147. segment[i].va = plat_priv->qdss_mem[i].va;
  148. segment[i].pa = plat_priv->qdss_mem[i].pa;
  149. }
  150. break;
  151. default:
  152. return -EINVAL;
  153. }
  154. return 0;
  155. }
  156. EXPORT_SYMBOL(cnss_get_mem_segment_info);
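/* Illustrative usage sketch (hypothetical caller, not part of this driver):
 *
 *	u32 count;
 *	struct cnss_mem_segment segs[MAX_SEGS];
 *
 *	if (!cnss_get_mem_seg_count(CNSS_REMOTE_MEM_TYPE_FW, &count) &&
 *	    !cnss_get_mem_segment_info(CNSS_REMOTE_MEM_TYPE_FW, segs,
 *				       min_t(u32, count, MAX_SEGS)))
 *		consume(segs, min_t(u32, count, MAX_SEGS));
 *
 * MAX_SEGS and consume() are placeholders for the caller's own array size
 * and handling of the returned va/pa/size fields.
 */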
  157. int cnss_set_feature_list(struct cnss_plat_data *plat_priv,
  158. enum cnss_feature_v01 feature)
  159. {
  160. if (unlikely(!plat_priv || feature >= CNSS_MAX_FEATURE_V01))
  161. return -EINVAL;
  162. plat_priv->feature_list |= 1 << feature;
  163. return 0;
  164. }
  165. int cnss_get_feature_list(struct cnss_plat_data *plat_priv,
  166. u64 *feature_list)
  167. {
  168. if (unlikely(!plat_priv))
  169. return -EINVAL;
  170. *feature_list = plat_priv->feature_list;
  171. return 0;
  172. }
  173. static int cnss_pm_notify(struct notifier_block *b,
  174. unsigned long event, void *p)
  175. {
  176. switch (event) {
  177. case PM_SUSPEND_PREPARE:
  178. down_write(&cnss_pm_sem);
  179. break;
  180. case PM_POST_SUSPEND:
  181. up_write(&cnss_pm_sem);
  182. break;
  183. }
  184. return NOTIFY_DONE;
  185. }
  186. static struct notifier_block cnss_pm_notifier = {
  187. .notifier_call = cnss_pm_notify,
  188. };
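/* cnss_pm_stay_awake()/cnss_pm_relax() reference-count pm_count; only the
 * first hold and the last release toggle the platform device's wakeup source.
 */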
  189. void cnss_pm_stay_awake(struct cnss_plat_data *plat_priv)
  190. {
  191. if (atomic_inc_return(&plat_priv->pm_count) != 1)
  192. return;
  193. cnss_pr_dbg("PM stay awake, state: 0x%lx, count: %d\n",
  194. plat_priv->driver_state,
  195. atomic_read(&plat_priv->pm_count));
  196. pm_stay_awake(&plat_priv->plat_dev->dev);
  197. }
  198. void cnss_pm_relax(struct cnss_plat_data *plat_priv)
  199. {
  200. int r = atomic_dec_return(&plat_priv->pm_count);
  201. WARN_ON(r < 0);
  202. if (r != 0)
  203. return;
  204. cnss_pr_dbg("PM relax, state: 0x%lx, count: %d\n",
  205. plat_priv->driver_state,
  206. atomic_read(&plat_priv->pm_count));
  207. pm_relax(&plat_priv->plat_dev->dev);
  208. }
  209. void cnss_lock_pm_sem(struct device *dev)
  210. {
  211. down_read(&cnss_pm_sem);
  212. }
  213. EXPORT_SYMBOL(cnss_lock_pm_sem);
  214. void cnss_release_pm_sem(struct device *dev)
  215. {
  216. up_read(&cnss_pm_sem);
  217. }
  218. EXPORT_SYMBOL(cnss_release_pm_sem);
  219. int cnss_get_fw_files_for_target(struct device *dev,
  220. struct cnss_fw_files *pfw_files,
  221. u32 target_type, u32 target_version)
  222. {
  223. if (!pfw_files)
  224. return -ENODEV;
  225. switch (target_version) {
  226. case QCA6174_REV3_VERSION:
  227. case QCA6174_REV3_2_VERSION:
  228. memcpy(pfw_files, &FW_FILES_QCA6174_FW_3_0, sizeof(*pfw_files));
  229. break;
  230. default:
  231. memcpy(pfw_files, &FW_FILES_DEFAULT, sizeof(*pfw_files));
  232. cnss_pr_err("Unknown target version, type: 0x%X, version: 0x%X",
  233. target_type, target_version);
  234. break;
  235. }
  236. return 0;
  237. }
  238. EXPORT_SYMBOL(cnss_get_fw_files_for_target);
  239. int cnss_get_platform_cap(struct device *dev, struct cnss_platform_cap *cap)
  240. {
  241. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  242. if (!plat_priv)
  243. return -ENODEV;
  244. if (!cap)
  245. return -EINVAL;
  246. *cap = plat_priv->cap;
  247. cnss_pr_dbg("Platform cap_flag is 0x%x\n", cap->cap_flag);
  248. return 0;
  249. }
  250. EXPORT_SYMBOL(cnss_get_platform_cap);
  251. void cnss_request_pm_qos(struct device *dev, u32 qos_val)
  252. {
  253. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  254. if (!plat_priv)
  255. return;
  256. cpu_latency_qos_add_request(&plat_priv->qos_request, qos_val);
  257. }
  258. EXPORT_SYMBOL(cnss_request_pm_qos);
  259. void cnss_remove_pm_qos(struct device *dev)
  260. {
  261. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  262. if (!plat_priv)
  263. return;
  264. cpu_latency_qos_remove_request(&plat_priv->qos_request);
  265. }
  266. EXPORT_SYMBOL(cnss_remove_pm_qos);
  267. int cnss_wlan_enable(struct device *dev,
  268. struct cnss_wlan_enable_cfg *config,
  269. enum cnss_driver_mode mode,
  270. const char *host_version)
  271. {
  272. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  273. int ret = 0;
  274. if (plat_priv->device_id == QCA6174_DEVICE_ID)
  275. return 0;
  276. if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks))
  277. return 0;
  278. if (!config || !host_version) {
  279. cnss_pr_err("Invalid config or host_version pointer\n");
  280. return -EINVAL;
  281. }
  282. cnss_pr_dbg("Mode: %d, config: %pK, host_version: %s\n",
  283. mode, config, host_version);
  284. if (mode == CNSS_WALTEST || mode == CNSS_CCPM)
  285. goto skip_cfg;
  286. ret = cnss_wlfw_wlan_cfg_send_sync(plat_priv, config, host_version);
  287. if (ret)
  288. goto out;
  289. skip_cfg:
  290. ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, mode);
  291. out:
  292. return ret;
  293. }
  294. EXPORT_SYMBOL(cnss_wlan_enable);
  295. int cnss_wlan_disable(struct device *dev, enum cnss_driver_mode mode)
  296. {
  297. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  298. int ret = 0;
  299. if (plat_priv->device_id == QCA6174_DEVICE_ID)
  300. return 0;
  301. if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks))
  302. return 0;
  303. ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
  304. cnss_bus_free_qdss_mem(plat_priv);
  305. return ret;
  306. }
  307. EXPORT_SYMBOL(cnss_wlan_disable);
  308. int cnss_athdiag_read(struct device *dev, u32 offset, u32 mem_type,
  309. u32 data_len, u8 *output)
  310. {
  311. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  312. int ret = 0;
  313. if (!plat_priv) {
  314. cnss_pr_err("plat_priv is NULL!\n");
  315. return -EINVAL;
  316. }
  317. if (plat_priv->device_id == QCA6174_DEVICE_ID)
  318. return 0;
  319. if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
  320. cnss_pr_err("Invalid state for athdiag read: 0x%lx\n",
  321. plat_priv->driver_state);
  322. ret = -EINVAL;
  323. goto out;
  324. }
  325. ret = cnss_wlfw_athdiag_read_send_sync(plat_priv, offset, mem_type,
  326. data_len, output);
  327. out:
  328. return ret;
  329. }
  330. EXPORT_SYMBOL(cnss_athdiag_read);
  331. int cnss_athdiag_write(struct device *dev, u32 offset, u32 mem_type,
  332. u32 data_len, u8 *input)
  333. {
  334. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  335. int ret = 0;
  336. if (!plat_priv) {
  337. cnss_pr_err("plat_priv is NULL!\n");
  338. return -EINVAL;
  339. }
  340. if (plat_priv->device_id == QCA6174_DEVICE_ID)
  341. return 0;
  342. if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
  343. cnss_pr_err("Invalid state for athdiag write: 0x%lx\n",
  344. plat_priv->driver_state);
  345. ret = -EINVAL;
  346. goto out;
  347. }
  348. ret = cnss_wlfw_athdiag_write_send_sync(plat_priv, offset, mem_type,
  349. data_len, input);
  350. out:
  351. return ret;
  352. }
  353. EXPORT_SYMBOL(cnss_athdiag_write);
  354. int cnss_set_fw_log_mode(struct device *dev, u8 fw_log_mode)
  355. {
  356. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  357. if (plat_priv->device_id == QCA6174_DEVICE_ID)
  358. return 0;
  359. return cnss_wlfw_ini_send_sync(plat_priv, fw_log_mode);
  360. }
  361. EXPORT_SYMBOL(cnss_set_fw_log_mode);
  362. int cnss_set_pcie_gen_speed(struct device *dev, u8 pcie_gen_speed)
  363. {
  364. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  365. if (!plat_priv)
  366. return -EINVAL;
  367. if (plat_priv->device_id != QCA6490_DEVICE_ID ||
  368. !plat_priv->fw_pcie_gen_switch)
  369. return -EOPNOTSUPP;
  370. if (pcie_gen_speed < QMI_PCIE_GEN_SPEED_1_V01 ||
  371. pcie_gen_speed > QMI_PCIE_GEN_SPEED_3_V01)
  372. return -EINVAL;
  373. cnss_pr_dbg("WLAN provided PCIE gen speed: %d\n", pcie_gen_speed);
  374. plat_priv->pcie_gen_speed = pcie_gen_speed;
  375. return 0;
  376. }
  377. EXPORT_SYMBOL(cnss_set_pcie_gen_speed);
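/* Handle FW memory ready: exchange target capabilities, download the optional
 * HDS and REGDB BDFs, then the configured BDF type and the M3 firmware, and
 * finally push the QDSS config (best effort).
 */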
  378. static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv)
  379. {
  380. int ret = 0;
  381. if (!plat_priv)
  382. return -ENODEV;
  383. set_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
  384. ret = cnss_wlfw_tgt_cap_send_sync(plat_priv);
  385. if (ret)
  386. goto out;
  387. if (plat_priv->hds_enabled)
  388. cnss_wlfw_bdf_dnld_send_sync(plat_priv, CNSS_BDF_HDS);
  389. cnss_wlfw_bdf_dnld_send_sync(plat_priv, CNSS_BDF_REGDB);
  390. ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv,
  391. plat_priv->ctrl_params.bdf_type);
  392. if (ret)
  393. goto out;
  394. ret = cnss_bus_load_m3(plat_priv);
  395. if (ret)
  396. goto out;
  397. ret = cnss_wlfw_m3_dnld_send_sync(plat_priv);
  398. if (ret)
  399. goto out;
  400. cnss_wlfw_qdss_dnld_send_sync(plat_priv);
  401. return 0;
  402. out:
  403. return ret;
  404. }
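/* Request shared-antenna usage for WLAN: query the antenna switch info if not
 * yet known, switch the antenna to WLAN when coex is connected, then request
 * the antenna grant from firmware.
 */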
  405. static int cnss_request_antenna_sharing(struct cnss_plat_data *plat_priv)
  406. {
  407. int ret = 0;
  408. if (!plat_priv->antenna) {
  409. ret = cnss_wlfw_antenna_switch_send_sync(plat_priv);
  410. if (ret)
  411. goto out;
  412. }
  413. if (test_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state)) {
  414. ret = coex_antenna_switch_to_wlan_send_sync_msg(plat_priv);
  415. if (ret)
  416. goto out;
  417. }
  418. ret = cnss_wlfw_antenna_grant_send_sync(plat_priv);
  419. if (ret)
  420. goto out;
  421. return 0;
  422. out:
  423. return ret;
  424. }
  425. static void cnss_release_antenna_sharing(struct cnss_plat_data *plat_priv)
  426. {
  427. if (test_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state))
  428. coex_antenna_switch_to_mdm_send_sync_msg(plat_priv);
  429. }
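/* Fetch the WLAN MAC address from DMS over QMI. If use-nv-mac is set, verify
 * the daemon supports DMS MAC provisioning and retry up to
 * CNSS_DMS_QMI_CONNECTION_WAIT_RETRY times, asserting on failure. A valid MAC
 * is then pushed to firmware.
 */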
  430. static int cnss_setup_dms_mac(struct cnss_plat_data *plat_priv)
  431. {
  432. u32 i;
  433. int ret = 0;
  434. struct cnss_plat_ipc_daemon_config *cfg;
  435. ret = cnss_qmi_get_dms_mac(plat_priv);
  436. if (ret == 0 && plat_priv->dms.mac_valid)
  437. goto qmi_send;
  438. /* The DTSI property use-nv-mac forces use of the DMS MAC address for WLAN.
  439. * Thus, assert if the MAC cannot be obtained from DMS even after retries.
  440. */
  441. if (plat_priv->use_nv_mac) {
  442. /* Check if Daemon says platform support DMS MAC provisioning */
  443. cfg = cnss_plat_ipc_qmi_daemon_config();
  444. if (cfg) {
  445. if (!cfg->dms_mac_addr_supported) {
  446. cnss_pr_err("DMS MAC address not supported\n");
  447. CNSS_ASSERT(0);
  448. return -EINVAL;
  449. }
  450. }
  451. for (i = 0; i < CNSS_DMS_QMI_CONNECTION_WAIT_RETRY; i++) {
  452. if (plat_priv->dms.mac_valid)
  453. break;
  454. ret = cnss_qmi_get_dms_mac(plat_priv);
  455. if (ret == 0)
  456. break;
  457. msleep(CNSS_DMS_QMI_CONNECTION_WAIT_MS);
  458. }
  459. if (!plat_priv->dms.mac_valid) {
  460. cnss_pr_err("Unable to get MAC from DMS after retries\n");
  461. CNSS_ASSERT(0);
  462. return -EINVAL;
  463. }
  464. }
  465. qmi_send:
  466. if (plat_priv->dms.mac_valid)
  467. ret =
  468. cnss_wlfw_wlan_mac_req_send_sync(plat_priv, plat_priv->dms.mac,
  469. ARRAY_SIZE(plat_priv->dms.mac));
  470. return ret;
  471. }
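/* Copy the calibration DB file between the cnss-daemon file store and the
 * CAL_TYPE_DDR memory shared with firmware, waiting for the daemon connection
 * if it is not up yet. Skipped when the BDF does not require a CAL DB file.
 */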
  472. static int cnss_cal_db_mem_update(struct cnss_plat_data *plat_priv,
  473. enum cnss_cal_db_op op, u32 *size)
  474. {
  475. int ret = 0;
  476. u32 timeout = cnss_get_timeout(plat_priv,
  477. CNSS_TIMEOUT_DAEMON_CONNECTION);
  478. enum cnss_plat_ipc_qmi_client_id_v01 client_id =
  479. CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01;
  480. if (op >= CNSS_CAL_DB_INVALID_OP)
  481. return -EINVAL;
  482. if (!plat_priv->cbc_file_download) {
  483. cnss_pr_info("CAL DB file not required as per BDF\n");
  484. return 0;
  485. }
  486. if (*size == 0) {
  487. cnss_pr_err("Invalid cal file size\n");
  488. return -EINVAL;
  489. }
  490. if (!test_bit(CNSS_DAEMON_CONNECTED, &plat_priv->driver_state)) {
  491. cnss_pr_info("Waiting for CNSS Daemon connection\n");
  492. ret = wait_for_completion_timeout(&plat_priv->daemon_connected,
  493. msecs_to_jiffies(timeout));
  494. if (!ret) {
  495. cnss_pr_err("Daemon not yet connected\n");
  496. CNSS_ASSERT(0);
  497. return ret;
  498. }
  499. }
  500. if (!plat_priv->cal_mem->va) {
  501. cnss_pr_err("CAL DB Memory not setup for FW\n");
  502. return -EINVAL;
  503. }
  504. /* Copy CAL DB file contents to/from CAL_TYPE_DDR mem allocated to FW */
  505. if (op == CNSS_CAL_DB_DOWNLOAD) {
  506. cnss_pr_dbg("Initiating Calibration file download to mem\n");
  507. ret = cnss_plat_ipc_qmi_file_download(client_id,
  508. CNSS_CAL_DB_FILE_NAME,
  509. plat_priv->cal_mem->va,
  510. size);
  511. } else {
  512. cnss_pr_dbg("Initiating Calibration mem upload to file\n");
  513. ret = cnss_plat_ipc_qmi_file_upload(client_id,
  514. CNSS_CAL_DB_FILE_NAME,
  515. plat_priv->cal_mem->va,
  516. *size);
  517. }
  518. if (ret)
  519. cnss_pr_err("Cal DB file %s %s failure\n",
  520. CNSS_CAL_DB_FILE_NAME,
  521. op == CNSS_CAL_DB_DOWNLOAD ? "download" : "upload");
  522. else
  523. cnss_pr_dbg("Cal DB file %s %s size %d done\n",
  524. CNSS_CAL_DB_FILE_NAME,
  525. op == CNSS_CAL_DB_DOWNLOAD ? "download" : "upload",
  526. *size);
  527. return ret;
  528. }
  529. static int cnss_cal_mem_upload_to_file(struct cnss_plat_data *plat_priv)
  530. {
  531. if (plat_priv->cal_file_size > plat_priv->cal_mem->size) {
  532. cnss_pr_err("Cal file size is larger than Cal DB Mem size\n");
  533. return -EINVAL;
  534. }
  535. return cnss_cal_db_mem_update(plat_priv, CNSS_CAL_DB_UPLOAD,
  536. &plat_priv->cal_file_size);
  537. }
  538. static int cnss_cal_file_download_to_mem(struct cnss_plat_data *plat_priv,
  539. u32 *cal_file_size)
  540. {
  541. /* To download, pass the total size of the allocated cal DB memory.
  542. * After the cal file is downloaded to memory, its size is updated in
  543. * the return pointer.
  544. */
  545. *cal_file_size = plat_priv->cal_mem->size;
  546. return cnss_cal_db_mem_update(plat_priv, CNSS_CAL_DB_DOWNLOAD,
  547. cal_file_size);
  548. }
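/* Handle FW ready: stop the boot timer, mark FW ready, and then either enter
 * WALTEST mode, run cold boot calibration (antenna sharing, cal file download,
 * calibration mode), or set up the DMS MAC and probe the WLAN driver. On
 * failure the bus device is shut down.
 */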
  549. static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
  550. {
  551. int ret = 0;
  552. u32 cal_file_size = 0;
  553. if (!plat_priv)
  554. return -ENODEV;
  555. cnss_pr_dbg("Processing FW Init Done..\n");
  556. del_timer(&plat_priv->fw_boot_timer);
  557. set_bit(CNSS_FW_READY, &plat_priv->driver_state);
  558. clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
  559. cnss_wlfw_send_pcie_gen_speed_sync(plat_priv);
  560. if (test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state)) {
  561. clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
  562. clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
  563. }
  564. if (test_bit(ENABLE_WALTEST, &plat_priv->ctrl_params.quirks)) {
  565. ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
  566. CNSS_WALTEST);
  567. } else if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
  568. cnss_request_antenna_sharing(plat_priv);
  569. cnss_cal_file_download_to_mem(plat_priv, &cal_file_size);
  570. cnss_wlfw_cal_report_req_send_sync(plat_priv, cal_file_size);
  571. plat_priv->cal_time = jiffies;
  572. ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
  573. CNSS_CALIBRATION);
  574. } else {
  575. ret = cnss_setup_dms_mac(plat_priv);
  576. ret = cnss_bus_call_driver_probe(plat_priv);
  577. }
  578. if (ret && test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
  579. goto out;
  580. else if (ret)
  581. goto shutdown;
  582. cnss_vreg_unvote_type(plat_priv, CNSS_VREG_PRIM);
  583. return 0;
  584. shutdown:
  585. cnss_bus_dev_shutdown(plat_priv);
  586. clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
  587. clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
  588. out:
  589. return ret;
  590. }
  591. static char *cnss_driver_event_to_str(enum cnss_driver_event_type type)
  592. {
  593. switch (type) {
  594. case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
  595. return "SERVER_ARRIVE";
  596. case CNSS_DRIVER_EVENT_SERVER_EXIT:
  597. return "SERVER_EXIT";
  598. case CNSS_DRIVER_EVENT_REQUEST_MEM:
  599. return "REQUEST_MEM";
  600. case CNSS_DRIVER_EVENT_FW_MEM_READY:
  601. return "FW_MEM_READY";
  602. case CNSS_DRIVER_EVENT_FW_READY:
  603. return "FW_READY";
  604. case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
  605. return "COLD_BOOT_CAL_START";
  606. case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
  607. return "COLD_BOOT_CAL_DONE";
  608. case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
  609. return "REGISTER_DRIVER";
  610. case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
  611. return "UNREGISTER_DRIVER";
  612. case CNSS_DRIVER_EVENT_RECOVERY:
  613. return "RECOVERY";
  614. case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
  615. return "FORCE_FW_ASSERT";
  616. case CNSS_DRIVER_EVENT_POWER_UP:
  617. return "POWER_UP";
  618. case CNSS_DRIVER_EVENT_POWER_DOWN:
  619. return "POWER_DOWN";
  620. case CNSS_DRIVER_EVENT_IDLE_RESTART:
  621. return "IDLE_RESTART";
  622. case CNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
  623. return "IDLE_SHUTDOWN";
  624. case CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND:
  625. return "IMS_WFC_CALL_IND";
  626. case CNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND:
  627. return "WLFW_TWC_CFG_IND";
  628. case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
  629. return "QDSS_TRACE_REQ_MEM";
  630. case CNSS_DRIVER_EVENT_FW_MEM_FILE_SAVE:
  631. return "FW_MEM_FILE_SAVE";
  632. case CNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
  633. return "QDSS_TRACE_FREE";
  634. case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA:
  635. return "QDSS_TRACE_REQ_DATA";
  636. case CNSS_DRIVER_EVENT_MAX:
  637. return "EVENT_MAX";
  638. }
  639. return "UNKNOWN";
  640. };
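/* Post a driver event to the platform event workqueue. The event is allocated
 * with GFP_ATOMIC in atomic context. For CNSS_EVENT_SYNC* requests, wait for
 * the handler (unkillable, killable or interruptible depending on flags) and
 * return its result; if the wait is interrupted while the event is still
 * pending, mark it async and return -EINTR.
 */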
  641. int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
  642. enum cnss_driver_event_type type,
  643. u32 flags, void *data)
  644. {
  645. struct cnss_driver_event *event;
  646. unsigned long irq_flags;
  647. int gfp = GFP_KERNEL;
  648. int ret = 0;
  649. if (!plat_priv)
  650. return -ENODEV;
  651. cnss_pr_dbg("Posting event: %s(%d)%s, state: 0x%lx flags: 0x%0x\n",
  652. cnss_driver_event_to_str(type), type,
  653. flags ? "-sync" : "", plat_priv->driver_state, flags);
  654. if (type >= CNSS_DRIVER_EVENT_MAX) {
  655. cnss_pr_err("Invalid Event type: %d, can't post", type);
  656. return -EINVAL;
  657. }
  658. if (in_interrupt() || irqs_disabled())
  659. gfp = GFP_ATOMIC;
  660. event = kzalloc(sizeof(*event), gfp);
  661. if (!event)
  662. return -ENOMEM;
  663. cnss_pm_stay_awake(plat_priv);
  664. event->type = type;
  665. event->data = data;
  666. init_completion(&event->complete);
  667. event->ret = CNSS_EVENT_PENDING;
  668. event->sync = !!(flags & CNSS_EVENT_SYNC);
  669. spin_lock_irqsave(&plat_priv->event_lock, irq_flags);
  670. list_add_tail(&event->list, &plat_priv->event_list);
  671. spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
  672. queue_work(plat_priv->event_wq, &plat_priv->event_work);
  673. if (!(flags & CNSS_EVENT_SYNC))
  674. goto out;
  675. if (flags & CNSS_EVENT_UNKILLABLE)
  676. wait_for_completion(&event->complete);
  677. else if (flags & CNSS_EVENT_UNINTERRUPTIBLE)
  678. ret = wait_for_completion_killable(&event->complete);
  679. else
  680. ret = wait_for_completion_interruptible(&event->complete);
  681. cnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
  682. cnss_driver_event_to_str(type), type,
  683. plat_priv->driver_state, ret, event->ret);
  684. spin_lock_irqsave(&plat_priv->event_lock, irq_flags);
  685. if (ret == -ERESTARTSYS && event->ret == CNSS_EVENT_PENDING) {
  686. event->sync = false;
  687. spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
  688. ret = -EINTR;
  689. goto out;
  690. }
  691. spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
  692. ret = event->ret;
  693. kfree(event);
  694. out:
  695. cnss_pm_relax(plat_priv);
  696. return ret;
  697. }
  698. /**
  699. * cnss_get_timeout - Get timeout for corresponding type.
  700. * @plat_priv: Pointer to platform driver context.
  701. * @cnss_timeout_type: Timeout type.
  702. *
  703. * Return: Timeout in milliseconds.
  704. */
  705. unsigned int cnss_get_timeout(struct cnss_plat_data *plat_priv,
  706. enum cnss_timeout_type timeout_type)
  707. {
  708. unsigned int qmi_timeout = cnss_get_qmi_timeout(plat_priv);
  709. switch (timeout_type) {
  710. case CNSS_TIMEOUT_QMI:
  711. return qmi_timeout;
  712. case CNSS_TIMEOUT_POWER_UP:
  713. return (qmi_timeout << 2);
  714. case CNSS_TIMEOUT_IDLE_RESTART:
  715. /* In the idle restart power-up sequence, fw_boot_timer handles FW
  716. * initialization failure.
  717. * It uses WLAN_MISSION_MODE_TIMEOUT, so allow 3x that time to
  718. * account for FW dump collection and FW re-initialization on
  719. * retry.
  720. */
  721. return (qmi_timeout + WLAN_MISSION_MODE_TIMEOUT * 3);
  722. case CNSS_TIMEOUT_CALIBRATION:
  723. /* Similar to mission mode, FW recovery is attempted if FW init
  724. * fails during cold boot calibration. Thus return 2x the CBC timeout.
  725. */
  726. return (qmi_timeout + WLAN_COLD_BOOT_CAL_TIMEOUT * 2);
  727. case CNSS_TIMEOUT_WLAN_WATCHDOG:
  728. return ((qmi_timeout << 1) + WLAN_WD_TIMEOUT_MS);
  729. case CNSS_TIMEOUT_RDDM:
  730. return CNSS_RDDM_TIMEOUT_MS;
  731. case CNSS_TIMEOUT_RECOVERY:
  732. return RECOVERY_TIMEOUT;
  733. case CNSS_TIMEOUT_DAEMON_CONNECTION:
  734. return qmi_timeout + CNSS_DAEMON_CONNECT_TIMEOUT_MS;
  735. default:
  736. return qmi_timeout;
  737. }
  738. }
  739. unsigned int cnss_get_boot_timeout(struct device *dev)
  740. {
  741. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  742. if (!plat_priv) {
  743. cnss_pr_err("plat_priv is NULL\n");
  744. return 0;
  745. }
  746. return cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI);
  747. }
  748. EXPORT_SYMBOL(cnss_get_boot_timeout);
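/* Power up the device by posting a synchronous POWER_UP event; except for
 * QCA6174, also wait up to the POWER_UP timeout for power_up_complete and
 * return -EAGAIN on timeout.
 */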
  749. int cnss_power_up(struct device *dev)
  750. {
  751. int ret = 0;
  752. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  753. unsigned int timeout;
  754. if (!plat_priv) {
  755. cnss_pr_err("plat_priv is NULL\n");
  756. return -ENODEV;
  757. }
  758. cnss_pr_dbg("Powering up device\n");
  759. ret = cnss_driver_event_post(plat_priv,
  760. CNSS_DRIVER_EVENT_POWER_UP,
  761. CNSS_EVENT_SYNC, NULL);
  762. if (ret)
  763. goto out;
  764. if (plat_priv->device_id == QCA6174_DEVICE_ID)
  765. goto out;
  766. timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_POWER_UP);
  767. reinit_completion(&plat_priv->power_up_complete);
  768. ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
  769. msecs_to_jiffies(timeout));
  770. if (!ret) {
  771. cnss_pr_err("Timeout (%ums) waiting for power up to complete\n",
  772. timeout);
  773. ret = -EAGAIN;
  774. goto out;
  775. }
  776. return 0;
  777. out:
  778. return ret;
  779. }
  780. EXPORT_SYMBOL(cnss_power_up);
  781. int cnss_power_down(struct device *dev)
  782. {
  783. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  784. if (!plat_priv) {
  785. cnss_pr_err("plat_priv is NULL\n");
  786. return -ENODEV;
  787. }
  788. cnss_pr_dbg("Powering down device\n");
  789. return cnss_driver_event_post(plat_priv,
  790. CNSS_DRIVER_EVENT_POWER_DOWN,
  791. CNSS_EVENT_SYNC, NULL);
  792. }
  793. EXPORT_SYMBOL(cnss_power_down);
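/* Idle restart, serialized by driver_ops_lock: post a synchronous IDLE_RESTART
 * event, then wait for power_up_complete (QCA6174 instead probes the driver
 * directly). On timeout the device is powered down and an assert is raised;
 * a pending reboot aborts the restart.
 */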
  794. int cnss_idle_restart(struct device *dev)
  795. {
  796. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  797. unsigned int timeout;
  798. int ret = 0;
  799. if (!plat_priv) {
  800. cnss_pr_err("plat_priv is NULL\n");
  801. return -ENODEV;
  802. }
  803. if (!mutex_trylock(&plat_priv->driver_ops_lock)) {
  804. cnss_pr_dbg("Another driver operation is in progress, ignore idle restart\n");
  805. return -EBUSY;
  806. }
  807. cnss_pr_dbg("Doing idle restart\n");
  808. reinit_completion(&plat_priv->power_up_complete);
  809. if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
  810. cnss_pr_dbg("Reboot or shutdown is in progress, ignore idle restart\n");
  811. ret = -EINVAL;
  812. goto out;
  813. }
  814. ret = cnss_driver_event_post(plat_priv,
  815. CNSS_DRIVER_EVENT_IDLE_RESTART,
  816. CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
  817. if (ret)
  818. goto out;
  819. if (plat_priv->device_id == QCA6174_DEVICE_ID) {
  820. ret = cnss_bus_call_driver_probe(plat_priv);
  821. goto out;
  822. }
  823. timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_IDLE_RESTART);
  824. ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
  825. msecs_to_jiffies(timeout));
  826. if (plat_priv->power_up_error) {
  827. ret = plat_priv->power_up_error;
  828. clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
  829. cnss_pr_dbg("Power up error:%d, exiting\n",
  830. plat_priv->power_up_error);
  831. goto out;
  832. }
  833. if (!ret) {
  834. /* This timeout is hit only after FW recovery retry has already been
  835. * attempted, so the device can safely be powered off.
  836. */
  837. cnss_fatal_err("Timeout (%ums) waiting for idle restart to complete\n",
  838. timeout);
  839. ret = -ETIMEDOUT;
  840. cnss_power_down(dev);
  841. CNSS_ASSERT(0);
  842. goto out;
  843. }
  844. if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
  845. cnss_pr_dbg("Reboot or shutdown is in progress, ignore idle restart\n");
  846. del_timer(&plat_priv->fw_boot_timer);
  847. ret = -EINVAL;
  848. goto out;
  849. }
  850. mutex_unlock(&plat_priv->driver_ops_lock);
  851. return 0;
  852. out:
  853. mutex_unlock(&plat_priv->driver_ops_lock);
  854. return ret;
  855. }
  856. EXPORT_SYMBOL(cnss_idle_restart);
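/* Idle shutdown: refuse while system suspend/resume is in progress; if
 * recovery or a device error is pending, wait up to CNSS_TIMEOUT_RECOVERY for
 * it to finish, then post a synchronous IDLE_SHUTDOWN event.
 */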
  857. int cnss_idle_shutdown(struct device *dev)
  858. {
  859. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  860. unsigned int timeout;
  861. int ret;
  862. if (!plat_priv) {
  863. cnss_pr_err("plat_priv is NULL\n");
  864. return -ENODEV;
  865. }
  866. if (test_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state)) {
  867. cnss_pr_dbg("System suspend or resume in progress, ignore idle shutdown\n");
  868. return -EAGAIN;
  869. }
  870. cnss_pr_dbg("Doing idle shutdown\n");
  871. if (!test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
  872. !test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
  873. goto skip_wait;
  874. reinit_completion(&plat_priv->recovery_complete);
  875. timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY);
  876. ret = wait_for_completion_timeout(&plat_priv->recovery_complete,
  877. msecs_to_jiffies(timeout));
  878. if (!ret) {
  879. cnss_pr_err("Timeout (%ums) waiting for recovery to complete\n",
  880. timeout);
  881. CNSS_ASSERT(0);
  882. }
  883. skip_wait:
  884. return cnss_driver_event_post(plat_priv,
  885. CNSS_DRIVER_EVENT_IDLE_SHUTDOWN,
  886. CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
  887. }
  888. EXPORT_SYMBOL(cnss_idle_shutdown);
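/* Acquire platform resources in order: regulators, clocks, pinctrl; unwind in
 * reverse order on failure. cnss_put_resources() releases them.
 */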
  889. static int cnss_get_resources(struct cnss_plat_data *plat_priv)
  890. {
  891. int ret = 0;
  892. ret = cnss_get_vreg_type(plat_priv, CNSS_VREG_PRIM);
  893. if (ret) {
  894. cnss_pr_err("Failed to get vreg, err = %d\n", ret);
  895. goto out;
  896. }
  897. ret = cnss_get_clk(plat_priv);
  898. if (ret) {
  899. cnss_pr_err("Failed to get clocks, err = %d\n", ret);
  900. goto put_vreg;
  901. }
  902. ret = cnss_get_pinctrl(plat_priv);
  903. if (ret) {
  904. cnss_pr_err("Failed to get pinctrl, err = %d\n", ret);
  905. goto put_clk;
  906. }
  907. return 0;
  908. put_clk:
  909. cnss_put_clk(plat_priv);
  910. put_vreg:
  911. cnss_put_vreg_type(plat_priv, CNSS_VREG_PRIM);
  912. out:
  913. return ret;
  914. }
  915. static void cnss_put_resources(struct cnss_plat_data *plat_priv)
  916. {
  917. cnss_put_clk(plat_priv);
  918. cnss_put_vreg_type(plat_priv, CNSS_VREG_PRIM);
  919. }
  920. #if IS_ENABLED(CONFIG_ESOC) && IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
  921. static int cnss_modem_notifier_nb(struct notifier_block *nb,
  922. unsigned long code,
  923. void *ss_handle)
  924. {
  925. struct cnss_plat_data *plat_priv =
  926. container_of(nb, struct cnss_plat_data, modem_nb);
  927. struct cnss_esoc_info *esoc_info;
  928. cnss_pr_dbg("Modem notifier: event %lu\n", code);
  929. if (!plat_priv)
  930. return NOTIFY_DONE;
  931. esoc_info = &plat_priv->esoc_info;
  932. if (code == SUBSYS_AFTER_POWERUP)
  933. esoc_info->modem_current_status = 1;
  934. else if (code == SUBSYS_BEFORE_SHUTDOWN)
  935. esoc_info->modem_current_status = 0;
  936. else
  937. return NOTIFY_DONE;
  938. if (!cnss_bus_call_driver_modem_status(plat_priv,
  939. esoc_info->modem_current_status))
  940. return NOTIFY_DONE;
  941. return NOTIFY_OK;
  942. }
  943. static int cnss_register_esoc(struct cnss_plat_data *plat_priv)
  944. {
  945. int ret = 0;
  946. struct device *dev;
  947. struct cnss_esoc_info *esoc_info;
  948. struct esoc_desc *esoc_desc;
  949. const char *client_desc;
  950. dev = &plat_priv->plat_dev->dev;
  951. esoc_info = &plat_priv->esoc_info;
  952. esoc_info->notify_modem_status =
  953. of_property_read_bool(dev->of_node,
  954. "qcom,notify-modem-status");
  955. if (!esoc_info->notify_modem_status)
  956. goto out;
  957. ret = of_property_read_string_index(dev->of_node, "esoc-names", 0,
  958. &client_desc);
  959. if (ret) {
  960. cnss_pr_dbg("esoc-names is not defined in DT, skip!\n");
  961. } else {
  962. esoc_desc = devm_register_esoc_client(dev, client_desc);
  963. if (IS_ERR_OR_NULL(esoc_desc)) {
  964. ret = PTR_RET(esoc_desc);
  965. cnss_pr_err("Failed to register esoc_desc, err = %d\n",
  966. ret);
  967. goto out;
  968. }
  969. esoc_info->esoc_desc = esoc_desc;
  970. }
  971. plat_priv->modem_nb.notifier_call = cnss_modem_notifier_nb;
  972. esoc_info->modem_current_status = 0;
  973. esoc_info->modem_notify_handler =
  974. subsys_notif_register_notifier(esoc_info->esoc_desc ?
  975. esoc_info->esoc_desc->name :
  976. "modem", &plat_priv->modem_nb);
  977. if (IS_ERR(esoc_info->modem_notify_handler)) {
  978. ret = PTR_ERR(esoc_info->modem_notify_handler);
  979. cnss_pr_err("Failed to register esoc notifier, err = %d\n",
  980. ret);
  981. goto unreg_esoc;
  982. }
  983. return 0;
  984. unreg_esoc:
  985. if (esoc_info->esoc_desc)
  986. devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
  987. out:
  988. return ret;
  989. }
  990. static void cnss_unregister_esoc(struct cnss_plat_data *plat_priv)
  991. {
  992. struct device *dev;
  993. struct cnss_esoc_info *esoc_info;
  994. dev = &plat_priv->plat_dev->dev;
  995. esoc_info = &plat_priv->esoc_info;
  996. if (esoc_info->notify_modem_status)
  997. subsys_notif_unregister_notifier
  998. (esoc_info->modem_notify_handler,
  999. &plat_priv->modem_nb);
  1000. if (esoc_info->esoc_desc)
  1001. devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
  1002. }
  1003. #else
  1004. static inline int cnss_register_esoc(struct cnss_plat_data *plat_priv)
  1005. {
  1006. return 0;
  1007. }
  1008. static inline void cnss_unregister_esoc(struct cnss_plat_data *plat_priv) {}
  1009. #endif
  1010. #if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
  1011. static int cnss_subsys_powerup(const struct subsys_desc *subsys_desc)
  1012. {
  1013. struct cnss_plat_data *plat_priv;
  1014. int ret = 0;
  1015. if (!subsys_desc->dev) {
  1016. cnss_pr_err("dev from subsys_desc is NULL\n");
  1017. return -ENODEV;
  1018. }
  1019. plat_priv = dev_get_drvdata(subsys_desc->dev);
  1020. if (!plat_priv) {
  1021. cnss_pr_err("plat_priv is NULL\n");
  1022. return -ENODEV;
  1023. }
  1024. if (!plat_priv->driver_state) {
  1025. cnss_pr_dbg("Powerup is ignored\n");
  1026. return 0;
  1027. }
  1028. ret = cnss_bus_dev_powerup(plat_priv);
  1029. if (ret)
  1030. __pm_relax(plat_priv->recovery_ws);
  1031. return ret;
  1032. }
  1033. static int cnss_subsys_shutdown(const struct subsys_desc *subsys_desc,
  1034. bool force_stop)
  1035. {
  1036. struct cnss_plat_data *plat_priv;
  1037. if (!subsys_desc->dev) {
  1038. cnss_pr_err("dev from subsys_desc is NULL\n");
  1039. return -ENODEV;
  1040. }
  1041. plat_priv = dev_get_drvdata(subsys_desc->dev);
  1042. if (!plat_priv) {
  1043. cnss_pr_err("plat_priv is NULL\n");
  1044. return -ENODEV;
  1045. }
  1046. if (!plat_priv->driver_state) {
  1047. cnss_pr_dbg("shutdown is ignored\n");
  1048. return 0;
  1049. }
  1050. return cnss_bus_dev_shutdown(plat_priv);
  1051. }
  1052. void cnss_device_crashed(struct device *dev)
  1053. {
  1054. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  1055. struct cnss_subsys_info *subsys_info;
  1056. if (!plat_priv)
  1057. return;
  1058. subsys_info = &plat_priv->subsys_info;
  1059. if (subsys_info->subsys_device) {
  1060. set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
  1061. subsys_set_crash_status(subsys_info->subsys_device, true);
  1062. subsystem_restart_dev(subsys_info->subsys_device);
  1063. }
  1064. }
  1065. EXPORT_SYMBOL(cnss_device_crashed);
  1066. static void cnss_subsys_crash_shutdown(const struct subsys_desc *subsys_desc)
  1067. {
  1068. struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
  1069. if (!plat_priv) {
  1070. cnss_pr_err("plat_priv is NULL\n");
  1071. return;
  1072. }
  1073. cnss_bus_dev_crash_shutdown(plat_priv);
  1074. }
  1075. static int cnss_subsys_ramdump(int enable,
  1076. const struct subsys_desc *subsys_desc)
  1077. {
  1078. struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
  1079. if (!plat_priv) {
  1080. cnss_pr_err("plat_priv is NULL\n");
  1081. return -ENODEV;
  1082. }
  1083. if (!enable)
  1084. return 0;
  1085. return cnss_bus_dev_ramdump(plat_priv);
  1086. }
  1087. static void cnss_recovery_work_handler(struct work_struct *work)
  1088. {
  1089. }
  1090. #else
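/* Without the MSM subsystem-restart framework, recovery is handled in this
 * work item: panic if recovery is disabled, otherwise shut down the bus
 * device, collect a ramdump, wait POWER_RESET_MIN_DELAY_MS and power the
 * device back up, relaxing the recovery wake source if power-up fails.
 */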
  1091. static void cnss_recovery_work_handler(struct work_struct *work)
  1092. {
  1093. int ret;
  1094. struct cnss_plat_data *plat_priv =
  1095. container_of(work, struct cnss_plat_data, recovery_work);
  1096. if (!plat_priv->recovery_enabled)
  1097. panic("subsys-restart: Resetting the SoC wlan crashed\n");
  1098. cnss_bus_dev_shutdown(plat_priv);
  1099. cnss_bus_dev_ramdump(plat_priv);
  1100. msleep(POWER_RESET_MIN_DELAY_MS);
  1101. ret = cnss_bus_dev_powerup(plat_priv);
  1102. if (ret)
  1103. __pm_relax(plat_priv->recovery_ws);
  1104. return;
  1105. }
  1106. void cnss_device_crashed(struct device *dev)
  1107. {
  1108. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  1109. if (!plat_priv)
  1110. return;
  1111. set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
  1112. schedule_work(&plat_priv->recovery_work);
  1113. }
  1114. EXPORT_SYMBOL(cnss_device_crashed);
  1115. #endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
  1116. void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size)
  1117. {
  1118. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  1119. struct cnss_ramdump_info *ramdump_info;
  1120. if (!plat_priv)
  1121. return NULL;
  1122. ramdump_info = &plat_priv->ramdump_info;
  1123. *size = ramdump_info->ramdump_size;
  1124. return ramdump_info->ramdump_va;
  1125. }
  1126. EXPORT_SYMBOL(cnss_get_virt_ramdump_mem);
  1127. static const char *cnss_recovery_reason_to_str(enum cnss_recovery_reason reason)
  1128. {
  1129. switch (reason) {
  1130. case CNSS_REASON_DEFAULT:
  1131. return "DEFAULT";
  1132. case CNSS_REASON_LINK_DOWN:
  1133. return "LINK_DOWN";
  1134. case CNSS_REASON_RDDM:
  1135. return "RDDM";
  1136. case CNSS_REASON_TIMEOUT:
  1137. return "TIMEOUT";
  1138. }
  1139. return "UNKNOWN";
  1140. };
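/* Core recovery flow: bump recovery_count and, unless the SKIP_RECOVERY quirk
 * is set, hold the recovery wake source for CNSS_TIMEOUT_RECOVERY. QCA6174
 * always takes the self-recovery path (shutdown + power up). Link-down is
 * either self-recovered or handled by the bus layer; RDDM collects dump info;
 * otherwise the bus device-crashed handling is triggered.
 */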
  1141. static int cnss_do_recovery(struct cnss_plat_data *plat_priv,
  1142. enum cnss_recovery_reason reason)
  1143. {
  1144. plat_priv->recovery_count++;
  1145. if (plat_priv->device_id == QCA6174_DEVICE_ID)
  1146. goto self_recovery;
  1147. if (test_bit(SKIP_RECOVERY, &plat_priv->ctrl_params.quirks)) {
  1148. cnss_pr_dbg("Skip device recovery\n");
  1149. return 0;
  1150. }
  1151. /* The FW recovery sequence has multiple steps and firmware load requires
  1152. * Linux PM to stay awake. Thus hold the cnss wake source until WLAN
  1153. * mission mode is enabled. CNSS_TIMEOUT_RECOVERY should cover all the
  1154. * time taken by this process.
  1155. */
  1156. pm_wakeup_ws_event(plat_priv->recovery_ws,
  1157. cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY),
  1158. true);
  1159. switch (reason) {
  1160. case CNSS_REASON_LINK_DOWN:
  1161. if (!cnss_bus_check_link_status(plat_priv)) {
  1162. cnss_pr_dbg("Skip link down recovery as link is already up\n");
  1163. return 0;
  1164. }
  1165. if (test_bit(LINK_DOWN_SELF_RECOVERY,
  1166. &plat_priv->ctrl_params.quirks))
  1167. goto self_recovery;
  1168. if (!cnss_bus_recover_link_down(plat_priv)) {
  1169. /* clear recovery bit here to avoid skipping
  1170. * the recovery work for RDDM later
  1171. */
  1172. clear_bit(CNSS_DRIVER_RECOVERY,
  1173. &plat_priv->driver_state);
  1174. return 0;
  1175. }
  1176. break;
  1177. case CNSS_REASON_RDDM:
  1178. cnss_bus_collect_dump_info(plat_priv, false);
  1179. break;
  1180. case CNSS_REASON_DEFAULT:
  1181. case CNSS_REASON_TIMEOUT:
  1182. break;
  1183. default:
  1184. cnss_pr_err("Unsupported recovery reason: %s(%d)\n",
  1185. cnss_recovery_reason_to_str(reason), reason);
  1186. break;
  1187. }
  1188. cnss_bus_device_crashed(plat_priv);
  1189. return 0;
  1190. self_recovery:
  1191. cnss_pr_dbg("Going for self recovery\n");
  1192. cnss_bus_dev_shutdown(plat_priv);
  1193. if (test_bit(LINK_DOWN_SELF_RECOVERY, &plat_priv->ctrl_params.quirks))
  1194. clear_bit(LINK_DOWN_SELF_RECOVERY,
  1195. &plat_priv->ctrl_params.quirks);
  1196. cnss_bus_dev_powerup(plat_priv);
  1197. return 0;
  1198. }
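/* Event handler for CNSS_DRIVER_EVENT_RECOVERY: reject recovery during
 * reboot, driver unload, idle shutdown, or when recovery is already running;
 * mark FW boot recovery if FW is not yet ready, set CNSS_DRIVER_RECOVERY and
 * run cnss_do_recovery(). The posted recovery data is freed here.
 */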
  1199. static int cnss_driver_recovery_hdlr(struct cnss_plat_data *plat_priv,
  1200. void *data)
  1201. {
  1202. struct cnss_recovery_data *recovery_data = data;
  1203. int ret = 0;
  1204. cnss_pr_dbg("Driver recovery is triggered with reason: %s(%d)\n",
  1205. cnss_recovery_reason_to_str(recovery_data->reason),
  1206. recovery_data->reason);
  1207. if (!plat_priv->driver_state) {
  1208. cnss_pr_err("Improper driver state, ignore recovery\n");
  1209. ret = -EINVAL;
  1210. goto out;
  1211. }
  1212. if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
  1213. cnss_pr_err("Reboot is in progress, ignore recovery\n");
  1214. ret = -EINVAL;
  1215. goto out;
  1216. }
  1217. if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
  1218. cnss_pr_err("Recovery is already in progress\n");
  1219. CNSS_ASSERT(0);
  1220. ret = -EINVAL;
  1221. goto out;
  1222. }
  1223. if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
  1224. test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
  1225. cnss_pr_err("Driver unload or idle shutdown is in progress, ignore recovery\n");
  1226. ret = -EINVAL;
  1227. goto out;
  1228. }
  1229. switch (plat_priv->device_id) {
  1230. case QCA6174_DEVICE_ID:
  1231. if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
  1232. test_bit(CNSS_DRIVER_IDLE_RESTART,
  1233. &plat_priv->driver_state)) {
  1234. cnss_pr_err("Driver load or idle restart is in progress, ignore recovery\n");
  1235. ret = -EINVAL;
  1236. goto out;
  1237. }
  1238. break;
  1239. default:
  1240. if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
  1241. set_bit(CNSS_FW_BOOT_RECOVERY,
  1242. &plat_priv->driver_state);
  1243. }
  1244. break;
  1245. }
  1246. set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
  1247. ret = cnss_do_recovery(plat_priv, recovery_data->reason);
  1248. out:
  1249. kfree(data);
  1250. return ret;
  1251. }
  1252. int cnss_self_recovery(struct device *dev,
  1253. enum cnss_recovery_reason reason)
  1254. {
  1255. cnss_schedule_recovery(dev, reason);
  1256. return 0;
  1257. }
  1258. EXPORT_SYMBOL(cnss_self_recovery);
  1259. void cnss_schedule_recovery(struct device *dev,
  1260. enum cnss_recovery_reason reason)
  1261. {
  1262. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  1263. struct cnss_recovery_data *data;
  1264. int gfp = GFP_KERNEL;
  1265. if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
  1266. cnss_bus_update_status(plat_priv, CNSS_FW_DOWN);
  1267. if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
  1268. test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
  1269. cnss_pr_dbg("Driver unload or idle shutdown is in progress, ignore schedule recovery\n");
  1270. return;
  1271. }
  1272. if (in_interrupt() || irqs_disabled())
  1273. gfp = GFP_ATOMIC;
  1274. data = kzalloc(sizeof(*data), gfp);
  1275. if (!data)
  1276. return;
  1277. data->reason = reason;
  1278. cnss_driver_event_post(plat_priv,
  1279. CNSS_DRIVER_EVENT_RECOVERY,
  1280. 0, data);
  1281. }
  1282. EXPORT_SYMBOL(cnss_schedule_recovery);
  1283. int cnss_force_fw_assert(struct device *dev)
  1284. {
  1285. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  1286. if (!plat_priv) {
  1287. cnss_pr_err("plat_priv is NULL\n");
  1288. return -ENODEV;
  1289. }
  1290. if (plat_priv->device_id == QCA6174_DEVICE_ID) {
  1291. cnss_pr_info("Forced FW assert is not supported\n");
  1292. return -EOPNOTSUPP;
  1293. }
  1294. if (cnss_bus_is_device_down(plat_priv)) {
  1295. cnss_pr_info("Device is already in bad state, ignore force assert\n");
  1296. return 0;
  1297. }
  1298. if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
  1299. cnss_pr_info("Recovery is already in progress, ignore forced FW assert\n");
  1300. return 0;
  1301. }
  1302. if (in_interrupt() || irqs_disabled())
  1303. cnss_driver_event_post(plat_priv,
  1304. CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
  1305. 0, NULL);
  1306. else
  1307. cnss_bus_force_fw_assert_hdlr(plat_priv);
  1308. return 0;
  1309. }
  1310. EXPORT_SYMBOL(cnss_force_fw_assert);
  1311. int cnss_force_collect_rddm(struct device *dev)
  1312. {
  1313. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  1314. unsigned int timeout;
  1315. int ret = 0;
  1316. if (!plat_priv) {
  1317. cnss_pr_err("plat_priv is NULL\n");
  1318. return -ENODEV;
  1319. }
  1320. if (plat_priv->device_id == QCA6174_DEVICE_ID) {
  1321. cnss_pr_info("Force collect rddm is not supported\n");
  1322. return -EOPNOTSUPP;
  1323. }
  1324. if (cnss_bus_is_device_down(plat_priv)) {
  1325. cnss_pr_info("Device is already in bad state, wait to collect rddm\n");
  1326. goto wait_rddm;
  1327. }
  1328. if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
  1329. cnss_pr_info("Recovery is already in progress, wait to collect rddm\n");
  1330. goto wait_rddm;
  1331. }
  1332. if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
  1333. test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
  1334. test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) ||
  1335. test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
  1336. cnss_pr_info("Loading/Unloading/idle restart/shutdown is in progress, ignore forced collect rddm\n");
  1337. return 0;
  1338. }
  1339. ret = cnss_bus_force_fw_assert_hdlr(plat_priv);
  1340. if (ret)
  1341. return ret;
  1342. wait_rddm:
  1343. reinit_completion(&plat_priv->rddm_complete);
  1344. timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RDDM);
  1345. ret = wait_for_completion_timeout(&plat_priv->rddm_complete,
  1346. msecs_to_jiffies(timeout));
  1347. if (!ret) {
  1348. cnss_pr_err("Timeout (%ums) waiting for RDDM to complete\n",
  1349. timeout);
  1350. ret = -ETIMEDOUT;
  1351. } else if (ret > 0) {
  1352. ret = 0;
  1353. }
  1354. return ret;
  1355. }
  1356. EXPORT_SYMBOL(cnss_force_collect_rddm);
  1357. int cnss_qmi_send_get(struct device *dev)
  1358. {
  1359. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  1360. if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
  1361. return 0;
  1362. return cnss_bus_qmi_send_get(plat_priv);
  1363. }
  1364. EXPORT_SYMBOL(cnss_qmi_send_get);
  1365. int cnss_qmi_send_put(struct device *dev)
  1366. {
  1367. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  1368. if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
  1369. return 0;
  1370. return cnss_bus_qmi_send_put(plat_priv);
  1371. }
  1372. EXPORT_SYMBOL(cnss_qmi_send_put);
  1373. int cnss_qmi_send(struct device *dev, int type, void *cmd,
  1374. int cmd_len, void *cb_ctx,
  1375. int (*cb)(void *ctx, void *event, int event_len))
  1376. {
  1377. struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
  1378. int ret;
  1379. if (!plat_priv)
  1380. return -ENODEV;
  1381. if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
  1382. return -EINVAL;
  1383. plat_priv->get_info_cb = cb;
  1384. plat_priv->get_info_cb_ctx = cb_ctx;
  1385. ret = cnss_wlfw_get_info_send_sync(plat_priv, type, cmd, cmd_len);
  1386. if (ret) {
  1387. plat_priv->get_info_cb = NULL;
  1388. plat_priv->get_info_cb_ctx = NULL;
  1389. }
  1390. return ret;
  1391. }
  1392. EXPORT_SYMBOL(cnss_qmi_send);
  1393. static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
  1394. {
  1395. int ret = 0;
  1396. u32 retry = 0;
  1397. if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
  1398. cnss_pr_dbg("Calibration complete. Ignore calibration req\n");
  1399. goto out;
  1400. } else if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
  1401. cnss_pr_dbg("Calibration in progress. Ignore new calibration req\n");
  1402. goto out;
  1403. }
  1404. if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
  1405. test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state) ||
  1406. test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
  1407. cnss_pr_err("WLAN in mission mode before cold boot calibration\n");
  1408. CNSS_ASSERT(0);
  1409. return -EINVAL;
  1410. }
  1411. while (retry++ < CNSS_CAL_START_PROBE_WAIT_RETRY_MAX) {
  1412. if (test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state))
  1413. break;
  1414. msleep(CNSS_CAL_START_PROBE_WAIT_MS);
  1415. if (retry == CNSS_CAL_START_PROBE_WAIT_RETRY_MAX) {
  1416. cnss_pr_err("Calibration start failed as PCI probe not complete\n");
  1417. CNSS_ASSERT(0);
  1418. ret = -EINVAL;
  1419. goto mark_cal_fail;
  1420. }
  1421. }
  1422. set_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
  1423. reinit_completion(&plat_priv->cal_complete);
  1424. ret = cnss_bus_dev_powerup(plat_priv);
  1425. mark_cal_fail:
  1426. if (ret) {
  1427. complete(&plat_priv->cal_complete);
  1428. clear_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
  1429. /* Set CBC done in driver state to mark attempt and note error
  1430. * since calibration cannot be retried at boot.
  1431. */
  1432. plat_priv->cal_done = CNSS_CAL_FAILURE;
  1433. set_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state);
  1434. }
  1435. out:
  1436. return ret;
  1437. }
  1438. static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv,
  1439. void *data)
  1440. {
  1441. struct cnss_cal_info *cal_info = data;
  1442. if (!test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) ||
  1443. test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state))
  1444. goto out;
  1445. switch (cal_info->cal_status) {
  1446. case CNSS_CAL_DONE:
  1447. cnss_pr_dbg("Calibration completed successfully\n");
  1448. plat_priv->cal_done = true;
  1449. break;
  1450. case CNSS_CAL_TIMEOUT:
  1451. case CNSS_CAL_FAILURE:
  1452. cnss_pr_dbg("Calibration failed. Status: %d, force shutdown\n",
  1453. cal_info->cal_status);
  1454. break;
  1455. default:
  1456. cnss_pr_err("Unknown calibration status: %u\n",
  1457. cal_info->cal_status);
  1458. break;
  1459. }
  1460. cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
  1461. cnss_bus_free_qdss_mem(plat_priv);
  1462. cnss_release_antenna_sharing(plat_priv);
  1463. cnss_bus_dev_shutdown(plat_priv);
  1464. msleep(POWER_RESET_MIN_DELAY_MS);
  1465. complete(&plat_priv->cal_complete);
  1466. clear_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
  1467. set_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state);
  1468. if (cal_info->cal_status == CNSS_CAL_DONE) {
  1469. cnss_cal_mem_upload_to_file(plat_priv);
  1470. if (cancel_delayed_work_sync(&plat_priv->wlan_reg_driver_work)
  1471. ) {
  1472. cnss_pr_dbg("Schedule WLAN driver load\n");
  1473. schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
  1474. 0);
  1475. }
  1476. }
  1477. out:
  1478. kfree(data);
  1479. return 0;
  1480. }
  1481. static int cnss_power_up_hdlr(struct cnss_plat_data *plat_priv)
  1482. {
  1483. int ret;
  1484. ret = cnss_bus_dev_powerup(plat_priv);
  1485. if (ret)
  1486. clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
  1487. return ret;
  1488. }
  1489. static int cnss_power_down_hdlr(struct cnss_plat_data *plat_priv)
  1490. {
  1491. cnss_bus_dev_shutdown(plat_priv);
  1492. return 0;
  1493. }
  1494. static int cnss_qdss_trace_req_mem_hdlr(struct cnss_plat_data *plat_priv)
  1495. {
  1496. int ret = 0;
  1497. ret = cnss_bus_alloc_qdss_mem(plat_priv);
  1498. if (ret < 0)
  1499. return ret;
  1500. return cnss_wlfw_qdss_trace_mem_info_send_sync(plat_priv);
  1501. }
  1502. static void *cnss_get_fw_mem_pa_to_va(struct cnss_fw_mem *fw_mem,
  1503. u32 mem_seg_len, u64 pa, u32 size)
  1504. {
  1505. int i = 0;
  1506. u64 offset = 0;
  1507. void *va = NULL;
  1508. u64 local_pa;
  1509. u32 local_size;
  1510. for (i = 0; i < mem_seg_len; i++) {
  1511. local_pa = (u64)fw_mem[i].pa;
  1512. local_size = (u32)fw_mem[i].size;
  1513. if (pa == local_pa && size <= local_size) {
  1514. va = fw_mem[i].va;
  1515. break;
  1516. }
  1517. if (pa > local_pa &&
  1518. pa < local_pa + local_size &&
  1519. pa + size <= local_pa + local_size) {
  1520. offset = pa - local_pa;
  1521. va = fw_mem[i].va + offset;
  1522. break;
  1523. }
  1524. }
  1525. return va;
  1526. }
  1527. static int cnss_fw_mem_file_save_hdlr(struct cnss_plat_data *plat_priv,
  1528. void *data)
  1529. {
  1530. struct cnss_qmi_event_fw_mem_file_save_data *event_data = data;
  1531. struct cnss_fw_mem *fw_mem_seg;
  1532. int ret = 0L;
  1533. void *va = NULL;
  1534. u32 i, fw_mem_seg_len;
  1535. switch (event_data->mem_type) {
  1536. case QMI_WLFW_MEM_TYPE_DDR_V01:
  1537. if (!plat_priv->fw_mem_seg_len)
  1538. goto invalid_mem_save;
  1539. fw_mem_seg = plat_priv->fw_mem;
  1540. fw_mem_seg_len = plat_priv->fw_mem_seg_len;
  1541. break;
  1542. case QMI_WLFW_MEM_QDSS_V01:
  1543. if (!plat_priv->qdss_mem_seg_len)
  1544. goto invalid_mem_save;
  1545. fw_mem_seg = plat_priv->qdss_mem;
  1546. fw_mem_seg_len = plat_priv->qdss_mem_seg_len;
  1547. break;
  1548. default:
  1549. goto invalid_mem_save;
  1550. }
  1551. for (i = 0; i < event_data->mem_seg_len; i++) {
  1552. va = cnss_get_fw_mem_pa_to_va(fw_mem_seg, fw_mem_seg_len,
  1553. event_data->mem_seg[i].addr,
  1554. event_data->mem_seg[i].size);
  1555. if (!va) {
  1556. cnss_pr_err("Fail to find matching va of pa %pa for mem type: %d\n",
  1557. &event_data->mem_seg[i].addr,
  1558. event_data->mem_type);
  1559. ret = -EINVAL;
  1560. break;
  1561. }
  1562. ret = cnss_genl_send_msg(va, CNSS_GENL_MSG_TYPE_QDSS,
  1563. event_data->file_name,
  1564. event_data->mem_seg[i].size);
  1565. if (ret < 0) {
  1566. cnss_pr_err("Fail to save fw mem data: %d\n",
  1567. ret);
  1568. break;
  1569. }
  1570. }
  1571. kfree(data);
  1572. return ret;
  1573. invalid_mem_save:
  1574. cnss_pr_err("FW Mem type %d not allocated. Invalid save request\n",
  1575. event_data->mem_type);
  1576. kfree(data);
  1577. return -EINVAL;
  1578. }
  1579. static int cnss_qdss_trace_free_hdlr(struct cnss_plat_data *plat_priv)
  1580. {
  1581. cnss_bus_free_qdss_mem(plat_priv);
  1582. return 0;
  1583. }
  1584. static int cnss_qdss_trace_req_data_hdlr(struct cnss_plat_data *plat_priv,
  1585. void *data)
  1586. {
  1587. int ret = 0;
  1588. struct cnss_qmi_event_fw_mem_file_save_data *event_data = data;
  1589. if (!plat_priv)
  1590. return -ENODEV;
  1591. ret = cnss_wlfw_qdss_data_send_sync(plat_priv, event_data->file_name,
  1592. event_data->total_size);
  1593. kfree(data);
  1594. return ret;
  1595. }
  1596. static void cnss_driver_event_work(struct work_struct *work)
  1597. {
  1598. struct cnss_plat_data *plat_priv =
  1599. container_of(work, struct cnss_plat_data, event_work);
  1600. struct cnss_driver_event *event;
  1601. unsigned long flags;
  1602. int ret = 0;
  1603. if (!plat_priv) {
  1604. cnss_pr_err("plat_priv is NULL!\n");
  1605. return;
  1606. }
  1607. cnss_pm_stay_awake(plat_priv);
  1608. spin_lock_irqsave(&plat_priv->event_lock, flags);
  1609. while (!list_empty(&plat_priv->event_list)) {
  1610. event = list_first_entry(&plat_priv->event_list,
  1611. struct cnss_driver_event, list);
  1612. list_del(&event->list);
  1613. spin_unlock_irqrestore(&plat_priv->event_lock, flags);
  1614. cnss_pr_dbg("Processing driver event: %s%s(%d), state: 0x%lx\n",
  1615. cnss_driver_event_to_str(event->type),
  1616. event->sync ? "-sync" : "", event->type,
  1617. plat_priv->driver_state);
  1618. switch (event->type) {
  1619. case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
  1620. ret = cnss_wlfw_server_arrive(plat_priv, event->data);
  1621. break;
  1622. case CNSS_DRIVER_EVENT_SERVER_EXIT:
  1623. ret = cnss_wlfw_server_exit(plat_priv);
  1624. break;
  1625. case CNSS_DRIVER_EVENT_REQUEST_MEM:
  1626. ret = cnss_bus_alloc_fw_mem(plat_priv);
  1627. if (ret)
  1628. break;
  1629. ret = cnss_wlfw_respond_mem_send_sync(plat_priv);
  1630. break;
  1631. case CNSS_DRIVER_EVENT_FW_MEM_READY:
  1632. ret = cnss_fw_mem_ready_hdlr(plat_priv);
  1633. break;
  1634. case CNSS_DRIVER_EVENT_FW_READY:
  1635. ret = cnss_fw_ready_hdlr(plat_priv);
  1636. break;
  1637. case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
  1638. ret = cnss_cold_boot_cal_start_hdlr(plat_priv);
  1639. break;
  1640. case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
  1641. ret = cnss_cold_boot_cal_done_hdlr(plat_priv,
  1642. event->data);
  1643. break;
  1644. case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
  1645. ret = cnss_bus_register_driver_hdlr(plat_priv,
  1646. event->data);
  1647. break;
  1648. case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
  1649. ret = cnss_bus_unregister_driver_hdlr(plat_priv);
  1650. break;
  1651. case CNSS_DRIVER_EVENT_RECOVERY:
  1652. ret = cnss_driver_recovery_hdlr(plat_priv,
  1653. event->data);
  1654. break;
  1655. case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
  1656. ret = cnss_bus_force_fw_assert_hdlr(plat_priv);
  1657. break;
  1658. case CNSS_DRIVER_EVENT_IDLE_RESTART:
  1659. set_bit(CNSS_DRIVER_IDLE_RESTART,
  1660. &plat_priv->driver_state);
  1661. /* fall through */
  1662. case CNSS_DRIVER_EVENT_POWER_UP:
  1663. ret = cnss_power_up_hdlr(plat_priv);
  1664. break;
  1665. case CNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
  1666. set_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
  1667. &plat_priv->driver_state);
  1668. /* fall through */
  1669. case CNSS_DRIVER_EVENT_POWER_DOWN:
  1670. ret = cnss_power_down_hdlr(plat_priv);
  1671. break;
  1672. case CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND:
  1673. ret = cnss_process_wfc_call_ind_event(plat_priv,
  1674. event->data);
  1675. break;
  1676. case CNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND:
  1677. ret = cnss_process_twt_cfg_ind_event(plat_priv,
  1678. event->data);
  1679. break;
  1680. case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
  1681. ret = cnss_qdss_trace_req_mem_hdlr(plat_priv);
  1682. break;
  1683. case CNSS_DRIVER_EVENT_FW_MEM_FILE_SAVE:
  1684. ret = cnss_fw_mem_file_save_hdlr(plat_priv,
  1685. event->data);
  1686. break;
  1687. case CNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
  1688. ret = cnss_qdss_trace_free_hdlr(plat_priv);
  1689. break;
  1690. case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA:
  1691. ret = cnss_qdss_trace_req_data_hdlr(plat_priv,
  1692. event->data);
  1693. break;
  1694. default:
  1695. cnss_pr_err("Invalid driver event type: %d",
  1696. event->type);
  1697. kfree(event);
  1698. spin_lock_irqsave(&plat_priv->event_lock, flags);
  1699. continue;
  1700. }
  1701. spin_lock_irqsave(&plat_priv->event_lock, flags);
  1702. if (event->sync) {
  1703. event->ret = ret;
  1704. complete(&event->complete);
  1705. continue;
  1706. }
  1707. spin_unlock_irqrestore(&plat_priv->event_lock, flags);
  1708. kfree(event);
  1709. spin_lock_irqsave(&plat_priv->event_lock, flags);
  1710. }
  1711. spin_unlock_irqrestore(&plat_priv->event_lock, flags);
  1712. cnss_pm_relax(plat_priv);
  1713. }
  1714. int cnss_va_to_pa(struct device *dev, size_t size, void *va, dma_addr_t dma,
  1715. phys_addr_t *pa, unsigned long attrs)
  1716. {
  1717. struct sg_table sgt;
  1718. int ret;
  1719. ret = dma_get_sgtable_attrs(dev, &sgt, va, dma, size, attrs);
  1720. if (ret) {
  1721. cnss_pr_err("Failed to get sgtable for va: 0x%pK, dma: %pa, size: 0x%zx, attrs: 0x%x\n",
  1722. va, &dma, size, attrs);
  1723. return -EINVAL;
  1724. }
  1725. *pa = page_to_phys(sg_page(sgt.sgl));
  1726. sg_free_table(&sgt);
  1727. return 0;
  1728. }
  1729. #if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
  1730. int cnss_register_subsys(struct cnss_plat_data *plat_priv)
  1731. {
  1732. int ret = 0;
  1733. struct cnss_subsys_info *subsys_info;
  1734. subsys_info = &plat_priv->subsys_info;
  1735. subsys_info->subsys_desc.name = "wlan";
  1736. subsys_info->subsys_desc.owner = THIS_MODULE;
  1737. subsys_info->subsys_desc.powerup = cnss_subsys_powerup;
  1738. subsys_info->subsys_desc.shutdown = cnss_subsys_shutdown;
  1739. subsys_info->subsys_desc.ramdump = cnss_subsys_ramdump;
  1740. subsys_info->subsys_desc.crash_shutdown = cnss_subsys_crash_shutdown;
  1741. subsys_info->subsys_desc.dev = &plat_priv->plat_dev->dev;
  1742. subsys_info->subsys_device = subsys_register(&subsys_info->subsys_desc);
  1743. if (IS_ERR(subsys_info->subsys_device)) {
  1744. ret = PTR_ERR(subsys_info->subsys_device);
  1745. cnss_pr_err("Failed to register subsys, err = %d\n", ret);
  1746. goto out;
  1747. }
  1748. subsys_info->subsys_handle =
  1749. subsystem_get(subsys_info->subsys_desc.name);
  1750. if (!subsys_info->subsys_handle) {
  1751. cnss_pr_err("Failed to get subsys_handle!\n");
  1752. ret = -EINVAL;
  1753. goto unregister_subsys;
  1754. } else if (IS_ERR(subsys_info->subsys_handle)) {
  1755. ret = PTR_ERR(subsys_info->subsys_handle);
  1756. cnss_pr_err("Failed to do subsystem_get, err = %d\n", ret);
  1757. goto unregister_subsys;
  1758. }
  1759. return 0;
  1760. unregister_subsys:
  1761. subsys_unregister(subsys_info->subsys_device);
  1762. out:
  1763. return ret;
  1764. }
  1765. void cnss_unregister_subsys(struct cnss_plat_data *plat_priv)
  1766. {
  1767. struct cnss_subsys_info *subsys_info;
  1768. subsys_info = &plat_priv->subsys_info;
  1769. subsystem_put(subsys_info->subsys_handle);
  1770. subsys_unregister(subsys_info->subsys_device);
  1771. }
  1772. static void *cnss_create_ramdump_device(struct cnss_plat_data *plat_priv)
  1773. {
  1774. struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
  1775. return create_ramdump_device(subsys_info->subsys_desc.name,
  1776. subsys_info->subsys_desc.dev);
  1777. }
  1778. static void cnss_destroy_ramdump_device(struct cnss_plat_data *plat_priv,
  1779. void *ramdump_dev)
  1780. {
  1781. destroy_ramdump_device(ramdump_dev);
  1782. }
  1783. int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
  1784. {
  1785. struct cnss_ramdump_info *ramdump_info = &plat_priv->ramdump_info;
  1786. struct ramdump_segment segment;
  1787. memset(&segment, 0, sizeof(segment));
  1788. segment.v_address = (void __iomem *)ramdump_info->ramdump_va;
  1789. segment.size = ramdump_info->ramdump_size;
  1790. return qcom_ramdump(ramdump_info->ramdump_dev, &segment, 1);
  1791. }
  1792. int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
  1793. {
  1794. struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
  1795. struct cnss_dump_data *dump_data = &info_v2->dump_data;
  1796. struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
  1797. struct ramdump_segment *ramdump_segs, *s;
  1798. struct cnss_dump_meta_info meta_info = {0};
  1799. int i, ret = 0;
  1800. ramdump_segs = kcalloc(dump_data->nentries + 1,
  1801. sizeof(*ramdump_segs),
  1802. GFP_KERNEL);
  1803. if (!ramdump_segs)
  1804. return -ENOMEM;
  1805. s = ramdump_segs + 1;
  1806. for (i = 0; i < dump_data->nentries; i++) {
  1807. if (dump_seg->type >= CNSS_FW_DUMP_TYPE_MAX) {
  1808. cnss_pr_err("Unsupported dump type: %d",
  1809. dump_seg->type);
  1810. continue;
  1811. }
  1812. if (meta_info.entry[dump_seg->type].entry_start == 0) {
  1813. meta_info.entry[dump_seg->type].type = dump_seg->type;
  1814. meta_info.entry[dump_seg->type].entry_start = i + 1;
  1815. }
  1816. meta_info.entry[dump_seg->type].entry_num++;
  1817. s->address = dump_seg->address;
  1818. s->v_address = (void __iomem *)dump_seg->v_address;
  1819. s->size = dump_seg->size;
  1820. s++;
  1821. dump_seg++;
  1822. }
  1823. meta_info.magic = CNSS_RAMDUMP_MAGIC;
  1824. meta_info.version = CNSS_RAMDUMP_VERSION;
  1825. meta_info.chipset = plat_priv->device_id;
  1826. meta_info.total_entries = CNSS_FW_DUMP_TYPE_MAX;
  1827. ramdump_segs->v_address = (void __iomem *)(&meta_info);
  1828. ramdump_segs->size = sizeof(meta_info);
  1829. ret = qcom_elf_ramdump(info_v2->ramdump_dev, ramdump_segs,
  1830. dump_data->nentries + 1);
  1831. kfree(ramdump_segs);
  1832. return ret;
  1833. }
  1834. #else
  1835. static int cnss_panic_handler(struct notifier_block *nb, unsigned long action,
  1836. void *data)
  1837. {
  1838. struct cnss_plat_data *plat_priv =
  1839. container_of(nb, struct cnss_plat_data, panic_nb);
  1840. cnss_bus_dev_crash_shutdown(plat_priv);
  1841. return NOTIFY_DONE;
  1842. }
  1843. int cnss_register_subsys(struct cnss_plat_data *plat_priv)
  1844. {
  1845. int ret;
  1846. if (!plat_priv)
  1847. return -ENODEV;
  1848. plat_priv->panic_nb.notifier_call = cnss_panic_handler;
  1849. ret = atomic_notifier_chain_register(&panic_notifier_list,
  1850. &plat_priv->panic_nb);
  1851. if (ret) {
  1852. cnss_pr_err("Failed to register panic handler\n");
  1853. return -EINVAL;
  1854. }
  1855. return 0;
  1856. }
  1857. void cnss_unregister_subsys(struct cnss_plat_data *plat_priv)
  1858. {
  1859. int ret;
  1860. ret = atomic_notifier_chain_unregister(&panic_notifier_list,
  1861. &plat_priv->panic_nb);
  1862. if (ret)
  1863. cnss_pr_err("Failed to unregister panic handler\n");
  1864. }
  1865. #if IS_ENABLED(CONFIG_QCOM_MEMORY_DUMP_V2)
  1866. static void *cnss_create_ramdump_device(struct cnss_plat_data *plat_priv)
  1867. {
  1868. return &plat_priv->plat_dev->dev;
  1869. }
  1870. static void cnss_destroy_ramdump_device(struct cnss_plat_data *plat_priv,
  1871. void *ramdump_dev)
  1872. {
  1873. }
  1874. #endif
  1875. #if IS_ENABLED(CONFIG_QCOM_RAMDUMP)
  1876. int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
  1877. {
  1878. struct cnss_ramdump_info *ramdump_info = &plat_priv->ramdump_info;
  1879. struct qcom_dump_segment segment;
  1880. struct list_head head;
  1881. INIT_LIST_HEAD(&head);
  1882. memset(&segment, 0, sizeof(segment));
  1883. segment.va = ramdump_info->ramdump_va;
  1884. segment.size = ramdump_info->ramdump_size;
  1885. list_add(&segment.node, &head);
  1886. return qcom_dump(&head, ramdump_info->ramdump_dev);
  1887. }
  1888. int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
  1889. {
  1890. struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
  1891. struct cnss_dump_data *dump_data = &info_v2->dump_data;
  1892. struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
  1893. struct qcom_dump_segment *seg;
  1894. struct cnss_dump_meta_info meta_info = {0};
  1895. struct list_head head;
  1896. int i, ret = 0;
  1897. if (!dump_enabled()) {
  1898. cnss_pr_info("Dump collection is not enabled\n");
  1899. return ret;
  1900. }
  1901. INIT_LIST_HEAD(&head);
  1902. for (i = 0; i < dump_data->nentries; i++) {
  1903. if (dump_seg->type >= CNSS_FW_DUMP_TYPE_MAX) {
  1904. cnss_pr_err("Unsupported dump type: %d",
  1905. dump_seg->type);
  1906. continue;
  1907. }
  1908. seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
  1909. if (!seg)
  1910. continue;
  1911. if (meta_info.entry[dump_seg->type].entry_start == 0) {
  1912. meta_info.entry[dump_seg->type].type = dump_seg->type;
  1913. meta_info.entry[dump_seg->type].entry_start = i + 1;
  1914. }
  1915. meta_info.entry[dump_seg->type].entry_num++;
  1916. seg->da = dump_seg->address;
  1917. seg->va = dump_seg->v_address;
  1918. seg->size = dump_seg->size;
  1919. list_add_tail(&seg->node, &head);
  1920. dump_seg++;
  1921. }
  1922. seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
  1923. if (!seg)
  1924. goto do_elf_dump;
  1925. meta_info.magic = CNSS_RAMDUMP_MAGIC;
  1926. meta_info.version = CNSS_RAMDUMP_VERSION;
  1927. meta_info.chipset = plat_priv->device_id;
  1928. meta_info.total_entries = CNSS_FW_DUMP_TYPE_MAX;
  1929. seg->va = &meta_info;
  1930. seg->size = sizeof(meta_info);
  1931. list_add(&seg->node, &head);
  1932. do_elf_dump:
  1933. ret = qcom_elf_dump(&head, info_v2->ramdump_dev, ELF_CLASS);
  1934. while (!list_empty(&head)) {
  1935. seg = list_first_entry(&head, struct qcom_dump_segment, node);
  1936. list_del(&seg->node);
  1937. kfree(seg);
  1938. }
  1939. return ret;
  1940. }
  1941. #else
  1942. int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
  1943. {
  1944. return 0;
  1945. }
  1946. int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
  1947. {
  1948. return 0;
  1949. }
  1950. #endif /* CONFIG_QCOM_RAMDUMP */
  1951. #endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
  1952. #if IS_ENABLED(CONFIG_QCOM_MEMORY_DUMP_V2)
  1953. static int cnss_init_dump_entry(struct cnss_plat_data *plat_priv)
  1954. {
  1955. struct cnss_ramdump_info *ramdump_info;
  1956. struct msm_dump_entry dump_entry;
  1957. ramdump_info = &plat_priv->ramdump_info;
  1958. ramdump_info->dump_data.addr = ramdump_info->ramdump_pa;
  1959. ramdump_info->dump_data.len = ramdump_info->ramdump_size;
  1960. ramdump_info->dump_data.version = CNSS_DUMP_FORMAT_VER;
  1961. ramdump_info->dump_data.magic = CNSS_DUMP_MAGIC_VER_V2;
  1962. strlcpy(ramdump_info->dump_data.name, CNSS_DUMP_NAME,
  1963. sizeof(ramdump_info->dump_data.name));
  1964. dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
  1965. dump_entry.addr = virt_to_phys(&ramdump_info->dump_data);
  1966. return msm_dump_data_register_nominidump(MSM_DUMP_TABLE_APPS,
  1967. &dump_entry);
  1968. }
  1969. static int cnss_register_ramdump_v1(struct cnss_plat_data *plat_priv)
  1970. {
  1971. int ret = 0;
  1972. struct device *dev;
  1973. struct cnss_ramdump_info *ramdump_info;
  1974. u32 ramdump_size = 0;
  1975. dev = &plat_priv->plat_dev->dev;
  1976. ramdump_info = &plat_priv->ramdump_info;
  1977. if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic",
  1978. &ramdump_size) == 0) {
  1979. ramdump_info->ramdump_va =
  1980. dma_alloc_coherent(dev, ramdump_size,
  1981. &ramdump_info->ramdump_pa,
  1982. GFP_KERNEL);
  1983. if (ramdump_info->ramdump_va)
  1984. ramdump_info->ramdump_size = ramdump_size;
  1985. }
  1986. cnss_pr_dbg("ramdump va: %pK, pa: %pa\n",
  1987. ramdump_info->ramdump_va, &ramdump_info->ramdump_pa);
  1988. if (ramdump_info->ramdump_size == 0) {
  1989. cnss_pr_info("Ramdump will not be collected");
  1990. goto out;
  1991. }
  1992. ret = cnss_init_dump_entry(plat_priv);
  1993. if (ret) {
  1994. cnss_pr_err("Failed to setup dump table, err = %d\n", ret);
  1995. goto free_ramdump;
  1996. }
  1997. ramdump_info->ramdump_dev = cnss_create_ramdump_device(plat_priv);
  1998. if (!ramdump_info->ramdump_dev) {
  1999. cnss_pr_err("Failed to create ramdump device!");
  2000. ret = -ENOMEM;
  2001. goto free_ramdump;
  2002. }
  2003. return 0;
  2004. free_ramdump:
  2005. dma_free_coherent(dev, ramdump_info->ramdump_size,
  2006. ramdump_info->ramdump_va, ramdump_info->ramdump_pa);
  2007. out:
  2008. return ret;
  2009. }
  2010. static void cnss_unregister_ramdump_v1(struct cnss_plat_data *plat_priv)
  2011. {
  2012. struct device *dev;
  2013. struct cnss_ramdump_info *ramdump_info;
  2014. dev = &plat_priv->plat_dev->dev;
  2015. ramdump_info = &plat_priv->ramdump_info;
  2016. if (ramdump_info->ramdump_dev)
  2017. cnss_destroy_ramdump_device(plat_priv,
  2018. ramdump_info->ramdump_dev);
  2019. if (ramdump_info->ramdump_va)
  2020. dma_free_coherent(dev, ramdump_info->ramdump_size,
  2021. ramdump_info->ramdump_va,
  2022. ramdump_info->ramdump_pa);
  2023. }
  2024. /**
  2025. * cnss_ignore_dump_data_reg_fail - Ignore Ramdump table register failure
  2026. * @ret: Error returned by msm_dump_data_register_nominidump
  2027. *
  2028. * For Lahaina GKI boot, we dont have support for mem dump feature. So
  2029. * ignore failure.
  2030. *
  2031. * Return: Same given error code if mem dump feature enabled, 0 otherwise
  2032. */
  2033. static int cnss_ignore_dump_data_reg_fail(int ret)
  2034. {
  2035. return ret;
  2036. }
  2037. static int cnss_register_ramdump_v2(struct cnss_plat_data *plat_priv)
  2038. {
  2039. int ret = 0;
  2040. struct cnss_ramdump_info_v2 *info_v2;
  2041. struct cnss_dump_data *dump_data;
  2042. struct msm_dump_entry dump_entry;
  2043. struct device *dev = &plat_priv->plat_dev->dev;
  2044. u32 ramdump_size = 0;
  2045. info_v2 = &plat_priv->ramdump_info_v2;
  2046. dump_data = &info_v2->dump_data;
  2047. if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic",
  2048. &ramdump_size) == 0)
  2049. info_v2->ramdump_size = ramdump_size;
  2050. cnss_pr_dbg("Ramdump size 0x%lx\n", info_v2->ramdump_size);
  2051. info_v2->dump_data_vaddr = kzalloc(CNSS_DUMP_DESC_SIZE, GFP_KERNEL);
  2052. if (!info_v2->dump_data_vaddr)
  2053. return -ENOMEM;
  2054. dump_data->paddr = virt_to_phys(info_v2->dump_data_vaddr);
  2055. dump_data->version = CNSS_DUMP_FORMAT_VER_V2;
  2056. dump_data->magic = CNSS_DUMP_MAGIC_VER_V2;
  2057. dump_data->seg_version = CNSS_DUMP_SEG_VER;
  2058. strlcpy(dump_data->name, CNSS_DUMP_NAME,
  2059. sizeof(dump_data->name));
  2060. dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
  2061. dump_entry.addr = virt_to_phys(dump_data);
  2062. ret = msm_dump_data_register_nominidump(MSM_DUMP_TABLE_APPS,
  2063. &dump_entry);
  2064. if (ret) {
  2065. ret = cnss_ignore_dump_data_reg_fail(ret);
  2066. cnss_pr_err("Failed to setup dump table, %s (%d)\n",
  2067. ret ? "Error" : "Ignoring", ret);
  2068. goto free_ramdump;
  2069. }
  2070. info_v2->ramdump_dev = cnss_create_ramdump_device(plat_priv);
  2071. if (!info_v2->ramdump_dev) {
  2072. cnss_pr_err("Failed to create ramdump device!\n");
  2073. ret = -ENOMEM;
  2074. goto free_ramdump;
  2075. }
  2076. return 0;
  2077. free_ramdump:
  2078. kfree(info_v2->dump_data_vaddr);
  2079. info_v2->dump_data_vaddr = NULL;
  2080. return ret;
  2081. }
  2082. static void cnss_unregister_ramdump_v2(struct cnss_plat_data *plat_priv)
  2083. {
  2084. struct cnss_ramdump_info_v2 *info_v2;
  2085. info_v2 = &plat_priv->ramdump_info_v2;
  2086. if (info_v2->ramdump_dev)
  2087. cnss_destroy_ramdump_device(plat_priv, info_v2->ramdump_dev);
  2088. kfree(info_v2->dump_data_vaddr);
  2089. info_v2->dump_data_vaddr = NULL;
  2090. info_v2->dump_data_valid = false;
  2091. }
  2092. int cnss_register_ramdump(struct cnss_plat_data *plat_priv)
  2093. {
  2094. int ret = 0;
  2095. switch (plat_priv->device_id) {
  2096. case QCA6174_DEVICE_ID:
  2097. ret = cnss_register_ramdump_v1(plat_priv);
  2098. break;
  2099. case QCA6290_DEVICE_ID:
  2100. case QCA6390_DEVICE_ID:
  2101. case QCA6490_DEVICE_ID:
  2102. case WCN7850_DEVICE_ID:
  2103. ret = cnss_register_ramdump_v2(plat_priv);
  2104. break;
  2105. default:
  2106. cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
  2107. ret = -ENODEV;
  2108. break;
  2109. }
  2110. return ret;
  2111. }
  2112. void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv)
  2113. {
  2114. switch (plat_priv->device_id) {
  2115. case QCA6174_DEVICE_ID:
  2116. cnss_unregister_ramdump_v1(plat_priv);
  2117. break;
  2118. case QCA6290_DEVICE_ID:
  2119. case QCA6390_DEVICE_ID:
  2120. case QCA6490_DEVICE_ID:
  2121. case WCN7850_DEVICE_ID:
  2122. cnss_unregister_ramdump_v2(plat_priv);
  2123. break;
  2124. default:
  2125. cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
  2126. break;
  2127. }
  2128. }
  2129. #else
  2130. int cnss_register_ramdump(struct cnss_plat_data *plat_priv)
  2131. {
  2132. return 0;
  2133. }
  2134. void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv) {}
  2135. #endif /* CONFIG_QCOM_MEMORY_DUMP_V2 */
  2136. #if IS_ENABLED(CONFIG_QCOM_MINIDUMP)
  2137. int cnss_minidump_add_region(struct cnss_plat_data *plat_priv,
  2138. enum cnss_fw_dump_type type, int seg_no,
  2139. void *va, phys_addr_t pa, size_t size)
  2140. {
  2141. struct md_region md_entry;
  2142. int ret;
  2143. switch (type) {
  2144. case CNSS_FW_IMAGE:
  2145. snprintf(md_entry.name, sizeof(md_entry.name), "FBC_%X",
  2146. seg_no);
  2147. break;
  2148. case CNSS_FW_RDDM:
  2149. snprintf(md_entry.name, sizeof(md_entry.name), "RDDM_%X",
  2150. seg_no);
  2151. break;
  2152. case CNSS_FW_REMOTE_HEAP:
  2153. snprintf(md_entry.name, sizeof(md_entry.name), "RHEAP_%X",
  2154. seg_no);
  2155. break;
  2156. default:
  2157. cnss_pr_err("Unknown dump type ID: %d\n", type);
  2158. return -EINVAL;
  2159. }
  2160. md_entry.phys_addr = pa;
  2161. md_entry.virt_addr = (uintptr_t)va;
  2162. md_entry.size = size;
  2163. md_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
  2164. cnss_pr_dbg("Mini dump region: %s, va: %pK, pa: %pa, size: 0x%zx\n",
  2165. md_entry.name, va, &pa, size);
  2166. ret = msm_minidump_add_region(&md_entry);
  2167. if (ret < 0)
  2168. cnss_pr_err("Failed to add mini dump region, err = %d\n", ret);
  2169. return ret;
  2170. }
  2171. int cnss_minidump_remove_region(struct cnss_plat_data *plat_priv,
  2172. enum cnss_fw_dump_type type, int seg_no,
  2173. void *va, phys_addr_t pa, size_t size)
  2174. {
  2175. struct md_region md_entry;
  2176. int ret;
  2177. switch (type) {
  2178. case CNSS_FW_IMAGE:
  2179. snprintf(md_entry.name, sizeof(md_entry.name), "FBC_%X",
  2180. seg_no);
  2181. break;
  2182. case CNSS_FW_RDDM:
  2183. snprintf(md_entry.name, sizeof(md_entry.name), "RDDM_%X",
  2184. seg_no);
  2185. break;
  2186. case CNSS_FW_REMOTE_HEAP:
  2187. snprintf(md_entry.name, sizeof(md_entry.name), "RHEAP_%X",
  2188. seg_no);
  2189. break;
  2190. default:
  2191. cnss_pr_err("Unknown dump type ID: %d\n", type);
  2192. return -EINVAL;
  2193. }
  2194. md_entry.phys_addr = pa;
  2195. md_entry.virt_addr = (uintptr_t)va;
  2196. md_entry.size = size;
  2197. md_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
  2198. cnss_pr_dbg("Remove mini dump region: %s, va: %pK, pa: %pa, size: 0x%zx\n",
  2199. md_entry.name, va, &pa, size);
  2200. ret = msm_minidump_remove_region(&md_entry);
  2201. if (ret)
  2202. cnss_pr_err("Failed to remove mini dump region, err = %d\n",
  2203. ret);
  2204. return ret;
  2205. }
  2206. #else
  2207. int cnss_minidump_add_region(struct cnss_plat_data *plat_priv,
  2208. enum cnss_fw_dump_type type, int seg_no,
  2209. void *va, phys_addr_t pa, size_t size)
  2210. {
  2211. return 0;
  2212. }
  2213. int cnss_minidump_remove_region(struct cnss_plat_data *plat_priv,
  2214. enum cnss_fw_dump_type type, int seg_no,
  2215. void *va, phys_addr_t pa, size_t size)
  2216. {
  2217. return 0;
  2218. }
  2219. #endif /* CONFIG_QCOM_MINIDUMP */
  2220. int cnss_request_firmware_direct(struct cnss_plat_data *plat_priv,
  2221. const struct firmware **fw_entry,
  2222. const char *filename)
  2223. {
  2224. if (IS_ENABLED(CONFIG_CNSS_REQ_FW_DIRECT))
  2225. return request_firmware_direct(fw_entry, filename,
  2226. &plat_priv->plat_dev->dev);
  2227. else
  2228. return firmware_request_nowarn(fw_entry, filename,
  2229. &plat_priv->plat_dev->dev);
  2230. }
  2231. #if IS_ENABLED(CONFIG_INTERCONNECT)
  2232. /**
  2233. * cnss_register_bus_scale() - Setup interconnect voting data
  2234. * @plat_priv: Platform data structure
  2235. *
  2236. * For different interconnect path configured in device tree setup voting data
  2237. * for list of bandwidth requirements.
  2238. *
  2239. * Result: 0 for success. -EINVAL if not configured
  2240. */
  2241. static int cnss_register_bus_scale(struct cnss_plat_data *plat_priv)
  2242. {
  2243. int ret = -EINVAL;
  2244. u32 idx, i, j, cfg_arr_size, *cfg_arr = NULL;
  2245. struct cnss_bus_bw_info *bus_bw_info, *tmp;
  2246. struct device *dev = &plat_priv->plat_dev->dev;
  2247. INIT_LIST_HEAD(&plat_priv->icc.list_head);
  2248. ret = of_property_read_u32(dev->of_node,
  2249. "qcom,icc-path-count",
  2250. &plat_priv->icc.path_count);
  2251. if (ret) {
  2252. cnss_pr_err("Platform Bus Interconnect path not configured\n");
  2253. return -EINVAL;
  2254. }
  2255. ret = of_property_read_u32(plat_priv->plat_dev->dev.of_node,
  2256. "qcom,bus-bw-cfg-count",
  2257. &plat_priv->icc.bus_bw_cfg_count);
  2258. if (ret) {
  2259. cnss_pr_err("Failed to get Bus BW Config table size\n");
  2260. goto cleanup;
  2261. }
  2262. cfg_arr_size = plat_priv->icc.path_count *
  2263. plat_priv->icc.bus_bw_cfg_count * CNSS_ICC_VOTE_MAX;
  2264. cfg_arr = kcalloc(cfg_arr_size, sizeof(*cfg_arr), GFP_KERNEL);
  2265. if (!cfg_arr) {
  2266. cnss_pr_err("Failed to alloc cfg table mem\n");
  2267. ret = -ENOMEM;
  2268. goto cleanup;
  2269. }
  2270. ret = of_property_read_u32_array(plat_priv->plat_dev->dev.of_node,
  2271. "qcom,bus-bw-cfg", cfg_arr,
  2272. cfg_arr_size);
  2273. if (ret) {
  2274. cnss_pr_err("Invalid Bus BW Config Table\n");
  2275. goto cleanup;
  2276. }
  2277. cnss_pr_dbg("ICC Path_Count: %d BW_CFG_Count: %d\n",
  2278. plat_priv->icc.path_count, plat_priv->icc.bus_bw_cfg_count);
  2279. for (idx = 0; idx < plat_priv->icc.path_count; idx++) {
  2280. bus_bw_info = devm_kzalloc(dev, sizeof(*bus_bw_info),
  2281. GFP_KERNEL);
  2282. if (!bus_bw_info) {
  2283. ret = -ENOMEM;
  2284. goto out;
  2285. }
  2286. ret = of_property_read_string_index(dev->of_node,
  2287. "interconnect-names", idx,
  2288. &bus_bw_info->icc_name);
  2289. if (ret)
  2290. goto out;
  2291. bus_bw_info->icc_path =
  2292. of_icc_get(&plat_priv->plat_dev->dev,
  2293. bus_bw_info->icc_name);
  2294. if (IS_ERR(bus_bw_info->icc_path)) {
  2295. ret = PTR_ERR(bus_bw_info->icc_path);
  2296. if (ret != -EPROBE_DEFER) {
  2297. cnss_pr_err("Failed to get Interconnect path for %s. Err: %d\n",
  2298. bus_bw_info->icc_name, ret);
  2299. goto out;
  2300. }
  2301. }
  2302. bus_bw_info->cfg_table =
  2303. devm_kcalloc(dev, plat_priv->icc.bus_bw_cfg_count,
  2304. sizeof(*bus_bw_info->cfg_table),
  2305. GFP_KERNEL);
  2306. if (!bus_bw_info->cfg_table) {
  2307. ret = -ENOMEM;
  2308. goto out;
  2309. }
  2310. cnss_pr_dbg("ICC Vote CFG for path: %s\n",
  2311. bus_bw_info->icc_name);
  2312. for (i = 0, j = (idx * plat_priv->icc.bus_bw_cfg_count *
  2313. CNSS_ICC_VOTE_MAX);
  2314. i < plat_priv->icc.bus_bw_cfg_count;
  2315. i++, j += 2) {
  2316. bus_bw_info->cfg_table[i].avg_bw = cfg_arr[j];
  2317. bus_bw_info->cfg_table[i].peak_bw = cfg_arr[j + 1];
  2318. cnss_pr_dbg("ICC Vote BW: %d avg: %d peak: %d\n",
  2319. i, bus_bw_info->cfg_table[i].avg_bw,
  2320. bus_bw_info->cfg_table[i].peak_bw);
  2321. }
  2322. list_add_tail(&bus_bw_info->list,
  2323. &plat_priv->icc.list_head);
  2324. }
  2325. kfree(cfg_arr);
  2326. return 0;
  2327. out:
  2328. list_for_each_entry_safe(bus_bw_info, tmp,
  2329. &plat_priv->icc.list_head, list) {
  2330. list_del(&bus_bw_info->list);
  2331. }
  2332. cleanup:
  2333. kfree(cfg_arr);
  2334. memset(&plat_priv->icc, 0, sizeof(plat_priv->icc));
  2335. return ret;
  2336. }
  2337. static void cnss_unregister_bus_scale(struct cnss_plat_data *plat_priv)
  2338. {
  2339. struct cnss_bus_bw_info *bus_bw_info, *tmp;
  2340. list_for_each_entry_safe(bus_bw_info, tmp,
  2341. &plat_priv->icc.list_head, list) {
  2342. list_del(&bus_bw_info->list);
  2343. if (bus_bw_info->icc_path)
  2344. icc_put(bus_bw_info->icc_path);
  2345. }
  2346. memset(&plat_priv->icc, 0, sizeof(plat_priv->icc));
  2347. }
  2348. #else
  2349. static int cnss_register_bus_scale(struct cnss_plat_data *plat_priv)
  2350. {
  2351. return 0;
  2352. }
  2353. static void cnss_unregister_bus_scale(struct cnss_plat_data *plat_priv) {}
  2354. #endif /* CONFIG_INTERCONNECT */
  2355. void cnss_daemon_connection_update_cb(void *cb_ctx, bool status)
  2356. {
  2357. struct cnss_plat_data *plat_priv = cb_ctx;
  2358. if (!plat_priv) {
  2359. cnss_pr_err("%s: Invalid context\n", __func__);
  2360. return;
  2361. }
  2362. if (status) {
  2363. cnss_pr_info("CNSS Daemon connected\n");
  2364. set_bit(CNSS_DAEMON_CONNECTED, &plat_priv->driver_state);
  2365. complete(&plat_priv->daemon_connected);
  2366. } else {
  2367. cnss_pr_info("CNSS Daemon disconnected\n");
  2368. reinit_completion(&plat_priv->daemon_connected);
  2369. clear_bit(CNSS_DAEMON_CONNECTED, &plat_priv->driver_state);
  2370. }
  2371. }
  2372. static ssize_t enable_hds_store(struct device *dev,
  2373. struct device_attribute *attr,
  2374. const char *buf, size_t count)
  2375. {
  2376. struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
  2377. unsigned int enable_hds = 0;
  2378. if (!plat_priv)
  2379. return -ENODEV;
  2380. if (sscanf(buf, "%du", &enable_hds) != 1) {
  2381. cnss_pr_err("Invalid enable_hds sysfs command\n");
  2382. return -EINVAL;
  2383. }
  2384. if (enable_hds)
  2385. plat_priv->hds_enabled = true;
  2386. else
  2387. plat_priv->hds_enabled = false;
  2388. cnss_pr_dbg("%s HDS file download, count is %zu\n",
  2389. plat_priv->hds_enabled ? "Enable" : "Disable", count);
  2390. return count;
  2391. }
  2392. static ssize_t recovery_store(struct device *dev,
  2393. struct device_attribute *attr,
  2394. const char *buf, size_t count)
  2395. {
  2396. struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
  2397. unsigned int recovery = 0;
  2398. if (!plat_priv)
  2399. return -ENODEV;
  2400. if (sscanf(buf, "%du", &recovery) != 1) {
  2401. cnss_pr_err("Invalid recovery sysfs command\n");
  2402. return -EINVAL;
  2403. }
  2404. if (recovery)
  2405. plat_priv->recovery_enabled = true;
  2406. else
  2407. plat_priv->recovery_enabled = false;
  2408. cnss_pr_dbg("%s WLAN recovery, count is %zu\n",
  2409. plat_priv->recovery_enabled ? "Enable" : "Disable", count);
  2410. return count;
  2411. }
  2412. static ssize_t shutdown_store(struct device *dev,
  2413. struct device_attribute *attr,
  2414. const char *buf, size_t count)
  2415. {
  2416. struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
  2417. if (plat_priv) {
  2418. set_bit(CNSS_IN_REBOOT, &plat_priv->driver_state);
  2419. del_timer(&plat_priv->fw_boot_timer);
  2420. complete_all(&plat_priv->power_up_complete);
  2421. complete_all(&plat_priv->cal_complete);
  2422. }
  2423. cnss_pr_dbg("Received shutdown notification\n");
  2424. return count;
  2425. }
  2426. static ssize_t fs_ready_store(struct device *dev,
  2427. struct device_attribute *attr,
  2428. const char *buf, size_t count)
  2429. {
  2430. int fs_ready = 0;
  2431. struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
  2432. if (sscanf(buf, "%du", &fs_ready) != 1)
  2433. return -EINVAL;
  2434. cnss_pr_dbg("File system is ready, fs_ready is %d, count is %zu\n",
  2435. fs_ready, count);
  2436. if (!plat_priv) {
  2437. cnss_pr_err("plat_priv is NULL\n");
  2438. return count;
  2439. }
  2440. if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks)) {
  2441. cnss_pr_dbg("QMI is bypassed\n");
  2442. return count;
  2443. }
  2444. switch (plat_priv->device_id) {
  2445. case QCA6290_DEVICE_ID:
  2446. case QCA6390_DEVICE_ID:
  2447. case QCA6490_DEVICE_ID:
  2448. case WCN7850_DEVICE_ID:
  2449. break;
  2450. default:
  2451. cnss_pr_err("Not supported for device ID 0x%lx\n",
  2452. plat_priv->device_id);
  2453. return count;
  2454. }
  2455. if (fs_ready == FILE_SYSTEM_READY && plat_priv->cbc_enabled) {
  2456. cnss_driver_event_post(plat_priv,
  2457. CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START,
  2458. 0, NULL);
  2459. }
  2460. return count;
  2461. }
  2462. static ssize_t qdss_trace_start_store(struct device *dev,
  2463. struct device_attribute *attr,
  2464. const char *buf, size_t count)
  2465. {
  2466. struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
  2467. wlfw_qdss_trace_start(plat_priv);
  2468. cnss_pr_dbg("Received QDSS start command\n");
  2469. return count;
  2470. }
  2471. static ssize_t qdss_trace_stop_store(struct device *dev,
  2472. struct device_attribute *attr,
  2473. const char *buf, size_t count)
  2474. {
  2475. struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
  2476. u32 option = 0;
  2477. if (sscanf(buf, "%du", &option) != 1)
  2478. return -EINVAL;
  2479. wlfw_qdss_trace_stop(plat_priv, option);
  2480. cnss_pr_dbg("Received QDSS stop command\n");
  2481. return count;
  2482. }
  2483. static ssize_t qdss_conf_download_store(struct device *dev,
  2484. struct device_attribute *attr,
  2485. const char *buf, size_t count)
  2486. {
  2487. struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
  2488. cnss_wlfw_qdss_dnld_send_sync(plat_priv);
  2489. cnss_pr_dbg("Received QDSS download config command\n");
  2490. return count;
  2491. }
  2492. static ssize_t hw_trace_override_store(struct device *dev,
  2493. struct device_attribute *attr,
  2494. const char *buf, size_t count)
  2495. {
  2496. struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
  2497. int tmp = 0;
  2498. if (sscanf(buf, "%du", &tmp) != 1)
  2499. return -EINVAL;
  2500. plat_priv->hw_trc_override = tmp;
  2501. cnss_pr_dbg("Received QDSS hw_trc_override indication\n");
  2502. return count;
  2503. }
  2504. static DEVICE_ATTR_WO(fs_ready);
  2505. static DEVICE_ATTR_WO(shutdown);
  2506. static DEVICE_ATTR_WO(recovery);
  2507. static DEVICE_ATTR_WO(enable_hds);
  2508. static DEVICE_ATTR_WO(qdss_trace_start);
  2509. static DEVICE_ATTR_WO(qdss_trace_stop);
  2510. static DEVICE_ATTR_WO(qdss_conf_download);
  2511. static DEVICE_ATTR_WO(hw_trace_override);
  2512. static struct attribute *cnss_attrs[] = {
  2513. &dev_attr_fs_ready.attr,
  2514. &dev_attr_shutdown.attr,
  2515. &dev_attr_recovery.attr,
  2516. &dev_attr_enable_hds.attr,
  2517. &dev_attr_qdss_trace_start.attr,
  2518. &dev_attr_qdss_trace_stop.attr,
  2519. &dev_attr_qdss_conf_download.attr,
  2520. &dev_attr_hw_trace_override.attr,
  2521. NULL,
  2522. };
  2523. static struct attribute_group cnss_attr_group = {
  2524. .attrs = cnss_attrs,
  2525. };
  2526. static int cnss_create_sysfs_link(struct cnss_plat_data *plat_priv)
  2527. {
  2528. struct device *dev = &plat_priv->plat_dev->dev;
  2529. int ret;
  2530. ret = sysfs_create_link(kernel_kobj, &dev->kobj, "cnss");
  2531. if (ret) {
  2532. cnss_pr_err("Failed to create cnss link, err = %d\n",
  2533. ret);
  2534. goto out;
  2535. }
  2536. /* This is only for backward compatibility. */
  2537. ret = sysfs_create_link(kernel_kobj, &dev->kobj, "shutdown_wlan");
  2538. if (ret) {
  2539. cnss_pr_err("Failed to create shutdown_wlan link, err = %d\n",
  2540. ret);
  2541. goto rm_cnss_link;
  2542. }
  2543. return 0;
  2544. rm_cnss_link:
  2545. sysfs_remove_link(kernel_kobj, "cnss");
  2546. out:
  2547. return ret;
  2548. }
  2549. static void cnss_remove_sysfs_link(struct cnss_plat_data *plat_priv)
  2550. {
  2551. sysfs_remove_link(kernel_kobj, "shutdown_wlan");
  2552. sysfs_remove_link(kernel_kobj, "cnss");
  2553. }
  2554. static int cnss_create_sysfs(struct cnss_plat_data *plat_priv)
  2555. {
  2556. int ret = 0;
  2557. ret = devm_device_add_group(&plat_priv->plat_dev->dev,
  2558. &cnss_attr_group);
  2559. if (ret) {
  2560. cnss_pr_err("Failed to create cnss device group, err = %d\n",
  2561. ret);
  2562. goto out;
  2563. }
  2564. cnss_create_sysfs_link(plat_priv);
  2565. return 0;
  2566. out:
  2567. return ret;
  2568. }
  2569. static void cnss_remove_sysfs(struct cnss_plat_data *plat_priv)
  2570. {
  2571. cnss_remove_sysfs_link(plat_priv);
  2572. devm_device_remove_group(&plat_priv->plat_dev->dev, &cnss_attr_group);
  2573. }
  2574. static int cnss_event_work_init(struct cnss_plat_data *plat_priv)
  2575. {
  2576. spin_lock_init(&plat_priv->event_lock);
  2577. plat_priv->event_wq = alloc_workqueue("cnss_driver_event",
  2578. WQ_UNBOUND, 1);
  2579. if (!plat_priv->event_wq) {
  2580. cnss_pr_err("Failed to create event workqueue!\n");
  2581. return -EFAULT;
  2582. }
  2583. INIT_WORK(&plat_priv->event_work, cnss_driver_event_work);
  2584. INIT_LIST_HEAD(&plat_priv->event_list);
  2585. return 0;
  2586. }
  2587. static void cnss_event_work_deinit(struct cnss_plat_data *plat_priv)
  2588. {
  2589. destroy_workqueue(plat_priv->event_wq);
  2590. }
  2591. static int cnss_reboot_notifier(struct notifier_block *nb,
  2592. unsigned long action,
  2593. void *data)
  2594. {
  2595. struct cnss_plat_data *plat_priv =
  2596. container_of(nb, struct cnss_plat_data, reboot_nb);
  2597. set_bit(CNSS_IN_REBOOT, &plat_priv->driver_state);
  2598. del_timer(&plat_priv->fw_boot_timer);
  2599. complete_all(&plat_priv->power_up_complete);
  2600. complete_all(&plat_priv->cal_complete);
  2601. cnss_pr_dbg("Reboot is in progress with action %d\n", action);
  2602. return NOTIFY_DONE;
  2603. }
  2604. static int cnss_misc_init(struct cnss_plat_data *plat_priv)
  2605. {
  2606. int ret;
  2607. timer_setup(&plat_priv->fw_boot_timer,
  2608. cnss_bus_fw_boot_timeout_hdlr, 0);
  2609. ret = register_pm_notifier(&cnss_pm_notifier);
  2610. if (ret)
  2611. cnss_pr_err("Failed to register PM notifier, err = %d\n", ret);
  2612. plat_priv->reboot_nb.notifier_call = cnss_reboot_notifier;
  2613. ret = register_reboot_notifier(&plat_priv->reboot_nb);
  2614. if (ret)
  2615. cnss_pr_err("Failed to register reboot notifier, err = %d\n",
  2616. ret);
  2617. ret = device_init_wakeup(&plat_priv->plat_dev->dev, true);
  2618. if (ret)
  2619. cnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
  2620. ret);
  2621. INIT_WORK(&plat_priv->recovery_work, cnss_recovery_work_handler);
  2622. init_completion(&plat_priv->power_up_complete);
  2623. init_completion(&plat_priv->cal_complete);
  2624. init_completion(&plat_priv->rddm_complete);
  2625. init_completion(&plat_priv->recovery_complete);
  2626. init_completion(&plat_priv->daemon_connected);
  2627. mutex_init(&plat_priv->dev_lock);
  2628. mutex_init(&plat_priv->driver_ops_lock);
  2629. plat_priv->recovery_ws =
  2630. wakeup_source_register(&plat_priv->plat_dev->dev,
  2631. "CNSS_FW_RECOVERY");
  2632. if (!plat_priv->recovery_ws)
  2633. cnss_pr_err("Failed to setup FW recovery wake source\n");
  2634. ret = cnss_plat_ipc_register(CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01,
  2635. cnss_daemon_connection_update_cb,
  2636. plat_priv);
  2637. if (ret)
  2638. cnss_pr_err("QMI IPC connection call back register failed, err = %d\n",
  2639. ret);
  2640. return 0;
  2641. }
  2642. static void cnss_misc_deinit(struct cnss_plat_data *plat_priv)
  2643. {
  2644. cnss_plat_ipc_unregister(CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01,
  2645. plat_priv);
  2646. complete_all(&plat_priv->recovery_complete);
  2647. complete_all(&plat_priv->rddm_complete);
  2648. complete_all(&plat_priv->cal_complete);
  2649. complete_all(&plat_priv->power_up_complete);
  2650. complete_all(&plat_priv->daemon_connected);
  2651. device_init_wakeup(&plat_priv->plat_dev->dev, false);
  2652. unregister_reboot_notifier(&plat_priv->reboot_nb);
  2653. unregister_pm_notifier(&cnss_pm_notifier);
  2654. del_timer(&plat_priv->fw_boot_timer);
  2655. wakeup_source_unregister(plat_priv->recovery_ws);
  2656. }
  2657. static void cnss_init_control_params(struct cnss_plat_data *plat_priv)
  2658. {
  2659. plat_priv->ctrl_params.quirks = CNSS_QUIRKS_DEFAULT;
  2660. plat_priv->cbc_enabled = !IS_ENABLED(CONFIG_CNSS_EMULATION) &&
  2661. of_property_read_bool(plat_priv->plat_dev->dev.of_node,
  2662. "qcom,wlan-cbc-enabled");
  2663. plat_priv->ctrl_params.mhi_timeout = CNSS_MHI_TIMEOUT_DEFAULT;
  2664. plat_priv->ctrl_params.mhi_m2_timeout = CNSS_MHI_M2_TIMEOUT_DEFAULT;
  2665. plat_priv->ctrl_params.qmi_timeout = CNSS_QMI_TIMEOUT_DEFAULT;
  2666. plat_priv->ctrl_params.bdf_type = CNSS_BDF_TYPE_DEFAULT;
  2667. plat_priv->ctrl_params.time_sync_period = CNSS_TIME_SYNC_PERIOD_DEFAULT;
  2668. /* Set adsp_pc_enabled default value to true as ADSP pc is always
  2669. * enabled by default
  2670. */
  2671. plat_priv->adsp_pc_enabled = true;
  2672. }
  2673. static void cnss_get_pm_domain_info(struct cnss_plat_data *plat_priv)
  2674. {
  2675. struct device *dev = &plat_priv->plat_dev->dev;
  2676. plat_priv->use_pm_domain =
  2677. of_property_read_bool(dev->of_node, "use-pm-domain");
  2678. cnss_pr_dbg("use-pm-domain is %d\n", plat_priv->use_pm_domain);
  2679. }
  2680. static void cnss_get_wlaon_pwr_ctrl_info(struct cnss_plat_data *plat_priv)
  2681. {
  2682. struct device *dev = &plat_priv->plat_dev->dev;
  2683. plat_priv->set_wlaon_pwr_ctrl =
  2684. of_property_read_bool(dev->of_node, "qcom,set-wlaon-pwr-ctrl");
  2685. cnss_pr_dbg("set_wlaon_pwr_ctrl is %d\n",
  2686. plat_priv->set_wlaon_pwr_ctrl);
  2687. }
  2688. static bool cnss_use_fw_path_with_prefix(struct cnss_plat_data *plat_priv)
  2689. {
  2690. return (of_property_read_bool(plat_priv->plat_dev->dev.of_node,
  2691. "qcom,converged-dt") ||
  2692. of_property_read_bool(plat_priv->plat_dev->dev.of_node,
  2693. "qcom,same-dt-multi-dev"));
  2694. }
  2695. static const struct platform_device_id cnss_platform_id_table[] = {
  2696. { .name = "qca6174", .driver_data = QCA6174_DEVICE_ID, },
  2697. { .name = "qca6290", .driver_data = QCA6290_DEVICE_ID, },
  2698. { .name = "qca6390", .driver_data = QCA6390_DEVICE_ID, },
  2699. { .name = "qca6490", .driver_data = QCA6490_DEVICE_ID, },
  2700. { .name = "wcn7850", .driver_data = WCN7850_DEVICE_ID, },
  2701. { },
  2702. };
  2703. static const struct of_device_id cnss_of_match_table[] = {
  2704. {
  2705. .compatible = "qcom,cnss",
  2706. .data = (void *)&cnss_platform_id_table[0]},
  2707. {
  2708. .compatible = "qcom,cnss-qca6290",
  2709. .data = (void *)&cnss_platform_id_table[1]},
  2710. {
  2711. .compatible = "qcom,cnss-qca6390",
  2712. .data = (void *)&cnss_platform_id_table[2]},
  2713. {
  2714. .compatible = "qcom,cnss-qca6490",
  2715. .data = (void *)&cnss_platform_id_table[3]},
  2716. {
  2717. .compatible = "qcom,cnss-wcn7850",
  2718. .data = (void *)&cnss_platform_id_table[4]},
  2719. { },
  2720. };
  2721. MODULE_DEVICE_TABLE(of, cnss_of_match_table);
  2722. static inline bool
  2723. cnss_use_nv_mac(struct cnss_plat_data *plat_priv)
  2724. {
  2725. return of_property_read_bool(plat_priv->plat_dev->dev.of_node,
  2726. "use-nv-mac");
  2727. }
static int cnss_probe(struct platform_device *plat_dev)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;
	const struct of_device_id *of_id;
	const struct platform_device_id *device_id;
	int retry = 0;

	if (cnss_get_plat_priv(plat_dev)) {
		cnss_pr_err("Driver is already initialized!\n");
		ret = -EEXIST;
		goto out;
	}

	of_id = of_match_device(cnss_of_match_table, &plat_dev->dev);
	if (!of_id || !of_id->data) {
		cnss_pr_err("Failed to find of match device!\n");
		ret = -ENODEV;
		goto out;
	}
	device_id = of_id->data;

	plat_priv = devm_kzalloc(&plat_dev->dev, sizeof(*plat_priv),
				 GFP_KERNEL);
	if (!plat_priv) {
		ret = -ENOMEM;
		goto out;
	}

	plat_priv->plat_dev = plat_dev;
	plat_priv->device_id = device_id->driver_data;
	plat_priv->bus_type = cnss_get_bus_type(plat_priv->device_id);
	plat_priv->use_nv_mac = cnss_use_nv_mac(plat_priv);
	plat_priv->use_fw_path_with_prefix =
		cnss_use_fw_path_with_prefix(plat_priv);
	cnss_set_plat_priv(plat_dev, plat_priv);
	platform_set_drvdata(plat_dev, plat_priv);
	INIT_LIST_HEAD(&plat_priv->vreg_list);
	INIT_LIST_HEAD(&plat_priv->clk_list);

	cnss_get_pm_domain_info(plat_priv);
	cnss_get_wlaon_pwr_ctrl_info(plat_priv);
	cnss_get_tcs_info(plat_priv);
	cnss_get_cpr_info(plat_priv);
	cnss_aop_mbox_init(plat_priv);
	cnss_init_control_params(plat_priv);

	ret = cnss_get_resources(plat_priv);
	if (ret)
		goto reset_ctx;

	ret = cnss_register_esoc(plat_priv);
	if (ret)
		goto free_res;

	ret = cnss_register_bus_scale(plat_priv);
	if (ret)
		goto unreg_esoc;

	ret = cnss_create_sysfs(plat_priv);
	if (ret)
		goto unreg_bus_scale;

	ret = cnss_event_work_init(plat_priv);
	if (ret)
		goto remove_sysfs;

	ret = cnss_qmi_init(plat_priv);
	if (ret)
		goto deinit_event_work;

	ret = cnss_dms_init(plat_priv);
	if (ret)
		goto deinit_qmi;

	ret = cnss_debugfs_create(plat_priv);
	if (ret)
		goto deinit_dms;

	ret = cnss_misc_init(plat_priv);
	if (ret)
		goto destroy_debugfs;

	/* Make sure all platform-related init is done before
	 * device power on and bus init.
	 */
	if (!test_bit(SKIP_DEVICE_BOOT, &plat_priv->ctrl_params.quirks)) {
retry:
		ret = cnss_power_on_device(plat_priv);
		if (ret)
			goto deinit_misc;

		ret = cnss_bus_init(plat_priv);
		if (ret) {
			if ((ret != -EPROBE_DEFER) &&
			    retry++ < POWER_ON_RETRY_MAX_TIMES) {
				cnss_power_off_device(plat_priv);
				cnss_pr_dbg("Retry cnss_bus_init #%d\n", retry);
				msleep(POWER_ON_RETRY_DELAY_MS * retry);
				goto retry;
			}
			goto power_off;
		}
	}

	cnss_register_coex_service(plat_priv);
	cnss_register_ims_service(plat_priv);

	ret = cnss_genl_init();
	if (ret < 0)
		cnss_pr_err("CNSS genl init failed %d\n", ret);

	cnss_pr_info("Platform driver probed successfully.\n");

	return 0;

power_off:
	if (!test_bit(SKIP_DEVICE_BOOT, &plat_priv->ctrl_params.quirks))
		cnss_power_off_device(plat_priv);
deinit_misc:
	cnss_misc_deinit(plat_priv);
destroy_debugfs:
	cnss_debugfs_destroy(plat_priv);
deinit_dms:
	cnss_dms_deinit(plat_priv);
deinit_qmi:
	cnss_qmi_deinit(plat_priv);
deinit_event_work:
	cnss_event_work_deinit(plat_priv);
remove_sysfs:
	cnss_remove_sysfs(plat_priv);
unreg_bus_scale:
	cnss_unregister_bus_scale(plat_priv);
unreg_esoc:
	cnss_unregister_esoc(plat_priv);
free_res:
	cnss_put_resources(plat_priv);
reset_ctx:
	platform_set_drvdata(plat_dev, NULL);
	cnss_set_plat_priv(plat_dev, NULL);
out:
	return ret;
}
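
/* Undo cnss_probe() in roughly reverse order and release the AOP mailbox
 * channel if one was acquired.
 */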
static int cnss_remove(struct platform_device *plat_dev)
{
	struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);

	cnss_genl_exit();
	cnss_unregister_ims_service(plat_priv);
	cnss_unregister_coex_service(plat_priv);
	cnss_bus_deinit(plat_priv);
	cnss_misc_deinit(plat_priv);
	cnss_debugfs_destroy(plat_priv);
	cnss_dms_deinit(plat_priv);
	cnss_qmi_deinit(plat_priv);
	cnss_event_work_deinit(plat_priv);
	cnss_remove_sysfs(plat_priv);
	cnss_unregister_bus_scale(plat_priv);
	cnss_unregister_esoc(plat_priv);
	cnss_put_resources(plat_priv);

	if (!IS_ERR_OR_NULL(plat_priv->mbox_chan))
		mbox_free_channel(plat_priv->mbox_chan);

	platform_set_drvdata(plat_dev, NULL);
	plat_env = NULL;

	return 0;
}
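
/* With CONFIG_CNSS_ASYNC, the driver opts in to asynchronous probing. */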
static struct platform_driver cnss_platform_driver = {
	.probe = cnss_probe,
	.remove = cnss_remove,
	.driver = {
		.name = "cnss2",
		.of_match_table = cnss_of_match_table,
#ifdef CONFIG_CNSS_ASYNC
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
#endif
	},
};

/**
 * cnss_is_valid_dt_node_found - Check if a valid device tree node is present
 *
 * A valid device tree node is one whose "compatible" property matches an
 * entry in the device match table and whose "status" property is not
 * "disabled".
 *
 * Return: true if a valid device tree node is found, false otherwise
 */
static bool cnss_is_valid_dt_node_found(void)
{
	struct device_node *dn = NULL;

	for_each_matching_node(dn, cnss_of_match_table) {
		if (of_device_is_available(dn))
			break;
	}

	if (dn)
		return true;

	return false;
}
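
/* Module init: register the platform driver only when an enabled CNSS DT
 * node exists; tear the debug infrastructure back down if registration
 * fails.
 */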
static int __init cnss_initialize(void)
{
	int ret = 0;

	if (!cnss_is_valid_dt_node_found())
		return -ENODEV;

	cnss_debug_init();

	ret = platform_driver_register(&cnss_platform_driver);
	if (ret)
		cnss_debug_deinit();

	return ret;
}

static void __exit cnss_exit(void)
{
	platform_driver_unregister(&cnss_platform_driver);
	cnss_debug_deinit();
}

module_init(cnss_initialize);
module_exit(cnss_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CNSS2 Platform Driver");