qmi.c
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */
  3. #include <linux/module.h>
  4. #include <linux/soc/qcom/qmi.h>
  5. #include "bus.h"
  6. #include "debug.h"
  7. #include "main.h"
  8. #include "qmi.h"
  9. #include "genl.h"
  10. #define WLFW_SERVICE_INS_ID_V01 1
  11. #define WLFW_CLIENT_ID 0x4b4e454c
  12. #define BDF_FILE_NAME_PREFIX "bdwlan"
  13. #define ELF_BDF_FILE_NAME "bdwlan.elf"
  14. #define ELF_BDF_FILE_NAME_GF "bdwlang.elf"
  15. #define ELF_BDF_FILE_NAME_PREFIX "bdwlan.e"
  16. #define ELF_BDF_FILE_NAME_GF_PREFIX "bdwlang.e"
  17. #define BIN_BDF_FILE_NAME "bdwlan.bin"
  18. #define BIN_BDF_FILE_NAME_GF "bdwlang.bin"
  19. #define BIN_BDF_FILE_NAME_PREFIX "bdwlan.b"
  20. #define BIN_BDF_FILE_NAME_GF_PREFIX "bdwlang.b"
  21. #define REGDB_FILE_NAME "regdb.bin"
  22. #define HDS_FILE_NAME "hds.bin"
  23. #define CHIP_ID_GF_MASK 0x10
  24. #define QDSS_TRACE_CONFIG_FILE "qdss_trace_config"
  25. #ifdef CONFIG_CNSS2_DEBUG
  26. #define QDSS_DEBUG_FILE_STR "debug_"
  27. #else
  28. #define QDSS_DEBUG_FILE_STR ""
  29. #endif
  30. #define HW_V1_NUMBER "v1"
  31. #define HW_V2_NUMBER "v2"
  32. #define QMI_WLFW_TIMEOUT_MS (plat_priv->ctrl_params.qmi_timeout)
  33. #define QMI_WLFW_TIMEOUT_JF msecs_to_jiffies(QMI_WLFW_TIMEOUT_MS)
  34. #define COEX_TIMEOUT QMI_WLFW_TIMEOUT_JF
  35. #define IMS_TIMEOUT QMI_WLFW_TIMEOUT_JF
  36. #define QMI_WLFW_MAX_RECV_BUF_SIZE SZ_8K
  37. #define IMSPRIVATE_SERVICE_MAX_MSG_LEN SZ_8K
  38. #define DMS_QMI_MAX_MSG_LEN SZ_256
  39. #define QMI_WLFW_MAC_READY_TIMEOUT_MS 50
  40. #define QMI_WLFW_MAC_READY_MAX_RETRY 200
  41. #ifdef CONFIG_CNSS2_DEBUG
  42. static bool ignore_qmi_failure;
  43. #define CNSS_QMI_ASSERT() CNSS_ASSERT(ignore_qmi_failure)
  44. void cnss_ignore_qmi_failure(bool ignore)
  45. {
  46. ignore_qmi_failure = ignore;
  47. }
  48. #else
  49. #define CNSS_QMI_ASSERT() do { } while (0)
  50. void cnss_ignore_qmi_failure(bool ignore) { }
  51. #endif
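/*
 * CNSS_QMI_ASSERT() is only armed in CONFIG_CNSS2_DEBUG builds: assuming
 * CNSS_ASSERT() behaves like a conventional assert on its argument, a QMI
 * failure trips the assert unless cnss_ignore_qmi_failure(true) has been
 * called around operations where failures are expected. In non-debug
 * builds both the macro and the helper are no-ops.
 */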
  52. static char *cnss_qmi_mode_to_str(enum cnss_driver_mode mode)
  53. {
  54. switch (mode) {
  55. case CNSS_MISSION:
  56. return "MISSION";
  57. case CNSS_FTM:
  58. return "FTM";
  59. case CNSS_EPPING:
  60. return "EPPING";
  61. case CNSS_WALTEST:
  62. return "WALTEST";
  63. case CNSS_OFF:
  64. return "OFF";
  65. case CNSS_CCPM:
  66. return "CCPM";
  67. case CNSS_QVIT:
  68. return "QVIT";
  69. case CNSS_CALIBRATION:
  70. return "CALIBRATION";
  71. default:
  72. return "UNKNOWN";
  73. }
  74. }
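/*
 * All of the *_send_sync() helpers below follow the same synchronous QMI
 * transaction pattern against the wlfw service:
 *
 *   req  = kzalloc(sizeof(*req), GFP_KERNEL);
 *   resp = kzalloc(sizeof(*resp), GFP_KERNEL);
 *   qmi_txn_init(&plat_priv->qmi_wlfw, &txn, <resp_ei>, resp);
 *   qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn, <msg_id>,
 *                    <max_msg_len>, <req_ei>, req);
 *   qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
 *   check resp->resp.result == QMI_RESULT_SUCCESS_V01;
 *
 * A failed send cancels the transaction, error paths typically invoke
 * CNSS_QMI_ASSERT(), and req/resp are freed on every exit path.
 */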
  75. static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv)
  76. {
  77. struct wlfw_ind_register_req_msg_v01 *req;
  78. struct wlfw_ind_register_resp_msg_v01 *resp;
  79. struct qmi_txn txn;
  80. int ret = 0;
  81. cnss_pr_dbg("Sending indication register message, state: 0x%lx\n",
  82. plat_priv->driver_state);
  83. req = kzalloc(sizeof(*req), GFP_KERNEL);
  84. if (!req)
  85. return -ENOMEM;
  86. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  87. if (!resp) {
  88. kfree(req);
  89. return -ENOMEM;
  90. }
  91. req->client_id_valid = 1;
  92. req->client_id = WLFW_CLIENT_ID;
  93. req->request_mem_enable_valid = 1;
  94. req->request_mem_enable = 1;
  95. req->fw_mem_ready_enable_valid = 1;
  96. req->fw_mem_ready_enable = 1;
  97. /* fw_ready indication is replaced by fw_init_done in HST/HSP */
  98. req->fw_init_done_enable_valid = 1;
  99. req->fw_init_done_enable = 1;
  100. req->pin_connect_result_enable_valid = 1;
  101. req->pin_connect_result_enable = 1;
  102. req->cal_done_enable_valid = 1;
  103. req->cal_done_enable = 1;
  104. req->qdss_trace_req_mem_enable_valid = 1;
  105. req->qdss_trace_req_mem_enable = 1;
  106. req->qdss_trace_save_enable_valid = 1;
  107. req->qdss_trace_save_enable = 1;
  108. req->qdss_trace_free_enable_valid = 1;
  109. req->qdss_trace_free_enable = 1;
  110. req->respond_get_info_enable_valid = 1;
  111. req->respond_get_info_enable = 1;
  112. req->wfc_call_twt_config_enable_valid = 1;
  113. req->wfc_call_twt_config_enable = 1;
  114. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  115. wlfw_ind_register_resp_msg_v01_ei, resp);
  116. if (ret < 0) {
  117. cnss_pr_err("Failed to initialize txn for indication register request, err: %d\n",
  118. ret);
  119. goto out;
  120. }
  121. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  122. QMI_WLFW_IND_REGISTER_REQ_V01,
  123. WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
  124. wlfw_ind_register_req_msg_v01_ei, req);
  125. if (ret < 0) {
  126. qmi_txn_cancel(&txn);
  127. cnss_pr_err("Failed to send indication register request, err: %d\n",
  128. ret);
  129. goto out;
  130. }
  131. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  132. if (ret < 0) {
  133. cnss_pr_err("Failed to wait for response of indication register request, err: %d\n",
  134. ret);
  135. goto out;
  136. }
  137. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  138. cnss_pr_err("Indication register request failed, result: %d, err: %d\n",
  139. resp->resp.result, resp->resp.error);
  140. ret = -resp->resp.result;
  141. goto out;
  142. }
  143. if (resp->fw_status_valid) {
  144. if (resp->fw_status & QMI_WLFW_ALREADY_REGISTERED_V01) {
  145. ret = -EALREADY;
  146. goto qmi_registered;
  147. }
  148. }
  149. kfree(req);
  150. kfree(resp);
  151. return 0;
  152. out:
  153. CNSS_QMI_ASSERT();
  154. qmi_registered:
  155. kfree(req);
  156. kfree(resp);
  157. return ret;
  158. }
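/*
 * For WCN7850 the host advertises a fixed single-chip MLO topology in the
 * host capability request: chip_id/group_id 0, one chip with two local
 * links (hw_link_id 0 and 1) and a maximum of 32 MLO peers.
 */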
  159. static void cnss_wlfw_host_cap_parse_mlo(struct cnss_plat_data *plat_priv,
  160. struct wlfw_host_cap_req_msg_v01 *req)
  161. {
  162. if (plat_priv->device_id == WCN7850_DEVICE_ID) {
  163. req->mlo_capable_valid = 1;
  164. req->mlo_capable = 1;
  165. req->mlo_chip_id_valid = 1;
  166. req->mlo_chip_id = 0;
  167. req->mlo_group_id_valid = 1;
  168. req->mlo_group_id = 0;
  169. req->max_mlo_peer_valid = 1;
  170. /* Max peer number generally won't change for the same device
  171. * but needs to be synced with host driver.
  172. */
  173. req->max_mlo_peer = 32;
  174. req->mlo_num_chips_valid = 1;
  175. req->mlo_num_chips = 1;
  176. req->mlo_chip_info_valid = 1;
  177. req->mlo_chip_info[0].chip_id = 0;
  178. req->mlo_chip_info[0].num_local_links = 2;
  179. req->mlo_chip_info[0].hw_link_id[0] = 0;
  180. req->mlo_chip_info[0].hw_link_id[1] = 1;
  181. req->mlo_chip_info[0].valid_mlo_link_id[0] = 1;
  182. req->mlo_chip_info[0].valid_mlo_link_id[1] = 1;
  183. }
  184. }
  185. static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv)
  186. {
  187. struct wlfw_host_cap_req_msg_v01 *req;
  188. struct wlfw_host_cap_resp_msg_v01 *resp;
  189. struct qmi_txn txn;
  190. int ret = 0;
  191. u64 iova_start = 0, iova_size = 0,
  192. iova_ipa_start = 0, iova_ipa_size = 0;
  193. u64 feature_list = 0;
  194. cnss_pr_dbg("Sending host capability message, state: 0x%lx\n",
  195. plat_priv->driver_state);
  196. req = kzalloc(sizeof(*req), GFP_KERNEL);
  197. if (!req)
  198. return -ENOMEM;
  199. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  200. if (!resp) {
  201. kfree(req);
  202. return -ENOMEM;
  203. }
  204. req->num_clients_valid = 1;
  205. req->num_clients = 1;
  206. cnss_pr_dbg("Number of clients is %d\n", req->num_clients);
  207. req->wake_msi = cnss_bus_get_wake_irq(plat_priv);
  208. if (req->wake_msi) {
  209. cnss_pr_dbg("WAKE MSI base data is %d\n", req->wake_msi);
  210. req->wake_msi_valid = 1;
  211. }
  212. req->bdf_support_valid = 1;
  213. req->bdf_support = 1;
  214. req->m3_support_valid = 1;
  215. req->m3_support = 1;
  216. req->m3_cache_support_valid = 1;
  217. req->m3_cache_support = 1;
  218. req->cal_done_valid = 1;
  219. req->cal_done = plat_priv->cal_done;
  220. cnss_pr_dbg("Calibration done is %d\n", plat_priv->cal_done);
  221. if (!cnss_bus_get_iova(plat_priv, &iova_start, &iova_size) &&
  222. !cnss_bus_get_iova_ipa(plat_priv, &iova_ipa_start,
  223. &iova_ipa_size)) {
  224. req->ddr_range_valid = 1;
  225. req->ddr_range[0].start = iova_start;
  226. req->ddr_range[0].size = iova_size + iova_ipa_size;
  227. cnss_pr_dbg("Sending iova starting 0x%llx with size 0x%llx\n",
  228. req->ddr_range[0].start, req->ddr_range[0].size);
  229. }
  230. req->host_build_type_valid = 1;
  231. req->host_build_type = cnss_get_host_build_type();
  232. cnss_wlfw_host_cap_parse_mlo(plat_priv, req);
  233. ret = cnss_get_feature_list(plat_priv, &feature_list);
  234. if (!ret) {
  235. req->feature_list_valid = 1;
  236. req->feature_list = feature_list;
  237. cnss_pr_dbg("Sending feature list 0x%llx\n",
  238. req->feature_list);
  239. }
  240. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  241. wlfw_host_cap_resp_msg_v01_ei, resp);
  242. if (ret < 0) {
  243. cnss_pr_err("Failed to initialize txn for host capability request, err: %d\n",
  244. ret);
  245. goto out;
  246. }
  247. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  248. QMI_WLFW_HOST_CAP_REQ_V01,
  249. WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
  250. wlfw_host_cap_req_msg_v01_ei, req);
  251. if (ret < 0) {
  252. qmi_txn_cancel(&txn);
  253. cnss_pr_err("Failed to send host capability request, err: %d\n",
  254. ret);
  255. goto out;
  256. }
  257. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  258. if (ret < 0) {
  259. cnss_pr_err("Failed to wait for response of host capability request, err: %d\n",
  260. ret);
  261. goto out;
  262. }
  263. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  264. cnss_pr_err("Host capability request failed, result: %d, err: %d\n",
  265. resp->resp.result, resp->resp.error);
  266. ret = -resp->resp.result;
  267. goto out;
  268. }
  269. kfree(req);
  270. kfree(resp);
  271. return 0;
  272. out:
  273. CNSS_QMI_ASSERT();
  274. kfree(req);
  275. kfree(resp);
  276. return ret;
  277. }
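/*
 * cnss_wlfw_respond_mem_send_sync() hands the host-allocated firmware
 * memory segments (physical address, size and type from plat_priv->fw_mem)
 * back to the target; it is normally issued after firmware raises a
 * request_mem indication (enabled in the indication register request above).
 */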
  278. int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv)
  279. {
  280. struct wlfw_respond_mem_req_msg_v01 *req;
  281. struct wlfw_respond_mem_resp_msg_v01 *resp;
  282. struct qmi_txn txn;
  283. struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
  284. int ret = 0, i;
  285. cnss_pr_dbg("Sending respond memory message, state: 0x%lx\n",
  286. plat_priv->driver_state);
  287. req = kzalloc(sizeof(*req), GFP_KERNEL);
  288. if (!req)
  289. return -ENOMEM;
  290. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  291. if (!resp) {
  292. kfree(req);
  293. return -ENOMEM;
  294. }
  295. req->mem_seg_len = plat_priv->fw_mem_seg_len;
  296. for (i = 0; i < req->mem_seg_len; i++) {
  297. if (!fw_mem[i].pa || !fw_mem[i].size) {
  298. if (fw_mem[i].type == 0) {
  299. cnss_pr_err("Invalid memory for FW type, segment = %d\n",
  300. i);
  301. ret = -EINVAL;
  302. goto out;
  303. }
  304. cnss_pr_err("Memory for FW is not available for type: %u\n",
  305. fw_mem[i].type);
  306. ret = -ENOMEM;
  307. goto out;
  308. }
  309. cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
  310. fw_mem[i].va, &fw_mem[i].pa,
  311. fw_mem[i].size, fw_mem[i].type);
  312. req->mem_seg[i].addr = fw_mem[i].pa;
  313. req->mem_seg[i].size = fw_mem[i].size;
  314. req->mem_seg[i].type = fw_mem[i].type;
  315. }
  316. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  317. wlfw_respond_mem_resp_msg_v01_ei, resp);
  318. if (ret < 0) {
  319. cnss_pr_err("Failed to initialize txn for respond memory request, err: %d\n",
  320. ret);
  321. goto out;
  322. }
  323. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  324. QMI_WLFW_RESPOND_MEM_REQ_V01,
  325. WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN,
  326. wlfw_respond_mem_req_msg_v01_ei, req);
  327. if (ret < 0) {
  328. qmi_txn_cancel(&txn);
  329. cnss_pr_err("Failed to send respond memory request, err: %d\n",
  330. ret);
  331. goto out;
  332. }
  333. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  334. if (ret < 0) {
  335. cnss_pr_err("Failed to wait for response of respond memory request, err: %d\n",
  336. ret);
  337. goto out;
  338. }
  339. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  340. cnss_pr_err("Respond memory request failed, result: %d, err: %d\n",
  341. resp->resp.result, resp->resp.error);
  342. ret = -resp->resp.result;
  343. goto out;
  344. }
  345. kfree(req);
  346. kfree(resp);
  347. return 0;
  348. out:
  349. CNSS_QMI_ASSERT();
  350. kfree(req);
  351. kfree(resp);
  352. return ret;
  353. }
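/*
 * cnss_wlfw_tgt_cap_send_sync() queries the target capabilities and caches
 * chip/board/SoC IDs, firmware version and build ID, CPR voltage, device
 * memory ranges and the PCIe gen switch capability in plat_priv for later
 * use (e.g. BDF file selection below).
 */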
  354. int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv)
  355. {
  356. struct wlfw_cap_req_msg_v01 *req;
  357. struct wlfw_cap_resp_msg_v01 *resp;
  358. struct qmi_txn txn;
  359. char *fw_build_timestamp;
  360. int ret = 0, i;
  361. cnss_pr_dbg("Sending target capability message, state: 0x%lx\n",
  362. plat_priv->driver_state);
  363. req = kzalloc(sizeof(*req), GFP_KERNEL);
  364. if (!req)
  365. return -ENOMEM;
  366. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  367. if (!resp) {
  368. kfree(req);
  369. return -ENOMEM;
  370. }
  371. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  372. wlfw_cap_resp_msg_v01_ei, resp);
  373. if (ret < 0) {
  374. cnss_pr_err("Failed to initialize txn for target capability request, err: %d\n",
  375. ret);
  376. goto out;
  377. }
  378. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  379. QMI_WLFW_CAP_REQ_V01,
  380. WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
  381. wlfw_cap_req_msg_v01_ei, req);
  382. if (ret < 0) {
  383. qmi_txn_cancel(&txn);
  384. cnss_pr_err("Failed to send respond target capability request, err: %d\n",
  385. ret);
  386. goto out;
  387. }
  388. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  389. if (ret < 0) {
  390. cnss_pr_err("Failed to wait for response of target capability request, err: %d\n",
  391. ret);
  392. goto out;
  393. }
  394. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  395. cnss_pr_err("Target capability request failed, result: %d, err: %d\n",
  396. resp->resp.result, resp->resp.error);
  397. ret = -resp->resp.result;
  398. goto out;
  399. }
  400. if (resp->chip_info_valid) {
  401. plat_priv->chip_info.chip_id = resp->chip_info.chip_id;
  402. plat_priv->chip_info.chip_family = resp->chip_info.chip_family;
  403. }
  404. if (resp->board_info_valid)
  405. plat_priv->board_info.board_id = resp->board_info.board_id;
  406. else
  407. plat_priv->board_info.board_id = 0xFF;
  408. if (resp->soc_info_valid)
  409. plat_priv->soc_info.soc_id = resp->soc_info.soc_id;
  410. if (resp->fw_version_info_valid) {
  411. plat_priv->fw_version_info.fw_version =
  412. resp->fw_version_info.fw_version;
  413. fw_build_timestamp = resp->fw_version_info.fw_build_timestamp;
  414. fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN] = '\0';
  415. strlcpy(plat_priv->fw_version_info.fw_build_timestamp,
  416. resp->fw_version_info.fw_build_timestamp,
  417. QMI_WLFW_MAX_TIMESTAMP_LEN + 1);
  418. }
  419. if (resp->fw_build_id_valid) {
  420. resp->fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN] = '\0';
  421. strlcpy(plat_priv->fw_build_id, resp->fw_build_id,
  422. QMI_WLFW_MAX_BUILD_ID_LEN + 1);
  423. }
  424. if (resp->voltage_mv_valid) {
  425. plat_priv->cpr_info.voltage = resp->voltage_mv;
  426. cnss_pr_dbg("Voltage for CPR: %dmV\n",
  427. plat_priv->cpr_info.voltage);
  428. cnss_update_cpr_info(plat_priv);
  429. }
  430. if (resp->time_freq_hz_valid) {
  431. plat_priv->device_freq_hz = resp->time_freq_hz;
  432. cnss_pr_dbg("Device frequency is %d HZ\n",
  433. plat_priv->device_freq_hz);
  434. }
  435. if (resp->otp_version_valid)
  436. plat_priv->otp_version = resp->otp_version;
  437. if (resp->dev_mem_info_valid) {
  438. for (i = 0; i < QMI_WLFW_MAX_DEV_MEM_NUM_V01; i++) {
  439. plat_priv->dev_mem_info[i].start =
  440. resp->dev_mem_info[i].start;
  441. plat_priv->dev_mem_info[i].size =
  442. resp->dev_mem_info[i].size;
  443. cnss_pr_buf("Device memory info[%d]: start = 0x%llx, size = 0x%llx\n",
  444. i, plat_priv->dev_mem_info[i].start,
  445. plat_priv->dev_mem_info[i].size);
  446. }
  447. }
  448. if (resp->fw_caps_valid)
  449. plat_priv->fw_pcie_gen_switch =
  450. !!(resp->fw_caps & QMI_WLFW_HOST_PCIE_GEN_SWITCH_V01);
  451. cnss_pr_dbg("Target capability: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, otp_version: 0x%x\n",
  452. plat_priv->chip_info.chip_id,
  453. plat_priv->chip_info.chip_family,
  454. plat_priv->board_info.board_id, plat_priv->soc_info.soc_id,
  455. plat_priv->otp_version);
  456. cnss_pr_dbg("fw_version: 0x%x, fw_build_timestamp: %s, fw_build_id: %s\n",
  457. plat_priv->fw_version_info.fw_version,
  458. plat_priv->fw_version_info.fw_build_timestamp,
  459. plat_priv->fw_build_id);
  460. kfree(req);
  461. kfree(resp);
  462. return 0;
  463. out:
  464. CNSS_QMI_ASSERT();
  465. kfree(req);
  466. kfree(resp);
  467. return ret;
  468. }
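/*
 * BDF file name selection is driven by the board ID reported in the target
 * capabilities and by the GF bit in the chip ID. Illustrative results for
 * CNSS_BDF_ELF (CNSS_BDF_BIN is analogous with the ".b"/".bin" variants):
 *
 *   board_id == 0xFF (unknown)   -> "bdwlan.elf"  ("bdwlang.elf" on GF parts)
 *   board_id == 0x25  (< 0xFF)   -> "bdwlan.e25"  ("bdwlang.e25" on GF parts)
 *   board_id == 0x0110 (> 0xFF)  -> "bdwlan01.e10"
 *
 * The bus layer then prepends any firmware path prefix via
 * cnss_bus_add_fw_prefix_name().
 */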
  469. static int cnss_get_bdf_file_name(struct cnss_plat_data *plat_priv,
  470. u32 bdf_type, char *filename,
  471. u32 filename_len)
  472. {
  473. char filename_tmp[MAX_FIRMWARE_NAME_LEN];
  474. int ret = 0;
  475. switch (bdf_type) {
  476. case CNSS_BDF_ELF:
  477. /* Board ID will be equal or less than 0xFF in GF mask case */
  478. if (plat_priv->board_info.board_id == 0xFF) {
  479. if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
  480. snprintf(filename_tmp, filename_len,
  481. ELF_BDF_FILE_NAME_GF);
  482. else
  483. snprintf(filename_tmp, filename_len,
  484. ELF_BDF_FILE_NAME);
  485. } else if (plat_priv->board_info.board_id < 0xFF) {
  486. if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
  487. snprintf(filename_tmp, filename_len,
  488. ELF_BDF_FILE_NAME_GF_PREFIX "%02x",
  489. plat_priv->board_info.board_id);
  490. else
  491. snprintf(filename_tmp, filename_len,
  492. ELF_BDF_FILE_NAME_PREFIX "%02x",
  493. plat_priv->board_info.board_id);
  494. } else {
  495. snprintf(filename_tmp, filename_len,
  496. BDF_FILE_NAME_PREFIX "%02x.e%02x",
  497. plat_priv->board_info.board_id >> 8 & 0xFF,
  498. plat_priv->board_info.board_id & 0xFF);
  499. }
  500. break;
  501. case CNSS_BDF_BIN:
  502. if (plat_priv->board_info.board_id == 0xFF) {
  503. if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
  504. snprintf(filename_tmp, filename_len,
  505. BIN_BDF_FILE_NAME_GF);
  506. else
  507. snprintf(filename_tmp, filename_len,
  508. BIN_BDF_FILE_NAME);
  509. } else if (plat_priv->board_info.board_id < 0xFF) {
  510. if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
  511. snprintf(filename_tmp, filename_len,
  512. BIN_BDF_FILE_NAME_GF_PREFIX "%02x",
  513. plat_priv->board_info.board_id);
  514. else
  515. snprintf(filename_tmp, filename_len,
  516. BIN_BDF_FILE_NAME_PREFIX "%02x",
  517. plat_priv->board_info.board_id);
  518. } else {
  519. snprintf(filename_tmp, filename_len,
  520. BDF_FILE_NAME_PREFIX "%02x.b%02x",
  521. plat_priv->board_info.board_id >> 8 & 0xFF,
  522. plat_priv->board_info.board_id & 0xFF);
  523. }
  524. break;
  525. case CNSS_BDF_REGDB:
  526. snprintf(filename_tmp, filename_len, REGDB_FILE_NAME);
  527. break;
  528. case CNSS_BDF_HDS:
  529. snprintf(filename_tmp, filename_len, HDS_FILE_NAME);
  530. break;
  531. default:
  532. cnss_pr_err("Invalid BDF type: %d\n",
  533. plat_priv->ctrl_params.bdf_type);
  534. ret = -EINVAL;
  535. break;
  536. }
  537. if (!ret)
  538. cnss_bus_add_fw_prefix_name(plat_priv, filename, filename_tmp);
  539. return ret;
  540. }
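/*
 * The BDF image is streamed to firmware in segments of at most
 * QMI_WLFW_MAX_DATA_SIZE_V01 bytes: each loop iteration sends one
 * QMI_WLFW_BDF_DOWNLOAD_REQ_V01 with an incrementing seg_id, and the final
 * segment sets req->end. On success, the optional host_bdf_data field in
 * the last response drives the internal PA regulator and CalDB file
 * download decisions.
 */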
  541. int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
  542. u32 bdf_type)
  543. {
  544. struct wlfw_bdf_download_req_msg_v01 *req;
  545. struct wlfw_bdf_download_resp_msg_v01 *resp;
  546. struct qmi_txn txn;
  547. char filename[MAX_FIRMWARE_NAME_LEN];
  548. const struct firmware *fw_entry = NULL;
  549. const u8 *temp;
  550. unsigned int remaining;
  551. int ret = 0;
  552. cnss_pr_dbg("Sending BDF download message, state: 0x%lx, type: %d\n",
  553. plat_priv->driver_state, bdf_type);
  554. req = kzalloc(sizeof(*req), GFP_KERNEL);
  555. if (!req)
  556. return -ENOMEM;
  557. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  558. if (!resp) {
  559. kfree(req);
  560. return -ENOMEM;
  561. }
  562. ret = cnss_get_bdf_file_name(plat_priv, bdf_type,
  563. filename, sizeof(filename));
  564. if (ret)
  565. goto err_req_fw;
  566. if (bdf_type == CNSS_BDF_REGDB)
  567. ret = cnss_request_firmware_direct(plat_priv, &fw_entry,
  568. filename);
  569. else
  570. ret = firmware_request_nowarn(&fw_entry, filename,
  571. &plat_priv->plat_dev->dev);
  572. if (ret) {
  573. cnss_pr_err("Failed to load BDF: %s, ret: %d\n", filename, ret);
  574. goto err_req_fw;
  575. }
  576. temp = fw_entry->data;
  577. remaining = fw_entry->size;
  578. cnss_pr_dbg("Downloading BDF: %s, size: %u\n", filename, remaining);
  579. while (remaining) {
  580. req->valid = 1;
  581. req->file_id_valid = 1;
  582. req->file_id = plat_priv->board_info.board_id;
  583. req->total_size_valid = 1;
  584. req->total_size = remaining;
  585. req->seg_id_valid = 1;
  586. req->data_valid = 1;
  587. req->end_valid = 1;
  588. req->bdf_type_valid = 1;
  589. req->bdf_type = bdf_type;
  590. if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
  591. req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
  592. } else {
  593. req->data_len = remaining;
  594. req->end = 1;
  595. }
  596. memcpy(req->data, temp, req->data_len);
  597. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  598. wlfw_bdf_download_resp_msg_v01_ei, resp);
  599. if (ret < 0) {
  600. cnss_pr_err("Failed to initialize txn for BDF download request, err: %d\n",
  601. ret);
  602. goto err_send;
  603. }
  604. ret = qmi_send_request
  605. (&plat_priv->qmi_wlfw, NULL, &txn,
  606. QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
  607. WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
  608. wlfw_bdf_download_req_msg_v01_ei, req);
  609. if (ret < 0) {
  610. qmi_txn_cancel(&txn);
  611. cnss_pr_err("Failed to send respond BDF download request, err: %d\n",
  612. ret);
  613. goto err_send;
  614. }
  615. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  616. if (ret < 0) {
  617. cnss_pr_err("Failed to wait for response of BDF download request, err: %d\n",
  618. ret);
  619. goto err_send;
  620. }
  621. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  622. cnss_pr_err("BDF download request failed, result: %d, err: %d\n",
  623. resp->resp.result, resp->resp.error);
  624. ret = -resp->resp.result;
  625. goto err_send;
  626. }
  627. remaining -= req->data_len;
  628. temp += req->data_len;
  629. req->seg_id++;
  630. }
  631. release_firmware(fw_entry);
  632. if (resp->host_bdf_data_valid) {
  633. /* For QCA6490, enable the S3E regulator only for the internal PA (non-XPA) configuration */
  634. if (!(resp->host_bdf_data & QMI_WLFW_HW_XPA_V01))
  635. cnss_enable_int_pow_amp_vreg(plat_priv);
  636. plat_priv->cbc_file_download =
  637. resp->host_bdf_data & QMI_WLFW_CBC_FILE_DOWNLOAD_V01;
  638. cnss_pr_info("Host BDF config: HW_XPA: %d CalDB: %d\n",
  639. resp->host_bdf_data & QMI_WLFW_HW_XPA_V01,
  640. plat_priv->cbc_file_download);
  641. }
  642. kfree(req);
  643. kfree(resp);
  644. return 0;
  645. err_send:
  646. release_firmware(fw_entry);
  647. err_req_fw:
  648. if (!(bdf_type == CNSS_BDF_REGDB ||
  649. test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state) ||
  650. ret == -EAGAIN))
  651. CNSS_QMI_ASSERT();
  652. kfree(req);
  653. kfree(resp);
  654. return ret;
  655. }
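/*
 * The M3 "download" is an address handoff only: the request carries the
 * physical address and size of the pre-allocated plat_priv->m3_mem region;
 * the image itself is not copied over QMI.
 */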
  656. int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv)
  657. {
  658. struct wlfw_m3_info_req_msg_v01 *req;
  659. struct wlfw_m3_info_resp_msg_v01 *resp;
  660. struct qmi_txn txn;
  661. struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
  662. int ret = 0;
  663. cnss_pr_dbg("Sending M3 information message, state: 0x%lx\n",
  664. plat_priv->driver_state);
  665. req = kzalloc(sizeof(*req), GFP_KERNEL);
  666. if (!req)
  667. return -ENOMEM;
  668. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  669. if (!resp) {
  670. kfree(req);
  671. return -ENOMEM;
  672. }
  673. if (!m3_mem->pa || !m3_mem->size) {
  674. cnss_pr_err("Memory for M3 is not available\n");
  675. ret = -ENOMEM;
  676. goto out;
  677. }
  678. cnss_pr_dbg("M3 memory, va: 0x%pK, pa: %pa, size: 0x%zx\n",
  679. m3_mem->va, &m3_mem->pa, m3_mem->size);
  680. req->addr = plat_priv->m3_mem.pa;
  681. req->size = plat_priv->m3_mem.size;
  682. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  683. wlfw_m3_info_resp_msg_v01_ei, resp);
  684. if (ret < 0) {
  685. cnss_pr_err("Failed to initialize txn for M3 information request, err: %d\n",
  686. ret);
  687. goto out;
  688. }
  689. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  690. QMI_WLFW_M3_INFO_REQ_V01,
  691. WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
  692. wlfw_m3_info_req_msg_v01_ei, req);
  693. if (ret < 0) {
  694. qmi_txn_cancel(&txn);
  695. cnss_pr_err("Failed to send M3 information request, err: %d\n",
  696. ret);
  697. goto out;
  698. }
  699. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  700. if (ret < 0) {
  701. cnss_pr_err("Failed to wait for response of M3 information request, err: %d\n",
  702. ret);
  703. goto out;
  704. }
  705. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  706. cnss_pr_err("M3 information request failed, result: %d, err: %d\n",
  707. resp->resp.result, resp->resp.error);
  708. ret = -resp->resp.result;
  709. goto out;
  710. }
  711. kfree(req);
  712. kfree(resp);
  713. return 0;
  714. out:
  715. CNSS_QMI_ASSERT();
  716. kfree(req);
  717. kfree(resp);
  718. return ret;
  719. }
  720. int cnss_wlfw_wlan_mac_req_send_sync(struct cnss_plat_data *plat_priv,
  721. u8 *mac, u32 mac_len)
  722. {
  723. struct wlfw_mac_addr_req_msg_v01 req;
  724. struct wlfw_mac_addr_resp_msg_v01 resp = {0};
  725. struct qmi_txn txn;
  726. int ret;
  727. if (!plat_priv || !mac || mac_len != QMI_WLFW_MAC_ADDR_SIZE_V01)
  728. return -EINVAL;
  729. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  730. wlfw_mac_addr_resp_msg_v01_ei, &resp);
  731. if (ret < 0) {
  732. cnss_pr_err("Failed to initialize txn for mac req, err: %d\n",
  733. ret);
  734. ret = -EIO;
  735. goto out;
  736. }
  737. cnss_pr_dbg("Sending WLAN mac req [%pM], state: 0x%lx\n",
  738. mac, plat_priv->driver_state);
  739. memcpy(req.mac_addr, mac, mac_len);
  740. req.mac_addr_valid = 1;
  741. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  742. QMI_WLFW_MAC_ADDR_REQ_V01,
  743. WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN,
  744. wlfw_mac_addr_req_msg_v01_ei, &req);
  745. if (ret < 0) {
  746. qmi_txn_cancel(&txn);
  747. cnss_pr_err("Failed to send mac req, err: %d\n", ret);
  748. ret = -EIO;
  749. goto out;
  750. }
  751. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  752. if (ret < 0) {
  753. cnss_pr_err("Failed to wait for resp of mac req, err: %d\n",
  754. ret);
  755. ret = -EIO;
  756. goto out;
  757. }
  758. if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
  759. cnss_pr_err("WLAN mac req failed, result: %d, err: %d\n",
  760. resp.resp.result, resp.resp.error);
  761. ret = -resp.resp.result;
  762. }
  763. out:
  764. return ret;
  765. }
  766. int cnss_wlfw_qdss_data_send_sync(struct cnss_plat_data *plat_priv, char *file_name,
  767. u32 total_size)
  768. {
  769. int ret = 0;
  770. struct wlfw_qdss_trace_data_req_msg_v01 *req;
  771. struct wlfw_qdss_trace_data_resp_msg_v01 *resp;
  772. unsigned char *p_qdss_trace_data_temp, *p_qdss_trace_data = NULL;
  773. unsigned int remaining;
  774. struct qmi_txn txn;
  775. cnss_pr_dbg("%s\n", __func__);
  776. req = kzalloc(sizeof(*req), GFP_KERNEL);
  777. if (!req)
  778. return -ENOMEM;
  779. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  780. if (!resp) {
  781. kfree(req);
  782. return -ENOMEM;
  783. }
  784. p_qdss_trace_data = kzalloc(total_size, GFP_KERNEL);
  785. if (!p_qdss_trace_data) {
  786. ret = -ENOMEM;
  787. goto end;
  788. }
  789. remaining = total_size;
  790. p_qdss_trace_data_temp = p_qdss_trace_data;
  791. while (remaining && resp->end == 0) {
  792. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  793. wlfw_qdss_trace_data_resp_msg_v01_ei, resp);
  794. if (ret < 0) {
  795. cnss_pr_err("Fail to init txn for QDSS trace resp %d\n",
  796. ret);
  797. goto fail;
  798. }
  799. ret = qmi_send_request
  800. (&plat_priv->qmi_wlfw, NULL, &txn,
  801. QMI_WLFW_QDSS_TRACE_DATA_REQ_V01,
  802. WLFW_QDSS_TRACE_DATA_REQ_MSG_V01_MAX_MSG_LEN,
  803. wlfw_qdss_trace_data_req_msg_v01_ei, req);
  804. if (ret < 0) {
  805. qmi_txn_cancel(&txn);
  806. cnss_pr_err("Fail to send QDSS trace data req %d\n",
  807. ret);
  808. goto fail;
  809. }
  810. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  811. if (ret < 0) {
  812. cnss_pr_err("QDSS trace resp wait failed with rc %d\n",
  813. ret);
  814. goto fail;
  815. } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  816. cnss_pr_err("QMI QDSS trace request rejected, result:%d error:%d\n",
  817. resp->resp.result, resp->resp.error);
  818. ret = -resp->resp.result;
  819. goto fail;
  820. } else {
  821. ret = 0;
  822. }
  823. cnss_pr_dbg("%s: response total size %d data len %d",
  824. __func__, resp->total_size, resp->data_len);
  825. if ((resp->total_size_valid == 1 &&
  826. resp->total_size == total_size) &&
  827. (resp->seg_id_valid == 1 && resp->seg_id == req->seg_id) &&
  828. (resp->data_valid == 1 &&
  829. resp->data_len <= QMI_WLFW_MAX_DATA_SIZE_V01)) {
  830. memcpy(p_qdss_trace_data_temp,
  831. resp->data, resp->data_len);
  832. } else {
  833. cnss_pr_err("%s: Unmatched qdss trace data, Expect total_size %u, seg_id %u, Recv total_size_valid %u, total_size %u, seg_id_valid %u, seg_id %u, data_len_valid %u, data_len %u",
  834. __func__,
  835. total_size, req->seg_id,
  836. resp->total_size_valid,
  837. resp->total_size,
  838. resp->seg_id_valid,
  839. resp->seg_id,
  840. resp->data_valid,
  841. resp->data_len);
  842. ret = -1;
  843. goto fail;
  844. }
  845. remaining -= resp->data_len;
  846. p_qdss_trace_data_temp += resp->data_len;
  847. req->seg_id++;
  848. }
  849. if (remaining == 0 && (resp->end_valid && resp->end)) {
  850. ret = cnss_genl_send_msg(p_qdss_trace_data,
  851. CNSS_GENL_MSG_TYPE_QDSS, file_name,
  852. total_size);
  853. if (ret < 0) {
  854. cnss_pr_err("Fail to save QDSS trace data: %d\n",
  855. ret);
  856. ret = -1;
  857. goto fail;
  858. }
  859. } else {
  860. cnss_pr_err("%s: QDSS trace file corrupted: remaining %u, end_valid %u, end %u",
  861. __func__,
  862. remaining, resp->end_valid, resp->end);
  863. ret = -1;
  864. goto fail;
  865. }
  866. fail:
  867. kfree(p_qdss_trace_data);
  868. end:
  869. kfree(req);
  870. kfree(resp);
  871. return ret;
  872. }
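/*
 * The QDSS trace config file name is built from QDSS_TRACE_CONFIG_FILE, an
 * optional "debug_" infix (CONFIG_CNSS2_DEBUG builds, except on WCN7850)
 * and the hardware major version, e.g. "qdss_trace_config_debug_v2.cfg" or
 * "qdss_trace_config_v1.cfg", before the bus prefix is applied.
 */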
  873. void cnss_get_qdss_cfg_filename(struct cnss_plat_data *plat_priv,
  874. char *filename, u32 filename_len)
  875. {
  876. char filename_tmp[MAX_FIRMWARE_NAME_LEN];
  877. char *debug_str = QDSS_DEBUG_FILE_STR;
  878. if (plat_priv->device_id == WCN7850_DEVICE_ID)
  879. debug_str = "";
  880. if (plat_priv->device_version.major_version == FW_V2_NUMBER)
  881. snprintf(filename_tmp, filename_len, QDSS_TRACE_CONFIG_FILE
  882. "_%s%s.cfg", debug_str, HW_V2_NUMBER);
  883. else
  884. snprintf(filename_tmp, filename_len, QDSS_TRACE_CONFIG_FILE
  885. "_%s%s.cfg", debug_str, HW_V1_NUMBER);
  886. cnss_bus_add_fw_prefix_name(plat_priv, filename, filename_tmp);
  887. }
  888. int cnss_wlfw_qdss_dnld_send_sync(struct cnss_plat_data *plat_priv)
  889. {
  890. struct wlfw_qdss_trace_config_download_req_msg_v01 *req;
  891. struct wlfw_qdss_trace_config_download_resp_msg_v01 *resp;
  892. struct qmi_txn txn;
  893. const struct firmware *fw_entry = NULL;
  894. const u8 *temp;
  895. char qdss_cfg_filename[MAX_FIRMWARE_NAME_LEN];
  896. unsigned int remaining;
  897. int ret = 0;
  898. cnss_pr_dbg("Sending QDSS config download message, state: 0x%lx\n",
  899. plat_priv->driver_state);
  900. req = kzalloc(sizeof(*req), GFP_KERNEL);
  901. if (!req)
  902. return -ENOMEM;
  903. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  904. if (!resp) {
  905. kfree(req);
  906. return -ENOMEM;
  907. }
  908. cnss_get_qdss_cfg_filename(plat_priv, qdss_cfg_filename, sizeof(qdss_cfg_filename));
  909. ret = cnss_request_firmware_direct(plat_priv, &fw_entry,
  910. qdss_cfg_filename);
  911. if (ret) {
  912. cnss_pr_dbg("Unable to load %s\n",
  913. qdss_cfg_filename);
  914. goto err_req_fw;
  915. }
  916. temp = fw_entry->data;
  917. remaining = fw_entry->size;
  918. cnss_pr_dbg("Downloading QDSS: %s, size: %u\n",
  919. qdss_cfg_filename, remaining);
  920. while (remaining) {
  921. req->total_size_valid = 1;
  922. req->total_size = remaining;
  923. req->seg_id_valid = 1;
  924. req->data_valid = 1;
  925. req->end_valid = 1;
  926. if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
  927. req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
  928. } else {
  929. req->data_len = remaining;
  930. req->end = 1;
  931. }
  932. memcpy(req->data, temp, req->data_len);
  933. ret = qmi_txn_init
  934. (&plat_priv->qmi_wlfw, &txn,
  935. wlfw_qdss_trace_config_download_resp_msg_v01_ei,
  936. resp);
  937. if (ret < 0) {
  938. cnss_pr_err("Failed to initialize txn for QDSS download request, err: %d\n",
  939. ret);
  940. goto err_send;
  941. }
  942. ret = qmi_send_request
  943. (&plat_priv->qmi_wlfw, NULL, &txn,
  944. QMI_WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_V01,
  945. WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
  946. wlfw_qdss_trace_config_download_req_msg_v01_ei, req);
  947. if (ret < 0) {
  948. qmi_txn_cancel(&txn);
  949. cnss_pr_err("Failed to send respond QDSS download request, err: %d\n",
  950. ret);
  951. goto err_send;
  952. }
  953. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  954. if (ret < 0) {
  955. cnss_pr_err("Failed to wait for response of QDSS download request, err: %d\n",
  956. ret);
  957. goto err_send;
  958. }
  959. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  960. cnss_pr_err("QDSS download request failed, result: %d, err: %d\n",
  961. resp->resp.result, resp->resp.error);
  962. ret = -resp->resp.result;
  963. goto err_send;
  964. }
  965. remaining -= req->data_len;
  966. temp += req->data_len;
  967. req->seg_id++;
  968. }
  969. release_firmware(fw_entry);
  970. kfree(req);
  971. kfree(resp);
  972. return 0;
  973. err_send:
  974. release_firmware(fw_entry);
  975. err_req_fw:
  976. kfree(req);
  977. kfree(resp);
  978. return ret;
  979. }
  980. static int wlfw_send_qdss_trace_mode_req
  981. (struct cnss_plat_data *plat_priv,
  982. enum wlfw_qdss_trace_mode_enum_v01 mode,
  983. unsigned long long option)
  984. {
  985. int rc = 0;
  986. int tmp = 0;
  987. struct wlfw_qdss_trace_mode_req_msg_v01 *req;
  988. struct wlfw_qdss_trace_mode_resp_msg_v01 *resp;
  989. struct qmi_txn txn;
  990. if (!plat_priv)
  991. return -ENODEV;
  992. req = kzalloc(sizeof(*req), GFP_KERNEL);
  993. if (!req)
  994. return -ENOMEM;
  995. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  996. if (!resp) {
  997. kfree(req);
  998. return -ENOMEM;
  999. }
  1000. req->mode_valid = 1;
  1001. req->mode = mode;
  1002. req->option_valid = 1;
  1003. req->option = option;
  1004. tmp = plat_priv->hw_trc_override;
  1005. req->hw_trc_disable_override_valid = 1;
  1006. req->hw_trc_disable_override =
  1007. (tmp > QMI_PARAM_DISABLE_V01 ? QMI_PARAM_DISABLE_V01 :
  1008. (tmp < 0 ? QMI_PARAM_INVALID_V01 : tmp));
  1009. cnss_pr_dbg("%s: mode %u, option %llu, hw_trc_disable_override: %u",
  1010. __func__, mode, option, req->hw_trc_disable_override);
  1011. rc = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1012. wlfw_qdss_trace_mode_resp_msg_v01_ei, resp);
  1013. if (rc < 0) {
  1014. cnss_pr_err("Fail to init txn for QDSS Mode resp %d\n",
  1015. rc);
  1016. goto out;
  1017. }
  1018. rc = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1019. QMI_WLFW_QDSS_TRACE_MODE_REQ_V01,
  1020. WLFW_QDSS_TRACE_MODE_REQ_MSG_V01_MAX_MSG_LEN,
  1021. wlfw_qdss_trace_mode_req_msg_v01_ei, req);
  1022. if (rc < 0) {
  1023. qmi_txn_cancel(&txn);
  1024. cnss_pr_err("Fail to send QDSS Mode req %d\n", rc);
  1025. goto out;
  1026. }
  1027. rc = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1028. if (rc < 0) {
  1029. cnss_pr_err("QDSS Mode resp wait failed with rc %d\n",
  1030. rc);
  1031. goto out;
  1032. } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1033. cnss_pr_err("QMI QDSS Mode request rejected, result:%d error:%d\n",
  1034. resp->resp.result, resp->resp.error);
  1035. rc = -resp->resp.result;
  1036. goto out;
  1037. }
  1038. kfree(resp);
  1039. kfree(req);
  1040. return rc;
  1041. out:
  1042. kfree(resp);
  1043. kfree(req);
  1044. CNSS_QMI_ASSERT();
  1045. return rc;
  1046. }
  1047. int wlfw_qdss_trace_start(struct cnss_plat_data *plat_priv)
  1048. {
  1049. return wlfw_send_qdss_trace_mode_req(plat_priv,
  1050. QMI_WLFW_QDSS_TRACE_ON_V01, 0);
  1051. }
  1052. int wlfw_qdss_trace_stop(struct cnss_plat_data *plat_priv, unsigned long long option)
  1053. {
  1054. return wlfw_send_qdss_trace_mode_req(plat_priv, QMI_WLFW_QDSS_TRACE_OFF_V01,
  1055. option);
  1056. }
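/*
 * wlfw_qdss_trace_start()/stop() are thin wrappers over
 * wlfw_send_qdss_trace_mode_req(); an illustrative (hypothetical) caller:
 *
 *   if (wlfw_qdss_trace_start(plat_priv))        // QMI_WLFW_QDSS_TRACE_ON
 *           return;
 *   ... collect trace ...
 *   wlfw_qdss_trace_stop(plat_priv, option);     // QMI_WLFW_QDSS_TRACE_OFF
 *
 * hw_trc_override is clamped to the QMI enum range before being sent as
 * hw_trc_disable_override (negative -> QMI_PARAM_INVALID_V01, values above
 * QMI_PARAM_DISABLE_V01 -> QMI_PARAM_DISABLE_V01).
 */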
  1057. int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
  1058. enum cnss_driver_mode mode)
  1059. {
  1060. struct wlfw_wlan_mode_req_msg_v01 *req;
  1061. struct wlfw_wlan_mode_resp_msg_v01 *resp;
  1062. struct qmi_txn txn;
  1063. int ret = 0;
  1064. if (!plat_priv)
  1065. return -ENODEV;
  1066. cnss_pr_dbg("Sending mode message, mode: %s(%d), state: 0x%lx\n",
  1067. cnss_qmi_mode_to_str(mode), mode, plat_priv->driver_state);
  1068. if (mode == CNSS_OFF &&
  1069. test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
  1070. cnss_pr_dbg("Recovery is in progress, ignore mode off request\n");
  1071. return 0;
  1072. }
  1073. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1074. if (!req)
  1075. return -ENOMEM;
  1076. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1077. if (!resp) {
  1078. kfree(req);
  1079. return -ENOMEM;
  1080. }
  1081. req->mode = (enum wlfw_driver_mode_enum_v01)mode;
  1082. req->hw_debug_valid = 1;
  1083. req->hw_debug = 0;
  1084. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1085. wlfw_wlan_mode_resp_msg_v01_ei, resp);
  1086. if (ret < 0) {
  1087. cnss_pr_err("Failed to initialize txn for mode request, mode: %s(%d), err: %d\n",
  1088. cnss_qmi_mode_to_str(mode), mode, ret);
  1089. goto out;
  1090. }
  1091. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1092. QMI_WLFW_WLAN_MODE_REQ_V01,
  1093. WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
  1094. wlfw_wlan_mode_req_msg_v01_ei, req);
  1095. if (ret < 0) {
  1096. qmi_txn_cancel(&txn);
  1097. cnss_pr_err("Failed to send mode request, mode: %s(%d), err: %d\n",
  1098. cnss_qmi_mode_to_str(mode), mode, ret);
  1099. goto out;
  1100. }
  1101. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1102. if (ret < 0) {
  1103. cnss_pr_err("Failed to wait for response of mode request, mode: %s(%d), err: %d\n",
  1104. cnss_qmi_mode_to_str(mode), mode, ret);
  1105. goto out;
  1106. }
  1107. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1108. cnss_pr_err("Mode request failed, mode: %s(%d), result: %d, err: %d\n",
  1109. cnss_qmi_mode_to_str(mode), mode, resp->resp.result,
  1110. resp->resp.error);
  1111. ret = -resp->resp.result;
  1112. goto out;
  1113. }
  1114. kfree(req);
  1115. kfree(resp);
  1116. return 0;
  1117. out:
  1118. if (mode == CNSS_OFF) {
  1119. cnss_pr_dbg("WLFW service is disconnected while sending mode off request\n");
  1120. ret = 0;
  1121. } else {
  1122. CNSS_QMI_ASSERT();
  1123. }
  1124. kfree(req);
  1125. kfree(resp);
  1126. return ret;
  1127. }
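/*
 * cnss_wlfw_wlan_cfg_send_sync() forwards the host's copy-engine target
 * config, service-to-pipe map and shadow register (v2) config to firmware,
 * clamping each array to the QMI maximums (QMI_WLFW_MAX_NUM_CE_V01,
 * QMI_WLFW_MAX_NUM_SVC_V01, QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01).
 */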
  1128. int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
  1129. struct cnss_wlan_enable_cfg *config,
  1130. const char *host_version)
  1131. {
  1132. struct wlfw_wlan_cfg_req_msg_v01 *req;
  1133. struct wlfw_wlan_cfg_resp_msg_v01 *resp;
  1134. struct qmi_txn txn;
  1135. u32 i;
  1136. int ret = 0;
  1137. if (!plat_priv)
  1138. return -ENODEV;
  1139. cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n",
  1140. plat_priv->driver_state);
  1141. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1142. if (!req)
  1143. return -ENOMEM;
  1144. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1145. if (!resp) {
  1146. kfree(req);
  1147. return -ENOMEM;
  1148. }
  1149. req->host_version_valid = 1;
  1150. strlcpy(req->host_version, host_version,
  1151. QMI_WLFW_MAX_STR_LEN_V01 + 1);
  1152. req->tgt_cfg_valid = 1;
  1153. if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
  1154. req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
  1155. else
  1156. req->tgt_cfg_len = config->num_ce_tgt_cfg;
  1157. for (i = 0; i < req->tgt_cfg_len; i++) {
  1158. req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
  1159. req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
  1160. req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
  1161. req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
  1162. req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
  1163. }
  1164. req->svc_cfg_valid = 1;
  1165. if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
  1166. req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
  1167. else
  1168. req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
  1169. for (i = 0; i < req->svc_cfg_len; i++) {
  1170. req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
  1171. req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
  1172. req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
  1173. }
  1174. req->shadow_reg_v2_valid = 1;
  1175. if (config->num_shadow_reg_v2_cfg >
  1176. QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01)
  1177. req->shadow_reg_v2_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01;
  1178. else
  1179. req->shadow_reg_v2_len = config->num_shadow_reg_v2_cfg;
  1180. memcpy(req->shadow_reg_v2, config->shadow_reg_v2_cfg,
  1181. sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01)
  1182. * req->shadow_reg_v2_len);
  1183. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1184. wlfw_wlan_cfg_resp_msg_v01_ei, resp);
  1185. if (ret < 0) {
  1186. cnss_pr_err("Failed to initialize txn for WLAN config request, err: %d\n",
  1187. ret);
  1188. goto out;
  1189. }
  1190. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1191. QMI_WLFW_WLAN_CFG_REQ_V01,
  1192. WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
  1193. wlfw_wlan_cfg_req_msg_v01_ei, req);
  1194. if (ret < 0) {
  1195. qmi_txn_cancel(&txn);
  1196. cnss_pr_err("Failed to send WLAN config request, err: %d\n",
  1197. ret);
  1198. goto out;
  1199. }
  1200. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1201. if (ret < 0) {
  1202. cnss_pr_err("Failed to wait for response of WLAN config request, err: %d\n",
  1203. ret);
  1204. goto out;
  1205. }
  1206. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1207. cnss_pr_err("WLAN config request failed, result: %d, err: %d\n",
  1208. resp->resp.result, resp->resp.error);
  1209. ret = -resp->resp.result;
  1210. goto out;
  1211. }
  1212. kfree(req);
  1213. kfree(resp);
  1214. return 0;
  1215. out:
  1216. CNSS_QMI_ASSERT();
  1217. kfree(req);
  1218. kfree(resp);
  1219. return ret;
  1220. }
  1221. int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv,
  1222. u32 offset, u32 mem_type,
  1223. u32 data_len, u8 *data)
  1224. {
  1225. struct wlfw_athdiag_read_req_msg_v01 *req;
  1226. struct wlfw_athdiag_read_resp_msg_v01 *resp;
  1227. struct qmi_txn txn;
  1228. int ret = 0;
  1229. if (!plat_priv)
  1230. return -ENODEV;
  1231. if (!data || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
  1232. cnss_pr_err("Invalid parameters for athdiag read: data %pK, data_len %u\n",
  1233. data, data_len);
  1234. return -EINVAL;
  1235. }
  1236. cnss_pr_dbg("athdiag read: state 0x%lx, offset %x, mem_type %x, data_len %u\n",
  1237. plat_priv->driver_state, offset, mem_type, data_len);
  1238. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1239. if (!req)
  1240. return -ENOMEM;
  1241. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1242. if (!resp) {
  1243. kfree(req);
  1244. return -ENOMEM;
  1245. }
  1246. req->offset = offset;
  1247. req->mem_type = mem_type;
  1248. req->data_len = data_len;
  1249. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1250. wlfw_athdiag_read_resp_msg_v01_ei, resp);
  1251. if (ret < 0) {
  1252. cnss_pr_err("Failed to initialize txn for athdiag read request, err: %d\n",
  1253. ret);
  1254. goto out;
  1255. }
  1256. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1257. QMI_WLFW_ATHDIAG_READ_REQ_V01,
  1258. WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN,
  1259. wlfw_athdiag_read_req_msg_v01_ei, req);
  1260. if (ret < 0) {
  1261. qmi_txn_cancel(&txn);
  1262. cnss_pr_err("Failed to send athdiag read request, err: %d\n",
  1263. ret);
  1264. goto out;
  1265. }
  1266. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1267. if (ret < 0) {
  1268. cnss_pr_err("Failed to wait for response of athdiag read request, err: %d\n",
  1269. ret);
  1270. goto out;
  1271. }
  1272. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1273. cnss_pr_err("Athdiag read request failed, result: %d, err: %d\n",
  1274. resp->resp.result, resp->resp.error);
  1275. ret = -resp->resp.result;
  1276. goto out;
  1277. }
  1278. if (!resp->data_valid || resp->data_len != data_len) {
  1279. cnss_pr_err("athdiag read data is invalid, data_valid = %u, data_len = %u\n",
  1280. resp->data_valid, resp->data_len);
  1281. ret = -EINVAL;
  1282. goto out;
  1283. }
  1284. memcpy(data, resp->data, resp->data_len);
  1285. kfree(req);
  1286. kfree(resp);
  1287. return 0;
  1288. out:
  1289. kfree(req);
  1290. kfree(resp);
  1291. return ret;
  1292. }
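/* Write data_len bytes from the caller's buffer to target memory
 * (offset within mem_type) over the WLFW ATHDIAG interface.
 */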
  1293. int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv,
  1294. u32 offset, u32 mem_type,
  1295. u32 data_len, u8 *data)
  1296. {
  1297. struct wlfw_athdiag_write_req_msg_v01 *req;
  1298. struct wlfw_athdiag_write_resp_msg_v01 *resp;
  1299. struct qmi_txn txn;
  1300. int ret = 0;
  1301. if (!plat_priv)
  1302. return -ENODEV;
  1303. if (!data || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
  1304. cnss_pr_err("Invalid parameters for athdiag write: data %pK, data_len %u\n",
  1305. data, data_len);
  1306. return -EINVAL;
  1307. }
  1308. cnss_pr_dbg("athdiag write: state 0x%lx, offset %x, mem_type %x, data_len %u, data %pK\n",
  1309. plat_priv->driver_state, offset, mem_type, data_len, data);
  1310. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1311. if (!req)
  1312. return -ENOMEM;
  1313. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1314. if (!resp) {
  1315. kfree(req);
  1316. return -ENOMEM;
  1317. }
  1318. req->offset = offset;
  1319. req->mem_type = mem_type;
  1320. req->data_len = data_len;
  1321. memcpy(req->data, data, data_len);
  1322. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1323. wlfw_athdiag_write_resp_msg_v01_ei, resp);
  1324. if (ret < 0) {
  1325. cnss_pr_err("Failed to initialize txn for athdiag write request, err: %d\n",
  1326. ret);
  1327. goto out;
  1328. }
  1329. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1330. QMI_WLFW_ATHDIAG_WRITE_REQ_V01,
  1331. WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN,
  1332. wlfw_athdiag_write_req_msg_v01_ei, req);
  1333. if (ret < 0) {
  1334. qmi_txn_cancel(&txn);
  1335. cnss_pr_err("Failed to send athdiag write request, err: %d\n",
  1336. ret);
  1337. goto out;
  1338. }
  1339. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1340. if (ret < 0) {
  1341. cnss_pr_err("Failed to wait for response of athdiag write request, err: %d\n",
  1342. ret);
  1343. goto out;
  1344. }
  1345. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1346. cnss_pr_err("Athdiag write request failed, result: %d, err: %d\n",
  1347. resp->resp.result, resp->resp.error);
  1348. ret = -resp->resp.result;
  1349. goto out;
  1350. }
  1351. kfree(req);
  1352. kfree(resp);
  1353. return 0;
  1354. out:
  1355. kfree(req);
  1356. kfree(resp);
  1357. return ret;
  1358. }
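/* Send the INI request to configure the firmware log mode. */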
  1359. int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
  1360. u8 fw_log_mode)
  1361. {
  1362. struct wlfw_ini_req_msg_v01 *req;
  1363. struct wlfw_ini_resp_msg_v01 *resp;
  1364. struct qmi_txn txn;
  1365. int ret = 0;
  1366. if (!plat_priv)
  1367. return -ENODEV;
  1368. cnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log_mode: %d\n",
  1369. plat_priv->driver_state, fw_log_mode);
  1370. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1371. if (!req)
  1372. return -ENOMEM;
  1373. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1374. if (!resp) {
  1375. kfree(req);
  1376. return -ENOMEM;
  1377. }
  1378. req->enablefwlog_valid = 1;
  1379. req->enablefwlog = fw_log_mode;
  1380. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1381. wlfw_ini_resp_msg_v01_ei, resp);
  1382. if (ret < 0) {
  1383. cnss_pr_err("Failed to initialize txn for ini request, fw_log_mode: %d, err: %d\n",
  1384. fw_log_mode, ret);
  1385. goto out;
  1386. }
  1387. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1388. QMI_WLFW_INI_REQ_V01,
  1389. WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
  1390. wlfw_ini_req_msg_v01_ei, req);
  1391. if (ret < 0) {
  1392. qmi_txn_cancel(&txn);
  1393. cnss_pr_err("Failed to send ini request, fw_log_mode: %d, err: %d\n",
  1394. fw_log_mode, ret);
  1395. goto out;
  1396. }
  1397. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1398. if (ret < 0) {
  1399. cnss_pr_err("Failed to wait for response of ini request, fw_log_mode: %d, err: %d\n",
  1400. fw_log_mode, ret);
  1401. goto out;
  1402. }
  1403. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1404. cnss_pr_err("Ini request failed, fw_log_mode: %d, result: %d, err: %d\n",
  1405. fw_log_mode, resp->resp.result, resp->resp.error);
  1406. ret = -resp->resp.result;
  1407. goto out;
  1408. }
  1409. kfree(req);
  1410. kfree(resp);
  1411. return 0;
  1412. out:
  1413. kfree(req);
  1414. kfree(resp);
  1415. return ret;
  1416. }
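/* Request a PCIe Gen speed switch in firmware. The cached gen speed is
 * single-use and is reset to invalid after the request is handled.
 */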
  1417. int cnss_wlfw_send_pcie_gen_speed_sync(struct cnss_plat_data *plat_priv)
  1418. {
  1419. struct wlfw_pcie_gen_switch_req_msg_v01 req;
  1420. struct wlfw_pcie_gen_switch_resp_msg_v01 resp = {0};
  1421. struct qmi_txn txn;
  1422. int ret = 0;
  1423. if (!plat_priv)
  1424. return -ENODEV;
  1425. if (plat_priv->pcie_gen_speed == QMI_PCIE_GEN_SPEED_INVALID_V01 ||
  1426. !plat_priv->fw_pcie_gen_switch) {
  1427. cnss_pr_dbg("PCIE Gen speed not setup\n");
  1428. return 0;
  1429. }
  1430. cnss_pr_dbg("Sending PCIE Gen speed: %d state: 0x%lx\n",
  1431. plat_priv->pcie_gen_speed, plat_priv->driver_state);
  1432. req.pcie_speed = (enum wlfw_pcie_gen_speed_v01)
  1433. plat_priv->pcie_gen_speed;
  1434. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1435. wlfw_pcie_gen_switch_resp_msg_v01_ei, &resp);
  1436. if (ret < 0) {
  1437. cnss_pr_err("Failed to initialize txn for PCIE speed switch err: %d\n",
  1438. ret);
  1439. goto out;
  1440. }
  1441. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1442. QMI_WLFW_PCIE_GEN_SWITCH_REQ_V01,
  1443. WLFW_PCIE_GEN_SWITCH_REQ_MSG_V01_MAX_MSG_LEN,
  1444. wlfw_pcie_gen_switch_req_msg_v01_ei, &req);
  1445. if (ret < 0) {
  1446. qmi_txn_cancel(&txn);
  1447. cnss_pr_err("Failed to send PCIE speed switch, err: %d\n", ret);
  1448. goto out;
  1449. }
  1450. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1451. if (ret < 0) {
  1452. cnss_pr_err("Failed to wait for PCIE Gen switch resp, err: %d\n",
  1453. ret);
  1454. goto out;
  1455. }
  1456. if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
  1457. cnss_pr_err("PCIE Gen Switch req failed, Speed: %d, result: %d, err: %d\n",
  1458. plat_priv->pcie_gen_speed, resp.resp.result,
  1459. resp.resp.error);
  1460. ret = -resp.resp.result;
  1461. }
  1462. out:
  1463. /* Reset PCIE Gen speed after one time use */
  1464. plat_priv->pcie_gen_speed = QMI_PCIE_GEN_SPEED_INVALID_V01;
  1465. return ret;
  1466. }
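/* Query the firmware antenna configuration and cache it in
 * plat_priv->antenna when the response reports a valid value.
 */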
  1467. int cnss_wlfw_antenna_switch_send_sync(struct cnss_plat_data *plat_priv)
  1468. {
  1469. struct wlfw_antenna_switch_req_msg_v01 *req;
  1470. struct wlfw_antenna_switch_resp_msg_v01 *resp;
  1471. struct qmi_txn txn;
  1472. int ret = 0;
  1473. if (!plat_priv)
  1474. return -ENODEV;
  1475. cnss_pr_dbg("Sending antenna switch sync request, state: 0x%lx\n",
  1476. plat_priv->driver_state);
  1477. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1478. if (!req)
  1479. return -ENOMEM;
  1480. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1481. if (!resp) {
  1482. kfree(req);
  1483. return -ENOMEM;
  1484. }
  1485. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1486. wlfw_antenna_switch_resp_msg_v01_ei, resp);
  1487. if (ret < 0) {
  1488. cnss_pr_err("Failed to initialize txn for antenna switch request, err: %d\n",
  1489. ret);
  1490. goto out;
  1491. }
  1492. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1493. QMI_WLFW_ANTENNA_SWITCH_REQ_V01,
  1494. WLFW_ANTENNA_SWITCH_REQ_MSG_V01_MAX_MSG_LEN,
  1495. wlfw_antenna_switch_req_msg_v01_ei, req);
  1496. if (ret < 0) {
  1497. qmi_txn_cancel(&txn);
  1498. cnss_pr_err("Failed to send antenna switch request, err: %d\n",
  1499. ret);
  1500. goto out;
  1501. }
  1502. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1503. if (ret < 0) {
  1504. cnss_pr_err("Failed to wait for response of antenna switch request, err: %d\n",
  1505. ret);
  1506. goto out;
  1507. }
  1508. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1509. cnss_pr_dbg("Antenna switch request failed, result: %d, err: %d\n",
  1510. resp->resp.result, resp->resp.error);
  1511. ret = -resp->resp.result;
  1512. goto out;
  1513. }
  1514. if (resp->antenna_valid)
  1515. plat_priv->antenna = resp->antenna;
  1516. cnss_pr_dbg("Antenna valid: %u, antenna 0x%llx\n",
  1517. resp->antenna_valid, resp->antenna);
  1518. kfree(req);
  1519. kfree(resp);
  1520. return 0;
  1521. out:
  1522. kfree(req);
  1523. kfree(resp);
  1524. return ret;
  1525. }
  1526. int cnss_wlfw_antenna_grant_send_sync(struct cnss_plat_data *plat_priv)
  1527. {
  1528. struct wlfw_antenna_grant_req_msg_v01 *req;
  1529. struct wlfw_antenna_grant_resp_msg_v01 *resp;
  1530. struct qmi_txn txn;
  1531. int ret = 0;
  1532. if (!plat_priv)
  1533. return -ENODEV;
  1534. cnss_pr_dbg("Sending antenna grant sync request, state: 0x%lx, grant 0x%llx\n",
  1535. plat_priv->driver_state, plat_priv->grant);
  1536. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1537. if (!req)
  1538. return -ENOMEM;
  1539. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1540. if (!resp) {
  1541. kfree(req);
  1542. return -ENOMEM;
  1543. }
  1544. req->grant_valid = 1;
  1545. req->grant = plat_priv->grant;
  1546. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1547. wlfw_antenna_grant_resp_msg_v01_ei, resp);
  1548. if (ret < 0) {
  1549. cnss_pr_err("Failed to initialize txn for antenna grant request, err: %d\n",
  1550. ret);
  1551. goto out;
  1552. }
  1553. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1554. QMI_WLFW_ANTENNA_GRANT_REQ_V01,
  1555. WLFW_ANTENNA_GRANT_REQ_MSG_V01_MAX_MSG_LEN,
  1556. wlfw_antenna_grant_req_msg_v01_ei, req);
  1557. if (ret < 0) {
  1558. qmi_txn_cancel(&txn);
  1559. cnss_pr_err("Failed to send antenna grant request, err: %d\n",
  1560. ret);
  1561. goto out;
  1562. }
  1563. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1564. if (ret < 0) {
  1565. cnss_pr_err("Failed to wait for response of antenna grant request, err: %d\n",
  1566. ret);
  1567. goto out;
  1568. }
  1569. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1570. cnss_pr_err("Antenna grant request failed, result: %d, err: %d\n",
  1571. resp->resp.result, resp->resp.error);
  1572. ret = -resp->resp.result;
  1573. goto out;
  1574. }
  1575. kfree(req);
  1576. kfree(resp);
  1577. return 0;
  1578. out:
  1579. kfree(req);
  1580. kfree(resp);
  1581. return ret;
  1582. }
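/* Report the host-allocated QDSS trace memory segments (address, size
 * and type) to firmware.
 */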
  1583. int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv)
  1584. {
  1585. struct wlfw_qdss_trace_mem_info_req_msg_v01 *req;
  1586. struct wlfw_qdss_trace_mem_info_resp_msg_v01 *resp;
  1587. struct qmi_txn txn;
  1588. struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
  1589. int ret = 0;
  1590. int i;
  1591. cnss_pr_dbg("Sending QDSS trace mem info, state: 0x%lx\n",
  1592. plat_priv->driver_state);
  1593. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1594. if (!req)
  1595. return -ENOMEM;
  1596. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1597. if (!resp) {
  1598. kfree(req);
  1599. return -ENOMEM;
  1600. }
  1601. req->mem_seg_len = plat_priv->qdss_mem_seg_len;
  1602. for (i = 0; i < req->mem_seg_len; i++) {
  1603. cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
  1604. qdss_mem[i].va, &qdss_mem[i].pa,
  1605. qdss_mem[i].size, qdss_mem[i].type);
  1606. req->mem_seg[i].addr = qdss_mem[i].pa;
  1607. req->mem_seg[i].size = qdss_mem[i].size;
  1608. req->mem_seg[i].type = qdss_mem[i].type;
  1609. }
  1610. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1611. wlfw_qdss_trace_mem_info_resp_msg_v01_ei, resp);
  1612. if (ret < 0) {
  1613. cnss_pr_err("Fail to initialize txn for QDSS trace mem request: err %d\n",
  1614. ret);
  1615. goto out;
  1616. }
  1617. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1618. QMI_WLFW_QDSS_TRACE_MEM_INFO_REQ_V01,
  1619. WLFW_QDSS_TRACE_MEM_INFO_REQ_MSG_V01_MAX_MSG_LEN,
  1620. wlfw_qdss_trace_mem_info_req_msg_v01_ei, req);
  1621. if (ret < 0) {
  1622. qmi_txn_cancel(&txn);
  1623. cnss_pr_err("Fail to send QDSS trace mem info request: err %d\n",
  1624. ret);
  1625. goto out;
  1626. }
  1627. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1628. if (ret < 0) {
  1629. cnss_pr_err("Fail to wait for response of QDSS trace mem info request, err %d\n",
  1630. ret);
  1631. goto out;
  1632. }
  1633. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1634. cnss_pr_err("QDSS trace mem info request failed, result: %d, err: %d\n",
  1635. resp->resp.result, resp->resp.error);
  1636. ret = -resp->resp.result;
  1637. goto out;
  1638. }
  1639. kfree(req);
  1640. kfree(resp);
  1641. return 0;
  1642. out:
  1643. kfree(req);
  1644. kfree(resp);
  1645. return ret;
  1646. }
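/* Forward an IMS WFC call status indication to firmware. The indication
 * is dropped if firmware is not yet ready.
 */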
  1647. static int cnss_wlfw_wfc_call_status_send_sync
  1648. (struct cnss_plat_data *plat_priv,
  1649. const struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg)
  1650. {
  1651. struct wlfw_wfc_call_status_req_msg_v01 *req;
  1652. struct wlfw_wfc_call_status_resp_msg_v01 *resp;
  1653. struct qmi_txn txn;
  1654. int ret = 0;
  1655. if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
  1656. cnss_pr_err("Drop IMS WFC indication as FW not initialized\n");
  1657. return -EINVAL;
  1658. }
  1659. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1660. if (!req)
  1661. return -ENOMEM;
  1662. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1663. if (!resp) {
  1664. kfree(req);
  1665. return -ENOMEM;
  1666. }
  1667. /**
  1668. * The WFC call r1 design has CNSS acting as a pass-through using an
  1669. * opaque hex buffer. The r2 update expands the QMI structure and the
  1670. * QMI-decoded structures carry padding, so the buffer design cannot
  1671. * be used. For backward compatibility with the r1 design, copy only
  1672. * the wfc_call_active value into the hex buffer.
  1673. */
  1674. req->wfc_call_status_len = sizeof(ind_msg->wfc_call_active);
  1675. req->wfc_call_status[0] = ind_msg->wfc_call_active;
  1676. /* wfc_call_active is mandatory in IMS indication */
  1677. req->wfc_call_active_valid = 1;
  1678. req->wfc_call_active = ind_msg->wfc_call_active;
  1679. req->all_wfc_calls_held_valid = ind_msg->all_wfc_calls_held_valid;
  1680. req->all_wfc_calls_held = ind_msg->all_wfc_calls_held;
  1681. req->is_wfc_emergency_valid = ind_msg->is_wfc_emergency_valid;
  1682. req->is_wfc_emergency = ind_msg->is_wfc_emergency;
  1683. req->twt_ims_start_valid = ind_msg->twt_ims_start_valid;
  1684. req->twt_ims_start = ind_msg->twt_ims_start;
  1685. req->twt_ims_int_valid = ind_msg->twt_ims_int_valid;
  1686. req->twt_ims_int = ind_msg->twt_ims_int;
  1687. req->media_quality_valid = ind_msg->media_quality_valid;
  1688. req->media_quality =
  1689. (enum wlfw_wfc_media_quality_v01)ind_msg->media_quality;
  1690. cnss_pr_dbg("CNSS->FW: WFC_CALL_REQ: state: 0x%lx\n",
  1691. plat_priv->driver_state);
  1692. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1693. wlfw_wfc_call_status_resp_msg_v01_ei, resp);
  1694. if (ret < 0) {
  1695. cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Txn Init: Err %d\n",
  1696. ret);
  1697. goto out;
  1698. }
  1699. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1700. QMI_WLFW_WFC_CALL_STATUS_REQ_V01,
  1701. WLFW_WFC_CALL_STATUS_REQ_MSG_V01_MAX_MSG_LEN,
  1702. wlfw_wfc_call_status_req_msg_v01_ei, req);
  1703. if (ret < 0) {
  1704. qmi_txn_cancel(&txn);
  1705. cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Send Err: %d\n",
  1706. ret);
  1707. goto out;
  1708. }
  1709. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1710. if (ret < 0) {
  1711. cnss_pr_err("FW->CNSS: WFC_CALL_RSP: QMI Wait Err: %d\n",
  1712. ret);
  1713. goto out;
  1714. }
  1715. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1716. cnss_pr_err("FW->CNSS: WFC_CALL_RSP: Result: %d Err: %d\n",
  1717. resp->resp.result, resp->resp.error);
  1718. ret = -resp->resp.result;
  1719. goto out;
  1720. }
  1721. ret = 0;
  1722. out:
  1723. kfree(req);
  1724. kfree(resp);
  1725. return ret;
  1726. }
  1727. int cnss_wlfw_dynamic_feature_mask_send_sync(struct cnss_plat_data *plat_priv)
  1728. {
  1729. struct wlfw_dynamic_feature_mask_req_msg_v01 *req;
  1730. struct wlfw_dynamic_feature_mask_resp_msg_v01 *resp;
  1731. struct qmi_txn txn;
  1732. int ret = 0;
  1733. cnss_pr_dbg("Sending dynamic feature mask 0x%llx, state: 0x%lx\n",
  1734. plat_priv->dynamic_feature,
  1735. plat_priv->driver_state);
  1736. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1737. if (!req)
  1738. return -ENOMEM;
  1739. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1740. if (!resp) {
  1741. kfree(req);
  1742. return -ENOMEM;
  1743. }
  1744. req->mask_valid = 1;
  1745. req->mask = plat_priv->dynamic_feature;
  1746. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1747. wlfw_dynamic_feature_mask_resp_msg_v01_ei, resp);
  1748. if (ret < 0) {
  1749. cnss_pr_err("Fail to initialize txn for dynamic feature mask request: err %d\n",
  1750. ret);
  1751. goto out;
  1752. }
  1753. ret = qmi_send_request
  1754. (&plat_priv->qmi_wlfw, NULL, &txn,
  1755. QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01,
  1756. WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN,
  1757. wlfw_dynamic_feature_mask_req_msg_v01_ei, req);
  1758. if (ret < 0) {
  1759. qmi_txn_cancel(&txn);
  1760. cnss_pr_err("Fail to send dynamic feature mask request: err %d\n",
  1761. ret);
  1762. goto out;
  1763. }
  1764. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1765. if (ret < 0) {
  1766. cnss_pr_err("Fail to wait for response of dynamic feature mask request, err %d\n",
  1767. ret);
  1768. goto out;
  1769. }
  1770. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1771. cnss_pr_err("Dynamic feature mask request failed, result: %d, err: %d\n",
  1772. resp->resp.result, resp->resp.error);
  1773. ret = -resp->resp.result;
  1774. goto out;
  1775. }
  1776. out:
  1777. kfree(req);
  1778. kfree(resp);
  1779. return ret;
  1780. }
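/* Send an opaque get-info command buffer (up to
 * QMI_WLFW_MAX_DATA_SIZE_V01 bytes) of the given type to firmware.
 */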
  1781. int cnss_wlfw_get_info_send_sync(struct cnss_plat_data *plat_priv, int type,
  1782. void *cmd, int cmd_len)
  1783. {
  1784. struct wlfw_get_info_req_msg_v01 *req;
  1785. struct wlfw_get_info_resp_msg_v01 *resp;
  1786. struct qmi_txn txn;
  1787. int ret = 0;
  1788. cnss_pr_buf("Sending get info message, type: %d, cmd length: %d, state: 0x%lx\n",
  1789. type, cmd_len, plat_priv->driver_state);
  1790. if (cmd_len > QMI_WLFW_MAX_DATA_SIZE_V01)
  1791. return -EINVAL;
  1792. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1793. if (!req)
  1794. return -ENOMEM;
  1795. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1796. if (!resp) {
  1797. kfree(req);
  1798. return -ENOMEM;
  1799. }
  1800. req->type = type;
  1801. req->data_len = cmd_len;
  1802. memcpy(req->data, cmd, req->data_len);
  1803. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1804. wlfw_get_info_resp_msg_v01_ei, resp);
  1805. if (ret < 0) {
  1806. cnss_pr_err("Failed to initialize txn for get info request, err: %d\n",
  1807. ret);
  1808. goto out;
  1809. }
  1810. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1811. QMI_WLFW_GET_INFO_REQ_V01,
  1812. WLFW_GET_INFO_REQ_MSG_V01_MAX_MSG_LEN,
  1813. wlfw_get_info_req_msg_v01_ei, req);
  1814. if (ret < 0) {
  1815. qmi_txn_cancel(&txn);
  1816. cnss_pr_err("Failed to send get info request, err: %d\n",
  1817. ret);
  1818. goto out;
  1819. }
  1820. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1821. if (ret < 0) {
  1822. cnss_pr_err("Failed to wait for response of get info request, err: %d\n",
  1823. ret);
  1824. goto out;
  1825. }
  1826. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1827. cnss_pr_err("Get info request failed, result: %d, err: %d\n",
  1828. resp->resp.result, resp->resp.error);
  1829. ret = -resp->resp.result;
  1830. goto out;
  1831. }
  1832. kfree(req);
  1833. kfree(resp);
  1834. return 0;
  1835. out:
  1836. kfree(req);
  1837. kfree(resp);
  1838. return ret;
  1839. }
  1840. unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv)
  1841. {
  1842. return QMI_WLFW_TIMEOUT_MS;
  1843. }
  1844. static void cnss_wlfw_request_mem_ind_cb(struct qmi_handle *qmi_wlfw,
  1845. struct sockaddr_qrtr *sq,
  1846. struct qmi_txn *txn, const void *data)
  1847. {
  1848. struct cnss_plat_data *plat_priv =
  1849. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  1850. const struct wlfw_request_mem_ind_msg_v01 *ind_msg = data;
  1851. int i;
  1852. cnss_pr_dbg("Received QMI WLFW request memory indication\n");
  1853. if (!txn) {
  1854. cnss_pr_err("Spurious indication\n");
  1855. return;
  1856. }
  1857. plat_priv->fw_mem_seg_len = ind_msg->mem_seg_len;
  1858. for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
  1859. cnss_pr_dbg("FW requests for memory, size: 0x%x, type: %u\n",
  1860. ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
  1861. plat_priv->fw_mem[i].type = ind_msg->mem_seg[i].type;
  1862. plat_priv->fw_mem[i].size = ind_msg->mem_seg[i].size;
  1863. if (plat_priv->fw_mem[i].type == CNSS_MEM_TYPE_DDR)
  1864. plat_priv->fw_mem[i].attrs |=
  1865. DMA_ATTR_FORCE_CONTIGUOUS;
  1866. if (plat_priv->fw_mem[i].type == CNSS_MEM_CAL_V01)
  1867. plat_priv->cal_mem = &plat_priv->fw_mem[i];
  1868. }
  1869. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM,
  1870. 0, NULL);
  1871. }
  1872. static void cnss_wlfw_fw_mem_ready_ind_cb(struct qmi_handle *qmi_wlfw,
  1873. struct sockaddr_qrtr *sq,
  1874. struct qmi_txn *txn, const void *data)
  1875. {
  1876. struct cnss_plat_data *plat_priv =
  1877. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  1878. cnss_pr_dbg("Received QMI WLFW FW memory ready indication\n");
  1879. if (!txn) {
  1880. cnss_pr_err("Spurious indication\n");
  1881. return;
  1882. }
  1883. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_MEM_READY,
  1884. 0, NULL);
  1885. }
  1886. /**
  1887. * cnss_wlfw_fw_ready_ind_cb: FW ready indication handler (Helium arch)
  1888. *
  1889. * This event is not required for HST/HSP, as the FW calibration-done
  1890. * status is reported via QMI_WLFW_CAL_DONE_IND_V01.
  1891. */
  1892. static void cnss_wlfw_fw_ready_ind_cb(struct qmi_handle *qmi_wlfw,
  1893. struct sockaddr_qrtr *sq,
  1894. struct qmi_txn *txn, const void *data)
  1895. {
  1896. struct cnss_plat_data *plat_priv =
  1897. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  1898. struct cnss_cal_info *cal_info;
  1899. if (!txn) {
  1900. cnss_pr_err("Spurious indication\n");
  1901. return;
  1902. }
  1903. if (plat_priv->device_id == QCA6390_DEVICE_ID ||
  1904. plat_priv->device_id == QCA6490_DEVICE_ID) {
  1905. cnss_pr_dbg("Ignore FW Ready Indication for HST/HSP\n");
  1906. return;
  1907. }
  1908. cnss_pr_dbg("Received QMI WLFW FW ready indication.\n");
  1909. cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
  1910. if (!cal_info)
  1911. return;
  1912. cal_info->cal_status = CNSS_CAL_DONE;
  1913. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
  1914. 0, cal_info);
  1915. }
  1916. static void cnss_wlfw_fw_init_done_ind_cb(struct qmi_handle *qmi_wlfw,
  1917. struct sockaddr_qrtr *sq,
  1918. struct qmi_txn *txn, const void *data)
  1919. {
  1920. struct cnss_plat_data *plat_priv =
  1921. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  1922. cnss_pr_dbg("Received QMI WLFW FW initialization done indication\n");
  1923. if (!txn) {
  1924. cnss_pr_err("Spurious indication\n");
  1925. return;
  1926. }
  1927. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_READY, 0, NULL);
  1928. }
  1929. static void cnss_wlfw_pin_result_ind_cb(struct qmi_handle *qmi_wlfw,
  1930. struct sockaddr_qrtr *sq,
  1931. struct qmi_txn *txn, const void *data)
  1932. {
  1933. struct cnss_plat_data *plat_priv =
  1934. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  1935. const struct wlfw_pin_connect_result_ind_msg_v01 *ind_msg = data;
  1936. cnss_pr_dbg("Received QMI WLFW pin connect result indication\n");
  1937. if (!txn) {
  1938. cnss_pr_err("Spurious indication\n");
  1939. return;
  1940. }
  1941. if (ind_msg->pwr_pin_result_valid)
  1942. plat_priv->pin_result.fw_pwr_pin_result =
  1943. ind_msg->pwr_pin_result;
  1944. if (ind_msg->phy_io_pin_result_valid)
  1945. plat_priv->pin_result.fw_phy_io_pin_result =
  1946. ind_msg->phy_io_pin_result;
  1947. if (ind_msg->rf_pin_result_valid)
  1948. plat_priv->pin_result.fw_rf_pin_result = ind_msg->rf_pin_result;
  1949. cnss_pr_dbg("Pin connect Result: pwr_pin: 0x%x phy_io_pin: 0x%x rf_io_pin: 0x%x\n",
  1950. ind_msg->pwr_pin_result, ind_msg->phy_io_pin_result,
  1951. ind_msg->rf_pin_result);
  1952. }
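/* Report the size of the downloaded calibration file to firmware. */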
  1953. int cnss_wlfw_cal_report_req_send_sync(struct cnss_plat_data *plat_priv,
  1954. u32 cal_file_download_size)
  1955. {
  1956. struct wlfw_cal_report_req_msg_v01 req = {0};
  1957. struct wlfw_cal_report_resp_msg_v01 resp = {0};
  1958. struct qmi_txn txn;
  1959. int ret = 0;
  1960. cnss_pr_dbg("Sending cal file report request. File size: %d, state: 0x%lx\n",
  1961. cal_file_download_size, plat_priv->driver_state);
  1962. req.cal_file_download_size_valid = 1;
  1963. req.cal_file_download_size = cal_file_download_size;
  1964. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1965. wlfw_cal_report_resp_msg_v01_ei, &resp);
  1966. if (ret < 0) {
  1967. cnss_pr_err("Failed to initialize txn for Cal Report request, err: %d\n",
  1968. ret);
  1969. goto out;
  1970. }
  1971. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1972. QMI_WLFW_CAL_REPORT_REQ_V01,
  1973. WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
  1974. wlfw_cal_report_req_msg_v01_ei, &req);
  1975. if (ret < 0) {
  1976. qmi_txn_cancel(&txn);
  1977. cnss_pr_err("Failed to send Cal Report request, err: %d\n",
  1978. ret);
  1979. goto out;
  1980. }
  1981. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1982. if (ret < 0) {
  1983. cnss_pr_err("Failed to wait for response of Cal Report request, err: %d\n",
  1984. ret);
  1985. goto out;
  1986. }
  1987. if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
  1988. cnss_pr_err("Cal Report request failed, result: %d, err: %d\n",
  1989. resp.resp.result, resp.resp.error);
  1990. ret = -resp.resp.result;
  1991. goto out;
  1992. }
  1993. out:
  1994. return ret;
  1995. }
  1996. static void cnss_wlfw_cal_done_ind_cb(struct qmi_handle *qmi_wlfw,
  1997. struct sockaddr_qrtr *sq,
  1998. struct qmi_txn *txn, const void *data)
  1999. {
  2000. struct cnss_plat_data *plat_priv =
  2001. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2002. const struct wlfw_cal_done_ind_msg_v01 *ind = data;
  2003. struct cnss_cal_info *cal_info;
  2004. cnss_pr_dbg("Received Cal done indication. File size: %d\n",
  2005. ind->cal_file_upload_size);
  2006. cnss_pr_info("Calibration took %d ms\n",
  2007. jiffies_to_msecs(jiffies - plat_priv->cal_time));
  2008. if (!txn) {
  2009. cnss_pr_err("Spurious indication\n");
  2010. return;
  2011. }
  2012. if (ind->cal_file_upload_size_valid)
  2013. plat_priv->cal_file_size = ind->cal_file_upload_size;
  2014. cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
  2015. if (!cal_info)
  2016. return;
  2017. cal_info->cal_status = CNSS_CAL_DONE;
  2018. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
  2019. 0, cal_info);
  2020. }
  2021. static void cnss_wlfw_qdss_trace_req_mem_ind_cb(struct qmi_handle *qmi_wlfw,
  2022. struct sockaddr_qrtr *sq,
  2023. struct qmi_txn *txn,
  2024. const void *data)
  2025. {
  2026. struct cnss_plat_data *plat_priv =
  2027. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2028. const struct wlfw_qdss_trace_req_mem_ind_msg_v01 *ind_msg = data;
  2029. int i;
  2030. cnss_pr_dbg("Received QMI WLFW QDSS trace request mem indication\n");
  2031. if (!txn) {
  2032. cnss_pr_err("Spurious indication\n");
  2033. return;
  2034. }
  2035. if (plat_priv->qdss_mem_seg_len) {
  2036. cnss_pr_err("Ignore double allocation for QDSS trace, current len %u\n",
  2037. plat_priv->qdss_mem_seg_len);
  2038. return;
  2039. }
  2040. plat_priv->qdss_mem_seg_len = ind_msg->mem_seg_len;
  2041. for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
  2042. cnss_pr_dbg("QDSS requests for memory, size: 0x%x, type: %u\n",
  2043. ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
  2044. plat_priv->qdss_mem[i].type = ind_msg->mem_seg[i].type;
  2045. plat_priv->qdss_mem[i].size = ind_msg->mem_seg[i].size;
  2046. }
  2047. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM,
  2048. 0, NULL);
  2049. }
  2050. /**
  2051. * cnss_wlfw_fw_mem_file_save_ind_cb: Save given FW mem to filesystem
  2052. *
  2053. * QDSS_TRACE_SAVE_IND feature is overloaded to provide any host allocated
  2054. * fw memory segment for dumping to file system. Only one type of mem can be
  2055. * saved per indication and is provided in mem seg index 0.
  2056. *
  2057. * Return: None
  2058. */
  2059. static void cnss_wlfw_fw_mem_file_save_ind_cb(struct qmi_handle *qmi_wlfw,
  2060. struct sockaddr_qrtr *sq,
  2061. struct qmi_txn *txn,
  2062. const void *data)
  2063. {
  2064. struct cnss_plat_data *plat_priv =
  2065. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2066. const struct wlfw_qdss_trace_save_ind_msg_v01 *ind_msg = data;
  2067. struct cnss_qmi_event_fw_mem_file_save_data *event_data;
  2068. int i = 0;
  2069. if (!txn || !data) {
  2070. cnss_pr_err("Spurious indication\n");
  2071. return;
  2072. }
  2073. cnss_pr_dbg("QMI fw_mem_file_save: source: %d mem_seg: %d type: %u len: %u\n",
  2074. ind_msg->source, ind_msg->mem_seg_valid,
  2075. ind_msg->mem_seg[0].type, ind_msg->mem_seg_len);
  2076. event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
  2077. if (!event_data)
  2078. return;
  2079. event_data->mem_type = ind_msg->mem_seg[0].type;
  2080. event_data->mem_seg_len = ind_msg->mem_seg_len;
  2081. event_data->total_size = ind_msg->total_size;
  2082. if (ind_msg->mem_seg_valid) {
  2083. if (ind_msg->mem_seg_len > QMI_WLFW_MAX_STR_LEN_V01) {
  2084. cnss_pr_err("Invalid seg len indication\n");
  2085. goto free_event_data;
  2086. }
  2087. for (i = 0; i < ind_msg->mem_seg_len; i++) {
  2088. event_data->mem_seg[i].addr = ind_msg->mem_seg[i].addr;
  2089. event_data->mem_seg[i].size = ind_msg->mem_seg[i].size;
  2090. if (event_data->mem_type != ind_msg->mem_seg[i].type) {
  2091. cnss_pr_err("FW Mem file save ind cannot have multiple mem types\n");
  2092. goto free_event_data;
  2093. }
  2094. cnss_pr_dbg("seg-%d: addr 0x%llx size 0x%x\n",
  2095. i, ind_msg->mem_seg[i].addr,
  2096. ind_msg->mem_seg[i].size);
  2097. }
  2098. }
  2099. if (ind_msg->file_name_valid)
  2100. strlcpy(event_data->file_name, ind_msg->file_name,
  2101. QMI_WLFW_MAX_STR_LEN_V01 + 1);
  2102. if (ind_msg->source == 1) {
  2103. if (!ind_msg->file_name_valid)
  2104. strlcpy(event_data->file_name, "qdss_trace_wcss_etb",
  2105. QMI_WLFW_MAX_STR_LEN_V01 + 1);
  2106. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA,
  2107. 0, event_data);
  2108. } else {
  2109. if (event_data->mem_type == QMI_WLFW_MEM_QDSS_V01) {
  2110. if (!ind_msg->file_name_valid)
  2111. strlcpy(event_data->file_name, "qdss_trace_ddr",
  2112. QMI_WLFW_MAX_STR_LEN_V01 + 1);
  2113. } else {
  2114. if (!ind_msg->file_name_valid)
  2115. strlcpy(event_data->file_name, "fw_mem_dump",
  2116. QMI_WLFW_MAX_STR_LEN_V01 + 1);
  2117. }
  2118. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_MEM_FILE_SAVE,
  2119. 0, event_data);
  2120. }
  2121. return;
  2122. free_event_data:
  2123. kfree(event_data);
  2124. }
  2125. static void cnss_wlfw_qdss_trace_free_ind_cb(struct qmi_handle *qmi_wlfw,
  2126. struct sockaddr_qrtr *sq,
  2127. struct qmi_txn *txn,
  2128. const void *data)
  2129. {
  2130. struct cnss_plat_data *plat_priv =
  2131. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2132. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_FREE,
  2133. 0, NULL);
  2134. }
  2135. static void cnss_wlfw_respond_get_info_ind_cb(struct qmi_handle *qmi_wlfw,
  2136. struct sockaddr_qrtr *sq,
  2137. struct qmi_txn *txn,
  2138. const void *data)
  2139. {
  2140. struct cnss_plat_data *plat_priv =
  2141. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2142. const struct wlfw_respond_get_info_ind_msg_v01 *ind_msg = data;
  2143. cnss_pr_buf("Received QMI WLFW respond get info indication\n");
  2144. if (!txn) {
  2145. cnss_pr_err("Spurious indication\n");
  2146. return;
  2147. }
  2148. cnss_pr_buf("Extract message with event length: %d, type: %d, is last: %d, seq no: %d\n",
  2149. ind_msg->data_len, ind_msg->type,
  2150. ind_msg->is_last, ind_msg->seq_no);
  2151. if (plat_priv->get_info_cb_ctx && plat_priv->get_info_cb)
  2152. plat_priv->get_info_cb(plat_priv->get_info_cb_ctx,
  2153. (void *)ind_msg->data,
  2154. ind_msg->data_len);
  2155. }
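/* Relay a firmware TWT configuration indication to the IMS QMI service;
 * requires the IMS QMI connection to be established.
 */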
  2156. static int cnss_ims_wfc_call_twt_cfg_send_sync
  2157. (struct cnss_plat_data *plat_priv,
  2158. const struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg)
  2159. {
  2160. struct ims_private_service_wfc_call_twt_config_req_msg_v01 *req;
  2161. struct ims_private_service_wfc_call_twt_config_rsp_msg_v01 *resp;
  2162. struct qmi_txn txn;
  2163. int ret = 0;
  2164. if (!test_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state)) {
  2165. cnss_pr_err("Drop FW WFC indication as IMS QMI not connected\n");
  2166. return -EINVAL;
  2167. }
  2168. req = kzalloc(sizeof(*req), GFP_KERNEL);
  2169. if (!req)
  2170. return -ENOMEM;
  2171. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  2172. if (!resp) {
  2173. kfree(req);
  2174. return -ENOMEM;
  2175. }
  2176. req->twt_sta_start_valid = ind_msg->twt_sta_start_valid;
  2177. req->twt_sta_start = ind_msg->twt_sta_start;
  2178. req->twt_sta_int_valid = ind_msg->twt_sta_int_valid;
  2179. req->twt_sta_int = ind_msg->twt_sta_int;
  2180. req->twt_sta_upo_valid = ind_msg->twt_sta_upo_valid;
  2181. req->twt_sta_upo = ind_msg->twt_sta_upo;
  2182. req->twt_sta_sp_valid = ind_msg->twt_sta_sp_valid;
  2183. req->twt_sta_sp = ind_msg->twt_sta_sp;
  2184. req->twt_sta_dl_valid = ind_msg->twt_sta_dl_valid;
  2185. req->twt_sta_dl = ind_msg->twt_sta_dl;
  2186. req->twt_sta_config_changed_valid =
  2187. ind_msg->twt_sta_config_changed_valid;
  2188. req->twt_sta_config_changed = ind_msg->twt_sta_config_changed;
  2189. cnss_pr_dbg("CNSS->IMS: TWT_CFG_REQ: state: 0x%lx\n",
  2190. plat_priv->driver_state);
  2191. ret =
  2192. qmi_txn_init(&plat_priv->ims_qmi, &txn,
  2193. ims_private_service_wfc_call_twt_config_rsp_msg_v01_ei,
  2194. resp);
  2195. if (ret < 0) {
  2196. cnss_pr_err("CNSS->IMS: TWT_CFG_REQ: QMI Txn Init Err: %d\n",
  2197. ret);
  2198. goto out;
  2199. }
  2200. ret =
  2201. qmi_send_request(&plat_priv->ims_qmi, NULL, &txn,
  2202. QMI_IMS_PRIVATE_SERVICE_WFC_CALL_TWT_CONFIG_REQ_V01,
  2203. IMS_PRIVATE_SERVICE_WFC_CALL_TWT_CONFIG_REQ_MSG_V01_MAX_MSG_LEN,
  2204. ims_private_service_wfc_call_twt_config_req_msg_v01_ei, req);
  2205. if (ret < 0) {
  2206. qmi_txn_cancel(&txn);
  2207. cnss_pr_err("CNSS->IMS: TWT_CFG_REQ: QMI Send Err: %d\n", ret);
  2208. goto out;
  2209. }
  2210. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  2211. if (ret < 0) {
  2212. cnss_pr_err("IMS->CNSS: TWT_CFG_RSP: QMI Wait Err: %d\n", ret);
  2213. goto out;
  2214. }
  2215. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  2216. cnss_pr_err("IMS->CNSS: TWT_CFG_RSP: Result: %d Err: %d\n",
  2217. resp->resp.result, resp->resp.error);
  2218. ret = -resp->resp.result;
  2219. goto out;
  2220. }
  2221. ret = 0;
  2222. out:
  2223. kfree(req);
  2224. kfree(resp);
  2225. return ret;
  2226. }
  2227. int cnss_process_twt_cfg_ind_event(struct cnss_plat_data *plat_priv,
  2228. void *data)
  2229. {
  2230. int ret;
  2231. struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg = data;
  2232. ret = cnss_ims_wfc_call_twt_cfg_send_sync(plat_priv, ind_msg);
  2233. kfree(data);
  2234. return ret;
  2235. }
  2236. static void cnss_wlfw_process_twt_cfg_ind(struct qmi_handle *qmi_wlfw,
  2237. struct sockaddr_qrtr *sq,
  2238. struct qmi_txn *txn,
  2239. const void *data)
  2240. {
  2241. struct cnss_plat_data *plat_priv =
  2242. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2243. const struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg = data;
  2244. struct wlfw_wfc_call_twt_config_ind_msg_v01 *event_data;
  2245. if (!txn) {
  2246. cnss_pr_err("FW->CNSS: TWT_CFG_IND: Spurious indication\n");
  2247. return;
  2248. }
  2249. if (!ind_msg) {
  2250. cnss_pr_err("FW->CNSS: TWT_CFG_IND: Invalid indication\n");
  2251. return;
  2252. }
  2253. cnss_pr_dbg("FW->CNSS: TWT_CFG_IND: %x %llx, %x %x, %x %x, %x %x, %x %x, %x %x\n",
  2254. ind_msg->twt_sta_start_valid, ind_msg->twt_sta_start,
  2255. ind_msg->twt_sta_int_valid, ind_msg->twt_sta_int,
  2256. ind_msg->twt_sta_upo_valid, ind_msg->twt_sta_upo,
  2257. ind_msg->twt_sta_sp_valid, ind_msg->twt_sta_sp,
  2258. ind_msg->twt_sta_dl_valid, ind_msg->twt_sta_dl,
  2259. ind_msg->twt_sta_config_changed_valid,
  2260. ind_msg->twt_sta_config_changed);
  2261. event_data = kmemdup(ind_msg, sizeof(*event_data), GFP_KERNEL);
  2262. if (!event_data)
  2263. return;
  2264. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND, 0,
  2265. event_data);
  2266. }
  2267. static struct qmi_msg_handler qmi_wlfw_msg_handlers[] = {
  2268. {
  2269. .type = QMI_INDICATION,
  2270. .msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
  2271. .ei = wlfw_request_mem_ind_msg_v01_ei,
  2272. .decoded_size = sizeof(struct wlfw_request_mem_ind_msg_v01),
  2273. .fn = cnss_wlfw_request_mem_ind_cb
  2274. },
  2275. {
  2276. .type = QMI_INDICATION,
  2277. .msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
  2278. .ei = wlfw_fw_mem_ready_ind_msg_v01_ei,
  2279. .decoded_size = sizeof(struct wlfw_fw_mem_ready_ind_msg_v01),
  2280. .fn = cnss_wlfw_fw_mem_ready_ind_cb
  2281. },
  2282. {
  2283. .type = QMI_INDICATION,
  2284. .msg_id = QMI_WLFW_FW_READY_IND_V01,
  2285. .ei = wlfw_fw_ready_ind_msg_v01_ei,
  2286. .decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
  2287. .fn = cnss_wlfw_fw_ready_ind_cb
  2288. },
  2289. {
  2290. .type = QMI_INDICATION,
  2291. .msg_id = QMI_WLFW_FW_INIT_DONE_IND_V01,
  2292. .ei = wlfw_fw_init_done_ind_msg_v01_ei,
  2293. .decoded_size = sizeof(struct wlfw_fw_init_done_ind_msg_v01),
  2294. .fn = cnss_wlfw_fw_init_done_ind_cb
  2295. },
  2296. {
  2297. .type = QMI_INDICATION,
  2298. .msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01,
  2299. .ei = wlfw_pin_connect_result_ind_msg_v01_ei,
  2300. .decoded_size =
  2301. sizeof(struct wlfw_pin_connect_result_ind_msg_v01),
  2302. .fn = cnss_wlfw_pin_result_ind_cb
  2303. },
  2304. {
  2305. .type = QMI_INDICATION,
  2306. .msg_id = QMI_WLFW_CAL_DONE_IND_V01,
  2307. .ei = wlfw_cal_done_ind_msg_v01_ei,
  2308. .decoded_size = sizeof(struct wlfw_cal_done_ind_msg_v01),
  2309. .fn = cnss_wlfw_cal_done_ind_cb
  2310. },
  2311. {
  2312. .type = QMI_INDICATION,
  2313. .msg_id = QMI_WLFW_QDSS_TRACE_REQ_MEM_IND_V01,
  2314. .ei = wlfw_qdss_trace_req_mem_ind_msg_v01_ei,
  2315. .decoded_size =
  2316. sizeof(struct wlfw_qdss_trace_req_mem_ind_msg_v01),
  2317. .fn = cnss_wlfw_qdss_trace_req_mem_ind_cb
  2318. },
  2319. {
  2320. .type = QMI_INDICATION,
  2321. .msg_id = QMI_WLFW_QDSS_TRACE_SAVE_IND_V01,
  2322. .ei = wlfw_qdss_trace_save_ind_msg_v01_ei,
  2323. .decoded_size =
  2324. sizeof(struct wlfw_qdss_trace_save_ind_msg_v01),
  2325. .fn = cnss_wlfw_fw_mem_file_save_ind_cb
  2326. },
  2327. {
  2328. .type = QMI_INDICATION,
  2329. .msg_id = QMI_WLFW_QDSS_TRACE_FREE_IND_V01,
  2330. .ei = wlfw_qdss_trace_free_ind_msg_v01_ei,
  2331. .decoded_size =
  2332. sizeof(struct wlfw_qdss_trace_free_ind_msg_v01),
  2333. .fn = cnss_wlfw_qdss_trace_free_ind_cb
  2334. },
  2335. {
  2336. .type = QMI_INDICATION,
  2337. .msg_id = QMI_WLFW_RESPOND_GET_INFO_IND_V01,
  2338. .ei = wlfw_respond_get_info_ind_msg_v01_ei,
  2339. .decoded_size =
  2340. sizeof(struct wlfw_respond_get_info_ind_msg_v01),
  2341. .fn = cnss_wlfw_respond_get_info_ind_cb
  2342. },
  2343. {
  2344. .type = QMI_INDICATION,
  2345. .msg_id = QMI_WLFW_WFC_CALL_TWT_CONFIG_IND_V01,
  2346. .ei = wlfw_wfc_call_twt_config_ind_msg_v01_ei,
  2347. .decoded_size =
  2348. sizeof(struct wlfw_wfc_call_twt_config_ind_msg_v01),
  2349. .fn = cnss_wlfw_process_twt_cfg_ind
  2350. },
  2351. {}
  2352. };
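/* Connect the WLFW QMI socket to the server node/port carried in the
 * server-arrive event data.
 */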
  2353. static int cnss_wlfw_connect_to_server(struct cnss_plat_data *plat_priv,
  2354. void *data)
  2355. {
  2356. struct cnss_qmi_event_server_arrive_data *event_data = data;
  2357. struct qmi_handle *qmi_wlfw = &plat_priv->qmi_wlfw;
  2358. struct sockaddr_qrtr sq = { 0 };
  2359. int ret = 0;
  2360. if (!event_data)
  2361. return -EINVAL;
  2362. sq.sq_family = AF_QIPCRTR;
  2363. sq.sq_node = event_data->node;
  2364. sq.sq_port = event_data->port;
  2365. ret = kernel_connect(qmi_wlfw->sock, (struct sockaddr *)&sq,
  2366. sizeof(sq), 0);
  2367. if (ret < 0) {
  2368. cnss_pr_err("Failed to connect to QMI WLFW remote service port\n");
  2369. goto out;
  2370. }
  2371. set_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state);
  2372. cnss_pr_info("QMI WLFW service connected, state: 0x%lx\n",
  2373. plat_priv->driver_state);
  2374. kfree(data);
  2375. return 0;
  2376. out:
  2377. CNSS_QMI_ASSERT();
  2378. kfree(data);
  2379. return ret;
  2380. }
  2381. int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv, void *data)
  2382. {
  2383. int ret = 0;
  2384. if (!plat_priv)
  2385. return -ENODEV;
  2386. if (test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state)) {
  2387. cnss_pr_err("Unexpected WLFW server arrive\n");
  2388. CNSS_ASSERT(0);
  2389. return -EINVAL;
  2390. }
  2391. cnss_ignore_qmi_failure(false);
  2392. ret = cnss_wlfw_connect_to_server(plat_priv, data);
  2393. if (ret < 0)
  2394. goto out;
  2395. ret = cnss_wlfw_ind_register_send_sync(plat_priv);
  2396. if (ret < 0) {
  2397. if (ret == -EALREADY)
  2398. ret = 0;
  2399. goto out;
  2400. }
  2401. ret = cnss_wlfw_host_cap_send_sync(plat_priv);
  2402. if (ret < 0)
  2403. goto out;
  2404. return 0;
  2405. out:
  2406. return ret;
  2407. }
  2408. int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv)
  2409. {
  2410. int ret;
  2411. if (!plat_priv)
  2412. return -ENODEV;
  2413. clear_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state);
  2414. cnss_pr_info("QMI WLFW service disconnected, state: 0x%lx\n",
  2415. plat_priv->driver_state);
  2416. cnss_qmi_deinit(plat_priv);
  2417. clear_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state);
  2418. ret = cnss_qmi_init(plat_priv);
  2419. if (ret < 0) {
  2420. cnss_pr_err("QMI WLFW service registration failed, ret: %d\n", ret);
  2421. CNSS_ASSERT(0);
  2422. }
  2423. return 0;
  2424. }
  2425. static int wlfw_new_server(struct qmi_handle *qmi_wlfw,
  2426. struct qmi_service *service)
  2427. {
  2428. struct cnss_plat_data *plat_priv =
  2429. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2430. struct cnss_qmi_event_server_arrive_data *event_data;
  2431. if (plat_priv && test_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state)) {
  2432. cnss_pr_info("WLFW server delete in progress, Ignore server arrive, state: 0x%lx\n",
  2433. plat_priv->driver_state);
  2434. return 0;
  2435. }
  2436. cnss_pr_dbg("WLFW server arriving: node %u port %u\n",
  2437. service->node, service->port);
  2438. event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
  2439. if (!event_data)
  2440. return -ENOMEM;
  2441. event_data->node = service->node;
  2442. event_data->port = service->port;
  2443. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_SERVER_ARRIVE,
  2444. 0, event_data);
  2445. return 0;
  2446. }
  2447. static void wlfw_del_server(struct qmi_handle *qmi_wlfw,
  2448. struct qmi_service *service)
  2449. {
  2450. struct cnss_plat_data *plat_priv =
  2451. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2452. if (plat_priv && test_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state)) {
  2453. cnss_pr_info("WLFW server delete in progress, Ignore server delete, state: 0x%lx\n",
  2454. plat_priv->driver_state);
  2455. return;
  2456. }
  2457. cnss_pr_dbg("WLFW server exiting\n");
  2458. if (plat_priv) {
  2459. cnss_ignore_qmi_failure(true);
  2460. set_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state);
  2461. }
  2462. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_SERVER_EXIT,
  2463. 0, NULL);
  2464. }
  2465. static struct qmi_ops qmi_wlfw_ops = {
  2466. .new_server = wlfw_new_server,
  2467. .del_server = wlfw_del_server,
  2468. };
  2469. int cnss_qmi_init(struct cnss_plat_data *plat_priv)
  2470. {
  2471. int ret = 0;
  2472. ret = qmi_handle_init(&plat_priv->qmi_wlfw,
  2473. QMI_WLFW_MAX_RECV_BUF_SIZE,
  2474. &qmi_wlfw_ops, qmi_wlfw_msg_handlers);
  2475. if (ret < 0) {
  2476. cnss_pr_err("Failed to initialize WLFW QMI handle, err: %d\n",
  2477. ret);
  2478. goto out;
  2479. }
  2480. ret = qmi_add_lookup(&plat_priv->qmi_wlfw, WLFW_SERVICE_ID_V01,
  2481. WLFW_SERVICE_VERS_V01, WLFW_SERVICE_INS_ID_V01);
  2482. if (ret < 0)
  2483. cnss_pr_err("Failed to add WLFW QMI lookup, err: %d\n", ret);
  2484. out:
  2485. return ret;
  2486. }
  2487. void cnss_qmi_deinit(struct cnss_plat_data *plat_priv)
  2488. {
  2489. qmi_handle_release(&plat_priv->qmi_wlfw);
  2490. }
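/* Query the WLAN MAC address from the DMS QMI service and cache it in
 * plat_priv->dms when a valid address is returned.
 */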
  2491. int cnss_qmi_get_dms_mac(struct cnss_plat_data *plat_priv)
  2492. {
  2493. struct dms_get_mac_address_req_msg_v01 req;
  2494. struct dms_get_mac_address_resp_msg_v01 resp;
  2495. struct qmi_txn txn;
  2496. int ret = 0;
  2497. if (!test_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state)) {
  2498. cnss_pr_err("DMS QMI connection not established\n");
  2499. return -EINVAL;
  2500. }
  2501. cnss_pr_dbg("Requesting DMS MAC address\n");
  2502. memset(&resp, 0, sizeof(resp));
  2503. ret = qmi_txn_init(&plat_priv->qmi_dms, &txn,
  2504. dms_get_mac_address_resp_msg_v01_ei, &resp);
  2505. if (ret < 0) {
  2506. cnss_pr_err("Failed to initialize txn for dms, err: %d\n",
  2507. ret);
  2508. goto out;
  2509. }
  2510. req.device = DMS_DEVICE_MAC_WLAN_V01;
  2511. ret = qmi_send_request(&plat_priv->qmi_dms, NULL, &txn,
  2512. QMI_DMS_GET_MAC_ADDRESS_REQ_V01,
  2513. DMS_GET_MAC_ADDRESS_REQ_MSG_V01_MAX_MSG_LEN,
  2514. dms_get_mac_address_req_msg_v01_ei, &req);
  2515. if (ret < 0) {
  2516. qmi_txn_cancel(&txn);
  2517. cnss_pr_err("Failed to send QMI_DMS_GET_MAC_ADDRESS_REQ_V01, err: %d\n",
  2518. ret);
  2519. goto out;
  2520. }
  2521. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  2522. if (ret < 0) {
  2523. cnss_pr_err("Failed to wait for QMI_DMS_GET_MAC_ADDRESS_RESP_V01, err: %d\n",
  2524. ret);
  2525. goto out;
  2526. }
  2527. if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
  2528. cnss_pr_err("QMI_DMS_GET_MAC_ADDRESS_REQ_V01 failed, result: %d, err: %d\n",
  2529. resp.resp.result, resp.resp.error);
  2530. ret = -resp.resp.result;
  2531. goto out;
  2532. }
  2533. if (!resp.mac_address_valid ||
  2534. resp.mac_address_len != QMI_WLFW_MAC_ADDR_SIZE_V01) {
  2535. cnss_pr_err("Invalid MAC address received from DMS\n");
  2536. plat_priv->dms.mac_valid = false;
  2537. goto out;
  2538. }
  2539. plat_priv->dms.mac_valid = true;
  2540. memcpy(plat_priv->dms.mac, resp.mac_address, QMI_WLFW_MAC_ADDR_SIZE_V01);
  2541. cnss_pr_info("Received DMS MAC: [%pM]\n", plat_priv->dms.mac);
  2542. out:
  2543. return ret;
  2544. }
  2545. static int cnss_dms_connect_to_server(struct cnss_plat_data *plat_priv,
  2546. unsigned int node, unsigned int port)
  2547. {
  2548. struct qmi_handle *qmi_dms = &plat_priv->qmi_dms;
  2549. struct sockaddr_qrtr sq = {0};
  2550. int ret = 0;
  2551. sq.sq_family = AF_QIPCRTR;
  2552. sq.sq_node = node;
  2553. sq.sq_port = port;
  2554. ret = kernel_connect(qmi_dms->sock, (struct sockaddr *)&sq,
  2555. sizeof(sq), 0);
  2556. if (ret < 0) {
  2557. cnss_pr_err("Failed to connect to QMI DMS remote service Node: %d Port: %d\n",
  2558. node, port);
  2559. goto out;
  2560. }
  2561. set_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state);
  2562. cnss_pr_info("QMI DMS service connected, state: 0x%lx\n",
  2563. plat_priv->driver_state);
  2564. out:
  2565. return ret;
  2566. }
  2567. static int dms_new_server(struct qmi_handle *qmi_dms,
  2568. struct qmi_service *service)
  2569. {
  2570. struct cnss_plat_data *plat_priv =
  2571. container_of(qmi_dms, struct cnss_plat_data, qmi_dms);
  2572. if (!service)
  2573. return -EINVAL;
  2574. return cnss_dms_connect_to_server(plat_priv, service->node,
  2575. service->port);
  2576. }
  2577. static void dms_del_server(struct qmi_handle *qmi_dms,
  2578. struct qmi_service *service)
  2579. {
  2580. struct cnss_plat_data *plat_priv =
  2581. container_of(qmi_dms, struct cnss_plat_data, qmi_dms);
  2582. clear_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state);
  2583. cnss_pr_info("QMI DMS service disconnected, state: 0x%lx\n",
  2584. plat_priv->driver_state);
  2585. }
  2586. static struct qmi_ops qmi_dms_ops = {
  2587. .new_server = dms_new_server,
  2588. .del_server = dms_del_server,
  2589. };
  2590. int cnss_dms_init(struct cnss_plat_data *plat_priv)
  2591. {
  2592. int ret = 0;
  2593. ret = qmi_handle_init(&plat_priv->qmi_dms, DMS_QMI_MAX_MSG_LEN,
  2594. &qmi_dms_ops, NULL);
  2595. if (ret < 0) {
  2596. cnss_pr_err("Failed to initialize DMS handle, err: %d\n", ret);
  2597. goto out;
  2598. }
  2599. ret = qmi_add_lookup(&plat_priv->qmi_dms, DMS_SERVICE_ID_V01,
  2600. DMS_SERVICE_VERS_V01, 0);
  2601. if (ret < 0)
  2602. cnss_pr_err("Failed to add DMS lookup, err: %d\n", ret);
  2603. out:
  2604. return ret;
  2605. }
  2606. void cnss_dms_deinit(struct cnss_plat_data *plat_priv)
  2607. {
  2608. qmi_handle_release(&plat_priv->qmi_dms);
  2609. }
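/* Ask the COEX service to switch the shared antenna to WLAN and cache
 * the returned grant in plat_priv->grant when valid.
 */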
  2610. int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv)
  2611. {
  2612. int ret;
  2613. struct coex_antenna_switch_to_wlan_req_msg_v01 *req;
  2614. struct coex_antenna_switch_to_wlan_resp_msg_v01 *resp;
  2615. struct qmi_txn txn;
  2616. if (!plat_priv)
  2617. return -ENODEV;
  2618. cnss_pr_dbg("Sending coex antenna switch_to_wlan\n");
  2619. req = kzalloc(sizeof(*req), GFP_KERNEL);
  2620. if (!req)
  2621. return -ENOMEM;
  2622. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  2623. if (!resp) {
  2624. kfree(req);
  2625. return -ENOMEM;
  2626. }
  2627. req->antenna = plat_priv->antenna;
  2628. ret = qmi_txn_init(&plat_priv->coex_qmi, &txn,
  2629. coex_antenna_switch_to_wlan_resp_msg_v01_ei, resp);
  2630. if (ret < 0) {
  2631. cnss_pr_err("Fail to init txn for coex antenna switch_to_wlan resp %d\n",
  2632. ret);
  2633. goto out;
  2634. }
  2635. ret = qmi_send_request
  2636. (&plat_priv->coex_qmi, NULL, &txn,
  2637. QMI_COEX_SWITCH_ANTENNA_TO_WLAN_REQ_V01,
  2638. COEX_ANTENNA_SWITCH_TO_WLAN_REQ_MSG_V01_MAX_MSG_LEN,
  2639. coex_antenna_switch_to_wlan_req_msg_v01_ei, req);
  2640. if (ret < 0) {
  2641. qmi_txn_cancel(&txn);
  2642. cnss_pr_err("Fail to send coex antenna switch_to_wlan req %d\n",
  2643. ret);
  2644. goto out;
  2645. }
  2646. ret = qmi_txn_wait(&txn, COEX_TIMEOUT);
  2647. if (ret < 0) {
  2648. cnss_pr_err("Coex antenna switch_to_wlan resp wait failed with ret %d\n",
  2649. ret);
  2650. goto out;
  2651. } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  2652. cnss_pr_err("Coex antenna switch_to_wlan request rejected, result:%d error:%d\n",
  2653. resp->resp.result, resp->resp.error);
  2654. ret = -resp->resp.result;
  2655. goto out;
  2656. }
  2657. if (resp->grant_valid)
  2658. plat_priv->grant = resp->grant;
  2659. cnss_pr_dbg("Coex antenna grant: 0x%llx\n", resp->grant);
  2660. kfree(resp);
  2661. kfree(req);
  2662. return 0;
  2663. out:
  2664. kfree(resp);
  2665. kfree(req);
  2666. return ret;
  2667. }
  2668. int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv)
  2669. {
  2670. int ret;
  2671. struct coex_antenna_switch_to_mdm_req_msg_v01 *req;
  2672. struct coex_antenna_switch_to_mdm_resp_msg_v01 *resp;
  2673. struct qmi_txn txn;
  2674. if (!plat_priv)
  2675. return -ENODEV;
  2676. cnss_pr_dbg("Sending coex antenna switch_to_mdm\n");
  2677. req = kzalloc(sizeof(*req), GFP_KERNEL);
  2678. if (!req)
  2679. return -ENOMEM;
  2680. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  2681. if (!resp) {
  2682. kfree(req);
  2683. return -ENOMEM;
  2684. }
  2685. req->antenna = plat_priv->antenna;
  2686. ret = qmi_txn_init(&plat_priv->coex_qmi, &txn,
  2687. coex_antenna_switch_to_mdm_resp_msg_v01_ei, resp);
  2688. if (ret < 0) {
  2689. cnss_pr_err("Fail to init txn for coex antenna switch_to_mdm resp %d\n",
  2690. ret);
  2691. goto out;
  2692. }
  2693. ret = qmi_send_request
  2694. (&plat_priv->coex_qmi, NULL, &txn,
  2695. QMI_COEX_SWITCH_ANTENNA_TO_MDM_REQ_V01,
  2696. COEX_ANTENNA_SWITCH_TO_MDM_REQ_MSG_V01_MAX_MSG_LEN,
  2697. coex_antenna_switch_to_mdm_req_msg_v01_ei, req);
  2698. if (ret < 0) {
  2699. qmi_txn_cancel(&txn);
  2700. cnss_pr_err("Fail to send coex antenna switch_to_mdm req %d\n",
  2701. ret);
  2702. goto out;
  2703. }
  2704. ret = qmi_txn_wait(&txn, COEX_TIMEOUT);
  2705. if (ret < 0) {
  2706. cnss_pr_err("Coex antenna switch_to_mdm resp wait failed with ret %d\n",
  2707. ret);
  2708. goto out;
  2709. } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  2710. cnss_pr_err("Coex antenna switch_to_mdm request rejected, result:%d error:%d\n",
  2711. resp->resp.result, resp->resp.error);
  2712. ret = -resp->resp.result;
  2713. goto out;
  2714. }
  2715. kfree(resp);
  2716. kfree(req);
  2717. return 0;
  2718. out:
  2719. kfree(resp);
  2720. kfree(req);
  2721. return ret;
  2722. }
  2723. static int coex_new_server(struct qmi_handle *qmi,
  2724. struct qmi_service *service)
  2725. {
  2726. struct cnss_plat_data *plat_priv =
  2727. container_of(qmi, struct cnss_plat_data, coex_qmi);
  2728. struct sockaddr_qrtr sq = { 0 };
  2729. int ret = 0;
  2730. cnss_pr_dbg("COEX server arrive: node %u port %u\n",
  2731. service->node, service->port);
  2732. sq.sq_family = AF_QIPCRTR;
  2733. sq.sq_node = service->node;
  2734. sq.sq_port = service->port;
  2735. ret = kernel_connect(qmi->sock, (struct sockaddr *)&sq, sizeof(sq), 0);
  2736. if (ret < 0) {
  2737. cnss_pr_err("Fail to connect to remote service port\n");
  2738. return ret;
  2739. }
  2740. set_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state);
  2741. cnss_pr_dbg("COEX Server Connected: 0x%lx\n",
  2742. plat_priv->driver_state);
  2743. return 0;
  2744. }
  2745. static void coex_del_server(struct qmi_handle *qmi,
  2746. struct qmi_service *service)
  2747. {
  2748. struct cnss_plat_data *plat_priv =
  2749. container_of(qmi, struct cnss_plat_data, coex_qmi);
  2750. cnss_pr_dbg("COEX server exit\n");
  2751. clear_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state);
  2752. }
  2753. static struct qmi_ops coex_qmi_ops = {
  2754. .new_server = coex_new_server,
  2755. .del_server = coex_del_server,
  2756. };
  2757. int cnss_register_coex_service(struct cnss_plat_data *plat_priv)
  2758. {
int ret;
  2759. ret = qmi_handle_init(&plat_priv->coex_qmi,
  2760. COEX_SERVICE_MAX_MSG_LEN,
  2761. &coex_qmi_ops, NULL);
  2762. if (ret < 0)
  2763. return ret;
  2764. ret = qmi_add_lookup(&plat_priv->coex_qmi, COEX_SERVICE_ID_V01,
  2765. COEX_SERVICE_VERS_V01, 0);
  2766. return ret;
  2767. }

void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv)
{
	qmi_handle_release(&plat_priv->coex_qmi);
}

/* IMS Service */
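
/*
 * Subscribe to WFC call status indications from the IMS private service.
 * The request is sent asynchronously; the response is decoded through
 * qmi_ims_msg_handlers and handled in
 * ims_subscribe_for_indication_resp_cb().
 */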
int ims_subscribe_for_indication_send_async(struct cnss_plat_data *plat_priv)
{
	int ret;
	struct ims_private_service_subscribe_for_indications_req_msg_v01 *req;
	struct qmi_txn *txn;

	if (!plat_priv)
		return -ENODEV;

	cnss_pr_dbg("Sending ASYNC ims subscribe for indication\n");

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->wfc_call_status_valid = 1;
	req->wfc_call_status = 1;

	txn = &plat_priv->txn;
	ret = qmi_txn_init(&plat_priv->ims_qmi, txn, NULL, NULL);
	if (ret < 0) {
		cnss_pr_err("Fail to init txn for ims subscribe for indication resp %d\n",
			    ret);
		goto out;
	}

	ret = qmi_send_request
		(&plat_priv->ims_qmi, NULL, txn,
		 QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_V01,
		 IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_MSG_V01_MAX_MSG_LEN,
		 ims_private_service_subscribe_for_indications_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(txn);
		cnss_pr_err("Fail to send ims subscribe for indication req %d\n",
			    ret);
		goto out;
	}

	kfree(req);
	return 0;

out:
	kfree(req);
	return ret;
}
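
/* Response handler for the subscribe-for-indications request above. */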
static void ims_subscribe_for_indication_resp_cb(struct qmi_handle *qmi,
						 struct sockaddr_qrtr *sq,
						 struct qmi_txn *txn,
						 const void *data)
{
	const
	struct ims_private_service_subscribe_for_indications_rsp_msg_v01 *resp =
		data;

	cnss_pr_dbg("Received IMS subscribe indication response\n");

	if (!txn) {
		cnss_pr_err("spurious response\n");
		return;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("IMS subscribe for indication request rejected, result:%d error:%d\n",
			    resp->resp.result, resp->resp.error);
		txn->result = -resp->resp.result;
	}
}
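
/*
 * Process a queued WFC call status indication: forward it to the WLAN
 * firmware via cnss_wlfw_wfc_call_status_send_sync() and free the copy
 * allocated in cnss_ims_process_wfc_call_ind_cb().
 */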
int cnss_process_wfc_call_ind_event(struct cnss_plat_data *plat_priv,
				    void *data)
{
	int ret;
	struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg = data;

	ret = cnss_wlfw_wfc_call_status_send_sync(plat_priv, ind_msg);
	kfree(data);
	return ret;
}
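
/*
 * Indication handler for WFC call status. Duplicate the decoded message
 * and defer the actual handling to the driver event queue via
 * cnss_driver_event_post().
 */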
static void
cnss_ims_process_wfc_call_ind_cb(struct qmi_handle *ims_qmi,
				 struct sockaddr_qrtr *sq,
				 struct qmi_txn *txn, const void *data)
{
	struct cnss_plat_data *plat_priv =
		container_of(ims_qmi, struct cnss_plat_data, ims_qmi);
	const
	struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg = data;
	struct ims_private_service_wfc_call_status_ind_msg_v01 *event_data;

	if (!txn) {
		cnss_pr_err("IMS->CNSS: WFC_CALL_IND: Spurious indication\n");
		return;
	}

	if (!ind_msg) {
		cnss_pr_err("IMS->CNSS: WFC_CALL_IND: Invalid indication\n");
		return;
	}

	cnss_pr_dbg("IMS->CNSS: WFC_CALL_IND: %x, %x %x, %x %x, %x %llx, %x %x, %x %x\n",
		    ind_msg->wfc_call_active, ind_msg->all_wfc_calls_held_valid,
		    ind_msg->all_wfc_calls_held,
		    ind_msg->is_wfc_emergency_valid, ind_msg->is_wfc_emergency,
		    ind_msg->twt_ims_start_valid, ind_msg->twt_ims_start,
		    ind_msg->twt_ims_int_valid, ind_msg->twt_ims_int,
		    ind_msg->media_quality_valid, ind_msg->media_quality);

	event_data = kmemdup(ind_msg, sizeof(*event_data), GFP_KERNEL);
	if (!event_data)
		return;

	cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND,
			       0, event_data);
}
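
/* Message handlers for the IMS private service client. */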
static struct qmi_msg_handler qmi_ims_msg_handlers[] = {
	{
		.type = QMI_RESPONSE,
		.msg_id =
		QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_V01,
		.ei =
		ims_private_service_subscribe_for_indications_rsp_msg_v01_ei,
		.decoded_size =
		sizeof(struct
		       ims_private_service_subscribe_for_indications_rsp_msg_v01),
		.fn = ims_subscribe_for_indication_resp_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_IMS_PRIVATE_SERVICE_WFC_CALL_STATUS_IND_V01,
		.ei = ims_private_service_wfc_call_status_ind_msg_v01_ei,
		.decoded_size =
		sizeof(struct ims_private_service_wfc_call_status_ind_msg_v01),
		.fn = cnss_ims_process_wfc_call_ind_cb
	},
	{}
};
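
/*
 * QMI lookup callback: the IMS private service has appeared. Connect to
 * it and immediately subscribe for WFC call status indications.
 */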
static int ims_new_server(struct qmi_handle *qmi,
			  struct qmi_service *service)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi, struct cnss_plat_data, ims_qmi);
	struct sockaddr_qrtr sq = { 0 };
	int ret = 0;

	cnss_pr_dbg("IMS server arrive: node %u port %u\n",
		    service->node, service->port);

	sq.sq_family = AF_QIPCRTR;
	sq.sq_node = service->node;
	sq.sq_port = service->port;
	ret = kernel_connect(qmi->sock, (struct sockaddr *)&sq, sizeof(sq), 0);
	if (ret < 0) {
		cnss_pr_err("Fail to connect to remote service port\n");
		return ret;
	}

	set_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state);
	cnss_pr_dbg("IMS Server Connected: 0x%lx\n",
		    plat_priv->driver_state);

	ret = ims_subscribe_for_indication_send_async(plat_priv);
	return ret;
}

static void ims_del_server(struct qmi_handle *qmi,
			   struct qmi_service *service)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi, struct cnss_plat_data, ims_qmi);

	cnss_pr_dbg("IMS server exit\n");

	clear_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state);
}

static struct qmi_ops ims_qmi_ops = {
	.new_server = ims_new_server,
	.del_server = ims_del_server,
};

int cnss_register_ims_service(struct cnss_plat_data *plat_priv)
{
	int ret;

	ret = qmi_handle_init(&plat_priv->ims_qmi,
			      IMSPRIVATE_SERVICE_MAX_MSG_LEN,
			      &ims_qmi_ops, qmi_ims_msg_handlers);
	if (ret < 0)
		return ret;

	ret = qmi_add_lookup(&plat_priv->ims_qmi, IMSPRIVATE_SERVICE_ID_V01,
			     IMSPRIVATE_SERVICE_VERS_V01, 0);
	return ret;
}

void cnss_unregister_ims_service(struct cnss_plat_data *plat_priv)
{
	qmi_handle_release(&plat_priv->ims_qmi);
}