qmi.c 101 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821382238233824382538263827382838293830383138323833383438353836383738383839384038413842384338443845384638473848
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/module.h>
  7. #include <linux/soc/qcom/qmi.h>
  8. #include "bus.h"
  9. #include "debug.h"
  10. #include "main.h"
  11. #include "qmi.h"
  12. #include "genl.h"
  13. #define WLFW_SERVICE_INS_ID_V01 1
  14. #define WLFW_CLIENT_ID 0x4b4e454c
  15. #define BDF_FILE_NAME_PREFIX "bdwlan"
  16. #define ELF_BDF_FILE_NAME "bdwlan.elf"
  17. #define ELF_BDF_FILE_NAME_GF "bdwlang.elf"
  18. #define ELF_BDF_FILE_NAME_PREFIX "bdwlan.e"
  19. #define ELF_BDF_FILE_NAME_GF_PREFIX "bdwlang.e"
  20. #define BIN_BDF_FILE_NAME "bdwlan.bin"
  21. #define BIN_BDF_FILE_NAME_GF "bdwlang.bin"
  22. #define BIN_BDF_FILE_NAME_PREFIX "bdwlan.b"
  23. #define BIN_BDF_FILE_NAME_GF_PREFIX "bdwlang.b"
  24. #define REGDB_FILE_NAME "regdb.bin"
  25. #define HDS_FILE_NAME "hds.bin"
  26. #define CHIP_ID_GF_MASK 0x10
  27. #define CONN_ROAM_FILE_NAME "wlan-connection-roaming"
  28. #define INI_EXT ".ini"
  29. #define INI_FILE_NAME_LEN 100
  30. #define QDSS_TRACE_CONFIG_FILE "qdss_trace_config"
  31. #ifdef CONFIG_CNSS2_DEBUG
  32. #define QDSS_DEBUG_FILE_STR "debug_"
  33. #else
  34. #define QDSS_DEBUG_FILE_STR ""
  35. #endif
  36. #define HW_V1_NUMBER "v1"
  37. #define HW_V2_NUMBER "v2"
  38. #define QMI_WLFW_TIMEOUT_MS (plat_priv->ctrl_params.qmi_timeout)
  39. #define QMI_WLFW_TIMEOUT_JF msecs_to_jiffies(QMI_WLFW_TIMEOUT_MS)
  40. #define COEX_TIMEOUT QMI_WLFW_TIMEOUT_JF
  41. #define IMS_TIMEOUT QMI_WLFW_TIMEOUT_JF
  42. #define QMI_WLFW_MAX_RECV_BUF_SIZE SZ_8K
  43. #define IMSPRIVATE_SERVICE_MAX_MSG_LEN SZ_8K
  44. #define DMS_QMI_MAX_MSG_LEN SZ_256
  45. #define MAX_SHADOW_REG_RESERVED 2
  46. #define MAX_NUM_SHADOW_REG_V3 (QMI_WLFW_MAX_NUM_SHADOW_REG_V3_USAGE_V01 - \
  47. MAX_SHADOW_REG_RESERVED)
  48. #define QMI_WLFW_MAC_READY_TIMEOUT_MS 50
  49. #define QMI_WLFW_MAC_READY_MAX_RETRY 200
#ifdef CONFIG_CNSS2_DEBUG
/* When set, CNSS_QMI_ASSERT() becomes a no-op so QMI failures can be
 * ignored during debugging (e.g. while fw is intentionally restarted).
 */
static bool ignore_qmi_failure;
/* Assert on QMI failure unless failures are being deliberately ignored */
#define CNSS_QMI_ASSERT() CNSS_ASSERT(ignore_qmi_failure)
/* Toggle whether subsequent QMI failures trigger CNSS_ASSERT() */
void cnss_ignore_qmi_failure(bool ignore)
{
	ignore_qmi_failure = ignore;
}
#else
/* Non-debug builds: QMI failures never assert, and the toggle is a stub */
#define CNSS_QMI_ASSERT() do { } while (0)
void cnss_ignore_qmi_failure(bool ignore) { }
#endif
  61. static char *cnss_qmi_mode_to_str(enum cnss_driver_mode mode)
  62. {
  63. switch (mode) {
  64. case CNSS_MISSION:
  65. return "MISSION";
  66. case CNSS_FTM:
  67. return "FTM";
  68. case CNSS_EPPING:
  69. return "EPPING";
  70. case CNSS_WALTEST:
  71. return "WALTEST";
  72. case CNSS_OFF:
  73. return "OFF";
  74. case CNSS_CCPM:
  75. return "CCPM";
  76. case CNSS_QVIT:
  77. return "QVIT";
  78. case CNSS_CALIBRATION:
  79. return "CALIBRATION";
  80. default:
  81. return "UNKNOWN";
  82. }
  83. }
  84. static int qmi_send_wait(struct qmi_handle *qmi, void *req, void *rsp,
  85. struct qmi_elem_info *req_ei,
  86. struct qmi_elem_info *rsp_ei,
  87. int req_id, size_t req_len,
  88. unsigned long timeout)
  89. {
  90. struct qmi_txn txn;
  91. int ret;
  92. char *err_msg;
  93. struct qmi_response_type_v01 *resp = rsp;
  94. ret = qmi_txn_init(qmi, &txn, rsp_ei, rsp);
  95. if (ret < 0) {
  96. err_msg = "Qmi fail: fail to init txn,";
  97. goto out;
  98. }
  99. ret = qmi_send_request(qmi, NULL, &txn, req_id,
  100. req_len, req_ei, req);
  101. if (ret < 0) {
  102. qmi_txn_cancel(&txn);
  103. err_msg = "Qmi fail: fail to send req,";
  104. goto out;
  105. }
  106. ret = qmi_txn_wait(&txn, timeout);
  107. if (ret < 0) {
  108. err_msg = "Qmi fail: wait timeout,";
  109. goto out;
  110. } else if (resp->result != QMI_RESULT_SUCCESS_V01) {
  111. err_msg = "Qmi fail: request rejected,";
  112. cnss_pr_err("Qmi fail: respons with error:%d\n",
  113. resp->error);
  114. ret = -resp->result;
  115. goto out;
  116. }
  117. cnss_pr_dbg("req %x success\n", req_id);
  118. return 0;
  119. out:
  120. cnss_pr_err("%s req %x, ret %d\n", err_msg, req_id, ret);
  121. return ret;
  122. }
/* cnss_wlfw_ind_register_send_sync() - Subscribe to WLAN firmware QMI
 * indications.
 * @plat_priv: platform driver context owning the QMI handle
 *
 * Sends WLFW_IND_REGISTER to firmware enabling the indications this host
 * handles (memory requests, fw init/ready, QDSS trace, cal done, etc.)
 * and waits synchronously for the response.
 *
 * Return: 0 on success, -EALREADY if firmware reports the client is
 * already registered, -ENOMEM on allocation failure, or a negative error
 * otherwise. All failure paths except -EALREADY go through
 * CNSS_QMI_ASSERT().
 */
static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv)
{
	struct wlfw_ind_register_req_msg_v01 *req;
	struct wlfw_ind_register_resp_msg_v01 *resp;
	struct qmi_txn txn;
	int ret = 0;

	cnss_pr_dbg("Sending indication register message, state: 0x%lx\n",
		    plat_priv->driver_state);

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	/* Identify this host client to firmware */
	req->client_id_valid = 1;
	req->client_id = WLFW_CLIENT_ID;
	/* Enable each indication type this driver has a handler for */
	req->request_mem_enable_valid = 1;
	req->request_mem_enable = 1;
	req->fw_mem_ready_enable_valid = 1;
	req->fw_mem_ready_enable = 1;
	/* fw_ready indication is replaced by fw_init_done in HST/HSP */
	req->fw_init_done_enable_valid = 1;
	req->fw_init_done_enable = 1;
	req->pin_connect_result_enable_valid = 1;
	req->pin_connect_result_enable = 1;
	req->cal_done_enable_valid = 1;
	req->cal_done_enable = 1;
	req->qdss_trace_req_mem_enable_valid = 1;
	req->qdss_trace_req_mem_enable = 1;
	req->qdss_trace_save_enable_valid = 1;
	req->qdss_trace_save_enable = 1;
	req->qdss_trace_free_enable_valid = 1;
	req->qdss_trace_free_enable = 1;
	req->respond_get_info_enable_valid = 1;
	req->respond_get_info_enable = 1;
	req->wfc_call_twt_config_enable_valid = 1;
	req->wfc_call_twt_config_enable = 1;

	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
			   wlfw_ind_register_resp_msg_v01_ei, resp);
	if (ret < 0) {
		cnss_pr_err("Failed to initialize txn for indication register request, err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
			       QMI_WLFW_IND_REGISTER_REQ_V01,
			       WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_ind_register_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		cnss_pr_err("Failed to send indication register request, err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
	if (ret < 0) {
		cnss_pr_err("Failed to wait for response of indication register request, err: %d\n",
			    ret);
		goto out;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("Indication register request failed, result: %d, err: %d\n",
			    resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
		goto out;
	}

	/* Firmware may flag that this client registered earlier; report
	 * -EALREADY without asserting so the caller can treat it as benign.
	 */
	if (resp->fw_status_valid) {
		if (resp->fw_status & QMI_WLFW_ALREADY_REGISTERED_V01) {
			ret = -EALREADY;
			goto qmi_registered;
		}
	}

	kfree(req);
	kfree(resp);
	return 0;

out:
	CNSS_QMI_ASSERT();
qmi_registered:
	kfree(req);
	kfree(resp);
	return ret;
}
  207. static void cnss_wlfw_host_cap_parse_mlo(struct cnss_plat_data *plat_priv,
  208. struct wlfw_host_cap_req_msg_v01 *req)
  209. {
  210. if (plat_priv->device_id == KIWI_DEVICE_ID ||
  211. plat_priv->device_id == MANGO_DEVICE_ID) {
  212. req->mlo_capable_valid = 1;
  213. req->mlo_capable = 1;
  214. req->mlo_chip_id_valid = 1;
  215. req->mlo_chip_id = 0;
  216. req->mlo_group_id_valid = 1;
  217. req->mlo_group_id = 0;
  218. req->max_mlo_peer_valid = 1;
  219. /* Max peer number generally won't change for the same device
  220. * but needs to be synced with host driver.
  221. */
  222. req->max_mlo_peer = 32;
  223. req->mlo_num_chips_valid = 1;
  224. req->mlo_num_chips = 1;
  225. req->mlo_chip_info_valid = 1;
  226. req->mlo_chip_info[0].chip_id = 0;
  227. req->mlo_chip_info[0].num_local_links = 2;
  228. req->mlo_chip_info[0].hw_link_id[0] = 0;
  229. req->mlo_chip_info[0].hw_link_id[1] = 1;
  230. req->mlo_chip_info[0].valid_mlo_link_id[0] = 1;
  231. req->mlo_chip_info[0].valid_mlo_link_id[1] = 1;
  232. }
  233. }
/* cnss_wlfw_host_cap_send_sync() - Report host capabilities to firmware.
 * @plat_priv: platform driver context owning the QMI handle
 *
 * Builds a WLFW_HOST_CAP request describing what this host supports
 * (BDF, M3, wake MSI, SMMU IOVA ranges, build type, MLO, feature list)
 * and waits synchronously for firmware's response.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or a negative
 * error; failure paths go through CNSS_QMI_ASSERT().
 */
static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv)
{
	struct wlfw_host_cap_req_msg_v01 *req;
	struct wlfw_host_cap_resp_msg_v01 *resp;
	struct qmi_txn txn;
	int ret = 0;
	u64 iova_start = 0, iova_size = 0,
	    iova_ipa_start = 0, iova_ipa_size = 0;
	u64 feature_list = 0;

	cnss_pr_dbg("Sending host capability message, state: 0x%lx\n",
		    plat_priv->driver_state);

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	req->num_clients_valid = 1;
	req->num_clients = 1;
	cnss_pr_dbg("Number of clients is %d\n", req->num_clients);

	/* Only advertise the wake MSI if the bus actually provides one */
	req->wake_msi = cnss_bus_get_wake_irq(plat_priv);
	if (req->wake_msi) {
		cnss_pr_dbg("WAKE MSI base data is %d\n", req->wake_msi);
		req->wake_msi_valid = 1;
	}

	req->bdf_support_valid = 1;
	req->bdf_support = 1;
	req->m3_support_valid = 1;
	req->m3_support = 1;
	req->m3_cache_support_valid = 1;
	req->m3_cache_support = 1;
	req->cal_done_valid = 1;
	req->cal_done = plat_priv->cal_done;
	cnss_pr_dbg("Calibration done is %d\n", plat_priv->cal_done);

	/* With SMMU stage-1 enabled, give firmware the combined DDR IOVA
	 * window (regular + IPA ranges merged into one entry).
	 */
	if (cnss_bus_is_smmu_s1_enabled(plat_priv) &&
	    !cnss_bus_get_iova(plat_priv, &iova_start, &iova_size) &&
	    !cnss_bus_get_iova_ipa(plat_priv, &iova_ipa_start,
				   &iova_ipa_size)) {
		req->ddr_range_valid = 1;
		req->ddr_range[0].start = iova_start;
		req->ddr_range[0].size = iova_size + iova_ipa_size;
		cnss_pr_dbg("Sending iova starting 0x%llx with size 0x%llx\n",
			    req->ddr_range[0].start, req->ddr_range[0].size);
	}

	req->host_build_type_valid = 1;
	req->host_build_type = cnss_get_host_build_type();
	cnss_wlfw_host_cap_parse_mlo(plat_priv, req);

	/* Feature list is optional; send it only if it can be fetched */
	ret = cnss_get_feature_list(plat_priv, &feature_list);
	if (!ret) {
		req->feature_list_valid = 1;
		req->feature_list = feature_list;
		cnss_pr_dbg("Sending feature list 0x%llx\n",
			    req->feature_list);
	}

	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
			   wlfw_host_cap_resp_msg_v01_ei, resp);
	if (ret < 0) {
		cnss_pr_err("Failed to initialize txn for host capability request, err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
			       QMI_WLFW_HOST_CAP_REQ_V01,
			       WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_host_cap_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		cnss_pr_err("Failed to send host capability request, err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
	if (ret < 0) {
		cnss_pr_err("Failed to wait for response of host capability request, err: %d\n",
			    ret);
		goto out;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("Host capability request failed, result: %d, err: %d\n",
			    resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
		goto out;
	}

	kfree(req);
	kfree(resp);
	return 0;

out:
	CNSS_QMI_ASSERT();
	kfree(req);
	kfree(resp);
	return ret;
}
/* cnss_wlfw_respond_mem_send_sync() - Hand allocated memory segments back
 * to firmware.
 * @plat_priv: platform driver context; plat_priv->fw_mem holds the
 *             segments previously requested by firmware
 *
 * Answers firmware's earlier memory request by sending the physical
 * address, size, and type of each allocated segment, then waits
 * synchronously for the response.
 *
 * Return: 0 on success; -EINVAL for an invalid segment count or a
 * missing segment of type 0; -ENOMEM when a segment was never allocated;
 * other negative values on QMI failure. Failure paths go through
 * CNSS_QMI_ASSERT().
 */
int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv)
{
	struct wlfw_respond_mem_req_msg_v01 *req;
	struct wlfw_respond_mem_resp_msg_v01 *resp;
	struct qmi_txn txn;
	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
	int ret = 0, i;

	cnss_pr_dbg("Sending respond memory message, state: 0x%lx\n",
		    plat_priv->driver_state);

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	/* Never copy more segments than the QMI message can carry */
	if (plat_priv->fw_mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
		cnss_pr_err("Invalid seg len %u\n", plat_priv->fw_mem_seg_len);
		ret = -EINVAL;
		goto out;
	}

	req->mem_seg_len = plat_priv->fw_mem_seg_len;
	for (i = 0; i < req->mem_seg_len; i++) {
		/* Each segment must have been successfully allocated */
		if (!fw_mem[i].pa || !fw_mem[i].size) {
			if (fw_mem[i].type == 0) {
				cnss_pr_err("Invalid memory for FW type, segment = %d\n",
					    i);
				ret = -EINVAL;
				goto out;
			}
			cnss_pr_err("Memory for FW is not available for type: %u\n",
				    fw_mem[i].type);
			ret = -ENOMEM;
			goto out;
		}
		cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
			    fw_mem[i].va, &fw_mem[i].pa,
			    fw_mem[i].size, fw_mem[i].type);
		req->mem_seg[i].addr = fw_mem[i].pa;
		req->mem_seg[i].size = fw_mem[i].size;
		req->mem_seg[i].type = fw_mem[i].type;
	}

	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
			   wlfw_respond_mem_resp_msg_v01_ei, resp);
	if (ret < 0) {
		cnss_pr_err("Failed to initialize txn for respond memory request, err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
			       QMI_WLFW_RESPOND_MEM_REQ_V01,
			       WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_respond_mem_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		cnss_pr_err("Failed to send respond memory request, err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
	if (ret < 0) {
		cnss_pr_err("Failed to wait for response of respond memory request, err: %d\n",
			    ret);
		goto out;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("Respond memory request failed, result: %d, err: %d\n",
			    resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
		goto out;
	}

	kfree(req);
	kfree(resp);
	return 0;

out:
	CNSS_QMI_ASSERT();
	kfree(req);
	kfree(resp);
	return ret;
}
/* cnss_wlfw_tgt_cap_send_sync() - Query firmware for target capabilities.
 * @plat_priv: platform driver context; capability fields are cached here
 *
 * Sends an empty WLFW_CAP request and parses the response, caching chip,
 * board, SoC, firmware version/build, CPR voltage, device frequency, OTP
 * version, device memory ranges, fw capability bits, and hang-event
 * parameters into @plat_priv.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or a negative
 * error; failure paths go through CNSS_QMI_ASSERT().
 */
int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv)
{
	struct wlfw_cap_req_msg_v01 *req;
	struct wlfw_cap_resp_msg_v01 *resp;
	struct qmi_txn txn;
	char *fw_build_timestamp;
	int ret = 0, i;

	cnss_pr_dbg("Sending target capability message, state: 0x%lx\n",
		    plat_priv->driver_state);

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
			   wlfw_cap_resp_msg_v01_ei, resp);
	if (ret < 0) {
		cnss_pr_err("Failed to initialize txn for target capability request, err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
			       QMI_WLFW_CAP_REQ_V01,
			       WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_cap_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		cnss_pr_err("Failed to send respond target capability request, err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
	if (ret < 0) {
		cnss_pr_err("Failed to wait for response of target capability request, err: %d\n",
			    ret);
		goto out;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("Target capability request failed, result: %d, err: %d\n",
			    resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
		goto out;
	}

	/* Cache every optional TLV firmware chose to report */
	if (resp->chip_info_valid) {
		plat_priv->chip_info.chip_id = resp->chip_info.chip_id;
		plat_priv->chip_info.chip_family = resp->chip_info.chip_family;
	}

	if (resp->board_info_valid)
		plat_priv->board_info.board_id = resp->board_info.board_id;
	else
		plat_priv->board_info.board_id = 0xFF;

	if (resp->soc_info_valid)
		plat_priv->soc_info.soc_id = resp->soc_info.soc_id;

	if (resp->fw_version_info_valid) {
		plat_priv->fw_version_info.fw_version =
			resp->fw_version_info.fw_version;
		/* Force NUL-termination before copying; assumes the QMI
		 * array holds QMI_WLFW_MAX_TIMESTAMP_LEN + 1 bytes —
		 * TODO confirm against the wlfw message definition.
		 */
		fw_build_timestamp = resp->fw_version_info.fw_build_timestamp;
		fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN] = '\0';
		strlcpy(plat_priv->fw_version_info.fw_build_timestamp,
			resp->fw_version_info.fw_build_timestamp,
			QMI_WLFW_MAX_TIMESTAMP_LEN + 1);
	}

	if (resp->fw_build_id_valid) {
		resp->fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN] = '\0';
		strlcpy(plat_priv->fw_build_id, resp->fw_build_id,
			QMI_WLFW_MAX_BUILD_ID_LEN + 1);
	}

	/* FW will send AOP retention voltage for qca6490 */
	if (resp->voltage_mv_valid) {
		plat_priv->cpr_info.voltage = resp->voltage_mv;
		cnss_pr_dbg("Voltage for CPR: %dmV\n",
			    plat_priv->cpr_info.voltage);
		cnss_update_cpr_info(plat_priv);
	}

	if (resp->time_freq_hz_valid) {
		plat_priv->device_freq_hz = resp->time_freq_hz;
		cnss_pr_dbg("Device frequency is %d HZ\n",
			    plat_priv->device_freq_hz);
	}

	if (resp->otp_version_valid)
		plat_priv->otp_version = resp->otp_version;

	if (resp->dev_mem_info_valid) {
		for (i = 0; i < QMI_WLFW_MAX_DEV_MEM_NUM_V01; i++) {
			plat_priv->dev_mem_info[i].start =
				resp->dev_mem_info[i].start;
			plat_priv->dev_mem_info[i].size =
				resp->dev_mem_info[i].size;
			cnss_pr_buf("Device memory info[%d]: start = 0x%llx, size = 0x%llx\n",
				    i, plat_priv->dev_mem_info[i].start,
				    plat_priv->dev_mem_info[i].size);
		}
	}

	if (resp->fw_caps_valid) {
		plat_priv->fw_pcie_gen_switch =
			!!(resp->fw_caps & QMI_WLFW_HOST_PCIE_GEN_SWITCH_V01);
		plat_priv->fw_caps = resp->fw_caps;
	}

	/* Only accept a hang-data length firmware bounds correctly */
	if (resp->hang_data_length_valid &&
	    resp->hang_data_length &&
	    resp->hang_data_length <= WLFW_MAX_HANG_EVENT_DATA_SIZE)
		plat_priv->hang_event_data_len = resp->hang_data_length;
	else
		plat_priv->hang_event_data_len = 0;

	if (resp->hang_data_addr_offset_valid)
		plat_priv->hang_data_addr_offset = resp->hang_data_addr_offset;
	else
		plat_priv->hang_data_addr_offset = 0;

	if (resp->hwid_bitmap_valid)
		plat_priv->hwid_bitmap = resp->hwid_bitmap;

	if (resp->ol_cpr_cfg_valid)
		cnss_aop_ol_cpr_cfg_setup(plat_priv, &resp->ol_cpr_cfg);

	cnss_pr_dbg("Target capability: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, otp_version: 0x%x\n",
		    plat_priv->chip_info.chip_id,
		    plat_priv->chip_info.chip_family,
		    plat_priv->board_info.board_id, plat_priv->soc_info.soc_id,
		    plat_priv->otp_version);
	cnss_pr_dbg("fw_version: 0x%x, fw_build_timestamp: %s, fw_build_id: %s, hwid_bitmap:0x%x\n",
		    plat_priv->fw_version_info.fw_version,
		    plat_priv->fw_version_info.fw_build_timestamp,
		    plat_priv->fw_build_id,
		    plat_priv->hwid_bitmap);
	cnss_pr_dbg("Hang event params, Length: 0x%x, Offset Address: 0x%x\n",
		    plat_priv->hang_event_data_len,
		    plat_priv->hang_data_addr_offset);

	kfree(req);
	kfree(resp);
	return 0;

out:
	CNSS_QMI_ASSERT();
	kfree(req);
	kfree(resp);
	return ret;
}
  545. static int cnss_get_bdf_file_name(struct cnss_plat_data *plat_priv,
  546. u32 bdf_type, char *filename,
  547. u32 filename_len)
  548. {
  549. char filename_tmp[MAX_FIRMWARE_NAME_LEN];
  550. int ret = 0;
  551. switch (bdf_type) {
  552. case CNSS_BDF_ELF:
  553. /* Board ID will be equal or less than 0xFF in GF mask case */
  554. if (plat_priv->board_info.board_id == 0xFF) {
  555. if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
  556. snprintf(filename_tmp, filename_len,
  557. ELF_BDF_FILE_NAME_GF);
  558. else
  559. snprintf(filename_tmp, filename_len,
  560. ELF_BDF_FILE_NAME);
  561. } else if (plat_priv->board_info.board_id < 0xFF) {
  562. if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
  563. snprintf(filename_tmp, filename_len,
  564. ELF_BDF_FILE_NAME_GF_PREFIX "%02x",
  565. plat_priv->board_info.board_id);
  566. else
  567. snprintf(filename_tmp, filename_len,
  568. ELF_BDF_FILE_NAME_PREFIX "%02x",
  569. plat_priv->board_info.board_id);
  570. } else {
  571. snprintf(filename_tmp, filename_len,
  572. BDF_FILE_NAME_PREFIX "%02x.e%02x",
  573. plat_priv->board_info.board_id >> 8 & 0xFF,
  574. plat_priv->board_info.board_id & 0xFF);
  575. }
  576. break;
  577. case CNSS_BDF_BIN:
  578. if (plat_priv->board_info.board_id == 0xFF) {
  579. if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
  580. snprintf(filename_tmp, filename_len,
  581. BIN_BDF_FILE_NAME_GF);
  582. else
  583. snprintf(filename_tmp, filename_len,
  584. BIN_BDF_FILE_NAME);
  585. } else if (plat_priv->board_info.board_id < 0xFF) {
  586. if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
  587. snprintf(filename_tmp, filename_len,
  588. BIN_BDF_FILE_NAME_GF_PREFIX "%02x",
  589. plat_priv->board_info.board_id);
  590. else
  591. snprintf(filename_tmp, filename_len,
  592. BIN_BDF_FILE_NAME_PREFIX "%02x",
  593. plat_priv->board_info.board_id);
  594. } else {
  595. snprintf(filename_tmp, filename_len,
  596. BDF_FILE_NAME_PREFIX "%02x.b%02x",
  597. plat_priv->board_info.board_id >> 8 & 0xFF,
  598. plat_priv->board_info.board_id & 0xFF);
  599. }
  600. break;
  601. case CNSS_BDF_REGDB:
  602. snprintf(filename_tmp, filename_len, REGDB_FILE_NAME);
  603. break;
  604. case CNSS_BDF_HDS:
  605. snprintf(filename_tmp, filename_len, HDS_FILE_NAME);
  606. break;
  607. default:
  608. cnss_pr_err("Invalid BDF type: %d\n",
  609. plat_priv->ctrl_params.bdf_type);
  610. ret = -EINVAL;
  611. break;
  612. }
  613. if (!ret)
  614. cnss_bus_add_fw_prefix_name(plat_priv, filename, filename_tmp);
  615. return ret;
  616. }
/**
 * cnss_wlfw_ini_file_send_sync() - Download an INI file to WLAN firmware
 * @plat_priv: Platform driver context
 * @file_type: QMI INI file type requested (e.g. WLFW_CONN_ROAM_INI_V01)
 *
 * Fetches the INI file via the kernel firmware loader and streams it to
 * firmware in QMI_WLFW_MAX_DATA_SIZE_V01-sized segments over synchronous
 * QMI transactions.  If the primary file is absent and the type supports
 * it, a "<name>-backup<ext>" fallback is tried before giving up.
 *
 * Return: 0 on success, negative errno or negated QMI result on failure.
 */
int cnss_wlfw_ini_file_send_sync(struct cnss_plat_data *plat_priv,
				 enum wlfw_ini_file_type_v01 file_type)
{
	struct wlfw_ini_file_download_req_msg_v01 *req;
	struct wlfw_ini_file_download_resp_msg_v01 *resp;
	struct qmi_txn txn;
	int ret = 0;
	const struct firmware *fw;
	char filename[INI_FILE_NAME_LEN] = {0};
	char tmp_filename[INI_FILE_NAME_LEN] = {0};
	const u8 *temp;
	unsigned int remaining;
	bool backup_supported = false;

	cnss_pr_info("INI File %u download\n", file_type);

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	/* Map the QMI file type onto its base file name */
	switch (file_type) {
	case WLFW_CONN_ROAM_INI_V01:
		snprintf(tmp_filename, sizeof(tmp_filename),
			 CONN_ROAM_FILE_NAME);
		backup_supported = true;
		break;
	default:
		cnss_pr_err("Invalid file type: %u\n", file_type);
		ret = -EINVAL;
		goto err_req_fw;
	}

	snprintf(filename, sizeof(filename), "%s%s", tmp_filename, INI_EXT);
	/* Fetch the file */
	ret = firmware_request_nowarn(&fw, filename, &plat_priv->plat_dev->dev);
	if (ret) {
		cnss_pr_err("Failed to get INI file %s (%d), Backup file: %s",
			    filename, ret,
			    backup_supported ? "Supported" : "Not Supported");
		if (!backup_supported)
			goto err_req_fw;
		/* Retry with the "<name>-backup<ext>" fallback file */
		snprintf(filename, sizeof(filename),
			 "%s-%s%s", tmp_filename, "backup", INI_EXT);
		ret = firmware_request_nowarn(&fw, filename,
					      &plat_priv->plat_dev->dev);
		if (ret) {
			cnss_pr_err("Failed to get INI file %s (%d)", filename,
				    ret);
			goto err_req_fw;
		}
	}

	temp = fw->data;
	remaining = fw->size;

	cnss_pr_dbg("Downloading INI file: %s, size: %u\n", filename,
		    remaining);

	/*
	 * Stream the file in bounded segments.  req was kzalloc'd, so
	 * seg_id starts at 0 and is incremented after each accepted
	 * segment; the final segment sets end = 1.
	 */
	while (remaining) {
		req->file_type_valid = 1;
		req->file_type = file_type;
		req->total_size_valid = 1;
		req->total_size = remaining;
		req->seg_id_valid = 1;
		req->data_valid = 1;
		req->end_valid = 1;

		if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
			req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
		} else {
			/* Last chunk: tell firmware the file is complete */
			req->data_len = remaining;
			req->end = 1;
		}

		memcpy(req->data, temp, req->data_len);

		ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
				   wlfw_ini_file_download_resp_msg_v01_ei,
				   resp);
		if (ret < 0) {
			cnss_pr_err("Failed to initialize txn for INI file download request, err: %d\n",
				    ret);
			goto err;
		}

		ret = qmi_send_request
			(&plat_priv->qmi_wlfw, NULL, &txn,
			 QMI_WLFW_INI_FILE_DOWNLOAD_REQ_V01,
			 WLFW_INI_FILE_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
			 wlfw_ini_file_download_req_msg_v01_ei, req);
		if (ret < 0) {
			/* Must cancel the txn we initialized above */
			qmi_txn_cancel(&txn);
			cnss_pr_err("Failed to send INI File download request, err: %d\n",
				    ret);
			goto err;
		}

		ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
		if (ret < 0) {
			cnss_pr_err("Failed to wait for response of INI File download request, err: %d\n",
				    ret);
			goto err;
		}

		if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
			cnss_pr_err("INI file download request failed, result: %d, err: %d\n",
				    resp->resp.result, resp->resp.error);
			ret = -resp->resp.result;
			goto err;
		}

		remaining -= req->data_len;
		temp += req->data_len;
		req->seg_id++;
	}

	release_firmware(fw);
	kfree(req);
	kfree(resp);
	return 0;

err:
	release_firmware(fw);
err_req_fw:
	kfree(req);
	kfree(resp);
	return ret;
}
/**
 * cnss_wlfw_bdf_dnld_send_sync() - Download a board data file to firmware
 * @plat_priv: Platform driver context
 * @bdf_type: File kind (CNSS_BDF_ELF/BIN/REGDB/HDS); selects both the
 *            file name and the loader path
 *
 * Resolves the file name from board/chip info, loads the file (REGDB uses
 * the direct loader, others the user-mode fallback path), then streams it
 * to firmware in QMI_WLFW_MAX_DATA_SIZE_V01-sized segments.  On success,
 * optional host_bdf_data TLVs in the final response configure the internal
 * power-amp regulator and cold-boot-calibration file download.
 *
 * Return: 0 on success, negative errno or negated QMI result on failure.
 */
int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
				 u32 bdf_type)
{
	struct wlfw_bdf_download_req_msg_v01 *req;
	struct wlfw_bdf_download_resp_msg_v01 *resp;
	struct qmi_txn txn;
	char filename[MAX_FIRMWARE_NAME_LEN];
	const struct firmware *fw_entry = NULL;
	const u8 *temp;
	unsigned int remaining;
	int ret = 0;

	cnss_pr_dbg("Sending BDF download message, state: 0x%lx, type: %d\n",
		    plat_priv->driver_state, bdf_type);

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	ret = cnss_get_bdf_file_name(plat_priv, bdf_type,
				     filename, sizeof(filename));
	if (ret)
		goto err_req_fw;

	/* REGDB must not fall back to the user-mode helper */
	if (bdf_type == CNSS_BDF_REGDB)
		ret = cnss_request_firmware_direct(plat_priv, &fw_entry,
						   filename);
	else
		ret = firmware_request_nowarn(&fw_entry, filename,
					      &plat_priv->plat_dev->dev);

	if (ret) {
		cnss_pr_err("Failed to load BDF: %s, ret: %d\n", filename, ret);
		goto err_req_fw;
	}

	temp = fw_entry->data;
	remaining = fw_entry->size;

	cnss_pr_dbg("Downloading BDF: %s, size: %u\n", filename, remaining);

	/* Segment loop: seg_id starts at 0 (kzalloc), end=1 on last chunk */
	while (remaining) {
		req->valid = 1;
		req->file_id_valid = 1;
		req->file_id = plat_priv->board_info.board_id;
		req->total_size_valid = 1;
		req->total_size = remaining;
		req->seg_id_valid = 1;
		req->data_valid = 1;
		req->end_valid = 1;
		req->bdf_type_valid = 1;
		req->bdf_type = bdf_type;

		if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
			req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
		} else {
			req->data_len = remaining;
			req->end = 1;
		}

		memcpy(req->data, temp, req->data_len);

		ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
				   wlfw_bdf_download_resp_msg_v01_ei, resp);
		if (ret < 0) {
			cnss_pr_err("Failed to initialize txn for BDF download request, err: %d\n",
				    ret);
			goto err_send;
		}

		ret = qmi_send_request
			(&plat_priv->qmi_wlfw, NULL, &txn,
			 QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
			 WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
			 wlfw_bdf_download_req_msg_v01_ei, req);
		if (ret < 0) {
			qmi_txn_cancel(&txn);
			cnss_pr_err("Failed to send respond BDF download request, err: %d\n",
				    ret);
			goto err_send;
		}

		ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
		if (ret < 0) {
			cnss_pr_err("Failed to wait for response of BDF download request, err: %d\n",
				    ret);
			goto err_send;
		}

		if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
			cnss_pr_err("BDF download request failed, result: %d, err: %d\n",
				    resp->resp.result, resp->resp.error);
			ret = -resp->resp.result;
			goto err_send;
		}

		remaining -= req->data_len;
		temp += req->data_len;
		req->seg_id++;
	}

	release_firmware(fw_entry);

	/* Optional host config TLVs carried in the last response */
	if (resp->host_bdf_data_valid) {
		/* QCA6490 enable S3E regulator for IPA configuration only */
		if (!(resp->host_bdf_data & QMI_WLFW_HW_XPA_V01))
			cnss_enable_int_pow_amp_vreg(plat_priv);

		plat_priv->cbc_file_download =
			resp->host_bdf_data & QMI_WLFW_CBC_FILE_DOWNLOAD_V01;
		cnss_pr_info("Host BDF config: HW_XPA: %d CalDB: %d\n",
			     resp->host_bdf_data & QMI_WLFW_HW_XPA_V01,
			     plat_priv->cbc_file_download);
	}

	kfree(req);
	kfree(resp);
	return 0;

err_send:
	release_firmware(fw_entry);
err_req_fw:
	/*
	 * Do not assert for REGDB (optional file), during reboot, or for
	 * -EAGAIN (transient) — those failures are expected/benign.
	 */
	if (!(bdf_type == CNSS_BDF_REGDB ||
	      test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state) ||
	      ret == -EAGAIN))
		CNSS_QMI_ASSERT();
	kfree(req);
	kfree(resp);
	return ret;
}
  849. int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv)
  850. {
  851. struct wlfw_m3_info_req_msg_v01 *req;
  852. struct wlfw_m3_info_resp_msg_v01 *resp;
  853. struct qmi_txn txn;
  854. struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
  855. int ret = 0;
  856. cnss_pr_dbg("Sending M3 information message, state: 0x%lx\n",
  857. plat_priv->driver_state);
  858. req = kzalloc(sizeof(*req), GFP_KERNEL);
  859. if (!req)
  860. return -ENOMEM;
  861. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  862. if (!resp) {
  863. kfree(req);
  864. return -ENOMEM;
  865. }
  866. if (!m3_mem->pa || !m3_mem->size) {
  867. cnss_pr_err("Memory for M3 is not available\n");
  868. ret = -ENOMEM;
  869. goto out;
  870. }
  871. cnss_pr_dbg("M3 memory, va: 0x%pK, pa: %pa, size: 0x%zx\n",
  872. m3_mem->va, &m3_mem->pa, m3_mem->size);
  873. req->addr = plat_priv->m3_mem.pa;
  874. req->size = plat_priv->m3_mem.size;
  875. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  876. wlfw_m3_info_resp_msg_v01_ei, resp);
  877. if (ret < 0) {
  878. cnss_pr_err("Failed to initialize txn for M3 information request, err: %d\n",
  879. ret);
  880. goto out;
  881. }
  882. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  883. QMI_WLFW_M3_INFO_REQ_V01,
  884. WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
  885. wlfw_m3_info_req_msg_v01_ei, req);
  886. if (ret < 0) {
  887. qmi_txn_cancel(&txn);
  888. cnss_pr_err("Failed to send M3 information request, err: %d\n",
  889. ret);
  890. goto out;
  891. }
  892. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  893. if (ret < 0) {
  894. cnss_pr_err("Failed to wait for response of M3 information request, err: %d\n",
  895. ret);
  896. goto out;
  897. }
  898. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  899. cnss_pr_err("M3 information request failed, result: %d, err: %d\n",
  900. resp->resp.result, resp->resp.error);
  901. ret = -resp->resp.result;
  902. goto out;
  903. }
  904. kfree(req);
  905. kfree(resp);
  906. return 0;
  907. out:
  908. CNSS_QMI_ASSERT();
  909. kfree(req);
  910. kfree(resp);
  911. return ret;
  912. }
  913. int cnss_wlfw_wlan_mac_req_send_sync(struct cnss_plat_data *plat_priv,
  914. u8 *mac, u32 mac_len)
  915. {
  916. struct wlfw_mac_addr_req_msg_v01 req;
  917. struct wlfw_mac_addr_resp_msg_v01 resp = {0};
  918. struct qmi_txn txn;
  919. int ret;
  920. if (!plat_priv || !mac || mac_len != QMI_WLFW_MAC_ADDR_SIZE_V01)
  921. return -EINVAL;
  922. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  923. wlfw_mac_addr_resp_msg_v01_ei, &resp);
  924. if (ret < 0) {
  925. cnss_pr_err("Failed to initialize txn for mac req, err: %d\n",
  926. ret);
  927. ret = -EIO;
  928. goto out;
  929. }
  930. cnss_pr_dbg("Sending WLAN mac req [%pM], state: 0x%lx\n",
  931. mac, plat_priv->driver_state);
  932. memcpy(req.mac_addr, mac, mac_len);
  933. req.mac_addr_valid = 1;
  934. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  935. QMI_WLFW_MAC_ADDR_REQ_V01,
  936. WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN,
  937. wlfw_mac_addr_req_msg_v01_ei, &req);
  938. if (ret < 0) {
  939. qmi_txn_cancel(&txn);
  940. cnss_pr_err("Failed to send mac req, err: %d\n", ret);
  941. ret = -EIO;
  942. goto out;
  943. }
  944. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  945. if (ret < 0) {
  946. cnss_pr_err("Failed to wait for resp of mac req, err: %d\n",
  947. ret);
  948. ret = -EIO;
  949. goto out;
  950. }
  951. if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
  952. cnss_pr_err("WLAN mac req failed, result: %d, err: %d\n",
  953. resp.resp.result);
  954. ret = -resp.resp.result;
  955. }
  956. out:
  957. return ret;
  958. }
  959. int cnss_wlfw_qdss_data_send_sync(struct cnss_plat_data *plat_priv, char *file_name,
  960. u32 total_size)
  961. {
  962. int ret = 0;
  963. struct wlfw_qdss_trace_data_req_msg_v01 *req;
  964. struct wlfw_qdss_trace_data_resp_msg_v01 *resp;
  965. unsigned char *p_qdss_trace_data_temp, *p_qdss_trace_data = NULL;
  966. unsigned int remaining;
  967. struct qmi_txn txn;
  968. cnss_pr_dbg("%s\n", __func__);
  969. req = kzalloc(sizeof(*req), GFP_KERNEL);
  970. if (!req)
  971. return -ENOMEM;
  972. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  973. if (!resp) {
  974. kfree(req);
  975. return -ENOMEM;
  976. }
  977. p_qdss_trace_data = kzalloc(total_size, GFP_KERNEL);
  978. if (!p_qdss_trace_data) {
  979. ret = ENOMEM;
  980. goto end;
  981. }
  982. remaining = total_size;
  983. p_qdss_trace_data_temp = p_qdss_trace_data;
  984. while (remaining && resp->end == 0) {
  985. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  986. wlfw_qdss_trace_data_resp_msg_v01_ei, resp);
  987. if (ret < 0) {
  988. cnss_pr_err("Fail to init txn for QDSS trace resp %d\n",
  989. ret);
  990. goto fail;
  991. }
  992. ret = qmi_send_request
  993. (&plat_priv->qmi_wlfw, NULL, &txn,
  994. QMI_WLFW_QDSS_TRACE_DATA_REQ_V01,
  995. WLFW_QDSS_TRACE_DATA_REQ_MSG_V01_MAX_MSG_LEN,
  996. wlfw_qdss_trace_data_req_msg_v01_ei, req);
  997. if (ret < 0) {
  998. qmi_txn_cancel(&txn);
  999. cnss_pr_err("Fail to send QDSS trace data req %d\n",
  1000. ret);
  1001. goto fail;
  1002. }
  1003. ret = qmi_txn_wait(&txn, plat_priv->ctrl_params.qmi_timeout);
  1004. if (ret < 0) {
  1005. cnss_pr_err("QDSS trace resp wait failed with rc %d\n",
  1006. ret);
  1007. goto fail;
  1008. } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1009. cnss_pr_err("QMI QDSS trace request rejected, result:%d error:%d\n",
  1010. resp->resp.result, resp->resp.error);
  1011. ret = -resp->resp.result;
  1012. goto fail;
  1013. } else {
  1014. ret = 0;
  1015. }
  1016. cnss_pr_dbg("%s: response total size %d data len %d",
  1017. __func__, resp->total_size, resp->data_len);
  1018. if ((resp->total_size_valid == 1 &&
  1019. resp->total_size == total_size) &&
  1020. (resp->seg_id_valid == 1 && resp->seg_id == req->seg_id) &&
  1021. (resp->data_valid == 1 &&
  1022. resp->data_len <= QMI_WLFW_MAX_DATA_SIZE_V01) &&
  1023. resp->data_len <= remaining) {
  1024. memcpy(p_qdss_trace_data_temp,
  1025. resp->data, resp->data_len);
  1026. } else {
  1027. cnss_pr_err("%s: Unmatched qdss trace data, Expect total_size %u, seg_id %u, Recv total_size_valid %u, total_size %u, seg_id_valid %u, seg_id %u, data_len_valid %u, data_len %u",
  1028. __func__,
  1029. total_size, req->seg_id,
  1030. resp->total_size_valid,
  1031. resp->total_size,
  1032. resp->seg_id_valid,
  1033. resp->seg_id,
  1034. resp->data_valid,
  1035. resp->data_len);
  1036. ret = -1;
  1037. goto fail;
  1038. }
  1039. remaining -= resp->data_len;
  1040. p_qdss_trace_data_temp += resp->data_len;
  1041. req->seg_id++;
  1042. }
  1043. if (remaining == 0 && (resp->end_valid && resp->end)) {
  1044. ret = cnss_genl_send_msg(p_qdss_trace_data,
  1045. CNSS_GENL_MSG_TYPE_QDSS, file_name,
  1046. total_size);
  1047. if (ret < 0) {
  1048. cnss_pr_err("Fail to save QDSS trace data: %d\n",
  1049. ret);
  1050. ret = -1;
  1051. goto fail;
  1052. }
  1053. } else {
  1054. cnss_pr_err("%s: QDSS trace file corrupted: remaining %u, end_valid %u, end %u",
  1055. __func__,
  1056. remaining, resp->end_valid, resp->end);
  1057. ret = -1;
  1058. goto fail;
  1059. }
  1060. fail:
  1061. kfree(p_qdss_trace_data);
  1062. end:
  1063. kfree(req);
  1064. kfree(resp);
  1065. return ret;
  1066. }
  1067. void cnss_get_qdss_cfg_filename(struct cnss_plat_data *plat_priv,
  1068. char *filename, u32 filename_len)
  1069. {
  1070. char filename_tmp[MAX_FIRMWARE_NAME_LEN];
  1071. char *debug_str = QDSS_DEBUG_FILE_STR;
  1072. if (plat_priv->device_id == KIWI_DEVICE_ID ||
  1073. plat_priv->device_id == MANGO_DEVICE_ID)
  1074. debug_str = "";
  1075. if (plat_priv->device_version.major_version == FW_V2_NUMBER)
  1076. snprintf(filename_tmp, filename_len, QDSS_TRACE_CONFIG_FILE
  1077. "_%s%s.cfg", debug_str, HW_V2_NUMBER);
  1078. else
  1079. snprintf(filename_tmp, filename_len, QDSS_TRACE_CONFIG_FILE
  1080. "_%s%s.cfg", debug_str, HW_V1_NUMBER);
  1081. cnss_bus_add_fw_prefix_name(plat_priv, filename, filename_tmp);
  1082. }
/**
 * cnss_wlfw_qdss_dnld_send_sync() - Download the QDSS trace config file
 * @plat_priv: Platform driver context
 *
 * Loads the QDSS config file (name derived from device/hw version via
 * cnss_get_qdss_cfg_filename()) through the direct firmware loader and
 * streams it to firmware in QMI_WLFW_MAX_DATA_SIZE_V01-sized segments.
 * A missing config file is treated as non-fatal at debug log level by the
 * caller (this function just returns the loader error).
 *
 * Return: 0 on success, negative errno or negated QMI result on failure.
 */
int cnss_wlfw_qdss_dnld_send_sync(struct cnss_plat_data *plat_priv)
{
	struct wlfw_qdss_trace_config_download_req_msg_v01 *req;
	struct wlfw_qdss_trace_config_download_resp_msg_v01 *resp;
	struct qmi_txn txn;
	const struct firmware *fw_entry = NULL;
	const u8 *temp;
	char qdss_cfg_filename[MAX_FIRMWARE_NAME_LEN];
	unsigned int remaining;
	int ret = 0;

	cnss_pr_dbg("Sending QDSS config download message, state: 0x%lx\n",
		    plat_priv->driver_state);

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	cnss_get_qdss_cfg_filename(plat_priv, qdss_cfg_filename, sizeof(qdss_cfg_filename));
	ret = cnss_request_firmware_direct(plat_priv, &fw_entry,
					   qdss_cfg_filename);
	if (ret) {
		cnss_pr_dbg("Unable to load %s\n",
			    qdss_cfg_filename);
		goto err_req_fw;
	}

	temp = fw_entry->data;
	remaining = fw_entry->size;

	cnss_pr_dbg("Downloading QDSS: %s, size: %u\n",
		    qdss_cfg_filename, remaining);

	/* Segment loop: seg_id starts at 0 (kzalloc), end=1 on last chunk */
	while (remaining) {
		req->total_size_valid = 1;
		req->total_size = remaining;
		req->seg_id_valid = 1;
		req->data_valid = 1;
		req->end_valid = 1;

		if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
			req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
		} else {
			req->data_len = remaining;
			req->end = 1;
		}

		memcpy(req->data, temp, req->data_len);

		ret = qmi_txn_init
			(&plat_priv->qmi_wlfw, &txn,
			 wlfw_qdss_trace_config_download_resp_msg_v01_ei,
			 resp);
		if (ret < 0) {
			cnss_pr_err("Failed to initialize txn for QDSS download request, err: %d\n",
				    ret);
			goto err_send;
		}

		ret = qmi_send_request
			(&plat_priv->qmi_wlfw, NULL, &txn,
			 QMI_WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_V01,
			 WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
			 wlfw_qdss_trace_config_download_req_msg_v01_ei, req);
		if (ret < 0) {
			/* Must cancel the txn we initialized above */
			qmi_txn_cancel(&txn);
			cnss_pr_err("Failed to send respond QDSS download request, err: %d\n",
				    ret);
			goto err_send;
		}

		ret = qmi_txn_wait(&txn, plat_priv->ctrl_params.qmi_timeout);
		if (ret < 0) {
			cnss_pr_err("Failed to wait for response of QDSS download request, err: %d\n",
				    ret);
			goto err_send;
		}

		if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
			cnss_pr_err("QDSS download request failed, result: %d, err: %d\n",
				    resp->resp.result, resp->resp.error);
			ret = -resp->resp.result;
			goto err_send;
		}

		remaining -= req->data_len;
		temp += req->data_len;
		req->seg_id++;
	}

	release_firmware(fw_entry);
	kfree(req);
	kfree(resp);
	return 0;

err_send:
	release_firmware(fw_entry);
err_req_fw:
	kfree(req);
	kfree(resp);
	return ret;
}
/**
 * wlfw_send_qdss_trace_mode_req() - Switch QDSS trace mode in firmware
 * @plat_priv: Platform driver context; may be NULL (returns -ENODEV)
 * @mode: Trace mode to set (on/off per wlfw_qdss_trace_mode_enum_v01)
 * @option: Mode-specific option value forwarded to firmware
 *
 * Sends a synchronous QMI_WLFW_QDSS_TRACE_MODE_REQ carrying the mode,
 * option and a clamped hw_trc_disable_override value.  Any failure path
 * triggers CNSS_QMI_ASSERT().
 *
 * Return: 0 on success, negative errno or negated QMI result on failure.
 */
static int wlfw_send_qdss_trace_mode_req
		(struct cnss_plat_data *plat_priv,
		 enum wlfw_qdss_trace_mode_enum_v01 mode,
		 unsigned long long option)
{
	int rc = 0;
	int tmp = 0;
	struct wlfw_qdss_trace_mode_req_msg_v01 *req;
	struct wlfw_qdss_trace_mode_resp_msg_v01 *resp;
	struct qmi_txn txn;

	if (!plat_priv)
		return -ENODEV;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	req->mode_valid = 1;
	req->mode = mode;
	req->option_valid = 1;
	req->option = option;

	/*
	 * Clamp hw_trc_override into the valid QMI range: negative values
	 * map to QMI_PARAM_INVALID_V01, values above QMI_PARAM_DISABLE_V01
	 * saturate to QMI_PARAM_DISABLE_V01.
	 */
	tmp = plat_priv->hw_trc_override;

	req->hw_trc_disable_override_valid = 1;
	req->hw_trc_disable_override =
		(tmp > QMI_PARAM_DISABLE_V01 ? QMI_PARAM_DISABLE_V01 :
		 (tmp < 0 ? QMI_PARAM_INVALID_V01 : tmp));

	cnss_pr_dbg("%s: mode %u, option %llu, hw_trc_disable_override: %u",
		    __func__, mode, option, req->hw_trc_disable_override);

	rc = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
			  wlfw_qdss_trace_mode_resp_msg_v01_ei, resp);
	if (rc < 0) {
		cnss_pr_err("Fail to init txn for QDSS Mode resp %d\n",
			    rc);
		goto out;
	}

	rc = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
			      QMI_WLFW_QDSS_TRACE_MODE_REQ_V01,
			      WLFW_QDSS_TRACE_MODE_REQ_MSG_V01_MAX_MSG_LEN,
			      wlfw_qdss_trace_mode_req_msg_v01_ei, req);
	if (rc < 0) {
		/* Must cancel the txn we initialized above */
		qmi_txn_cancel(&txn);
		cnss_pr_err("Fail to send QDSS Mode req %d\n", rc);
		goto out;
	}

	rc = qmi_txn_wait(&txn, plat_priv->ctrl_params.qmi_timeout);
	if (rc < 0) {
		cnss_pr_err("QDSS Mode resp wait failed with rc %d\n",
			    rc);
		goto out;
	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("QMI QDSS Mode request rejected, result:%d error:%d\n",
			    resp->resp.result, resp->resp.error);
		rc = -resp->resp.result;
		goto out;
	}

	kfree(resp);
	kfree(req);
	return rc;

out:
	kfree(resp);
	kfree(req);
	CNSS_QMI_ASSERT();
	return rc;
}
/* Enable QDSS tracing in firmware (trace-on mode, no option bits). */
int wlfw_qdss_trace_start(struct cnss_plat_data *plat_priv)
{
	return wlfw_send_qdss_trace_mode_req(plat_priv,
					     QMI_WLFW_QDSS_TRACE_ON_V01, 0);
}
/* Disable QDSS tracing in firmware, forwarding mode-specific @option bits. */
int wlfw_qdss_trace_stop(struct cnss_plat_data *plat_priv, unsigned long long option)
{
	return wlfw_send_qdss_trace_mode_req(plat_priv, QMI_WLFW_QDSS_TRACE_OFF_V01,
					     option);
}
/**
 * cnss_wlfw_wlan_mode_send_sync() - Set the WLAN firmware driver mode
 * @plat_priv: Platform driver context; may be NULL (returns -ENODEV)
 * @mode: Target driver mode (mission/FTM/off etc. per cnss_driver_mode)
 *
 * Sends a synchronous QMI_WLFW_WLAN_MODE_REQ.  Two deliberate special
 * cases for CNSS_OFF: the request is skipped while recovery is in
 * progress, and a failure on the error path is converted to success
 * (the WLFW service is assumed already disconnected).
 *
 * Return: 0 on success (or tolerated CNSS_OFF failure), negative errno
 *         or negated QMI result otherwise.
 */
int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
				  enum cnss_driver_mode mode)
{
	struct wlfw_wlan_mode_req_msg_v01 *req;
	struct wlfw_wlan_mode_resp_msg_v01 *resp;
	struct qmi_txn txn;
	int ret = 0;

	if (!plat_priv)
		return -ENODEV;

	cnss_pr_dbg("Sending mode message, mode: %s(%d), state: 0x%lx\n",
		    cnss_qmi_mode_to_str(mode), mode, plat_priv->driver_state);

	/* Mode-off during recovery would race teardown; silently succeed */
	if (mode == CNSS_OFF &&
	    test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
		cnss_pr_dbg("Recovery is in progress, ignore mode off request\n");
		return 0;
	}

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	req->mode = (enum wlfw_driver_mode_enum_v01)mode;
	req->hw_debug_valid = 1;
	req->hw_debug = 0;

	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
			   wlfw_wlan_mode_resp_msg_v01_ei, resp);
	if (ret < 0) {
		cnss_pr_err("Failed to initialize txn for mode request, mode: %s(%d), err: %d\n",
			    cnss_qmi_mode_to_str(mode), mode, ret);
		goto out;
	}

	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
			       QMI_WLFW_WLAN_MODE_REQ_V01,
			       WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_wlan_mode_req_msg_v01_ei, req);
	if (ret < 0) {
		/* Must cancel the txn we initialized above */
		qmi_txn_cancel(&txn);
		cnss_pr_err("Failed to send mode request, mode: %s(%d), err: %d\n",
			    cnss_qmi_mode_to_str(mode), mode, ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
	if (ret < 0) {
		cnss_pr_err("Failed to wait for response of mode request, mode: %s(%d), err: %d\n",
			    cnss_qmi_mode_to_str(mode), mode, ret);
		goto out;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("Mode request failed, mode: %s(%d), result: %d, err: %d\n",
			    cnss_qmi_mode_to_str(mode), mode, resp->resp.result,
			    resp->resp.error);
		ret = -resp->resp.result;
		goto out;
	}

	kfree(req);
	kfree(resp);
	return 0;

out:
	/* A failed mode-off is tolerated: the service may already be gone */
	if (mode == CNSS_OFF) {
		cnss_pr_dbg("WLFW service is disconnected while sending mode off request\n");
		ret = 0;
	} else {
		CNSS_QMI_ASSERT();
	}
	kfree(req);
	kfree(resp);
	return ret;
}
/**
 * cnss_wlfw_wlan_cfg_send_sync() - Push the WLAN CE/service config to firmware
 * @plat_priv: Platform driver context; may be NULL (returns -ENODEV)
 * @config: Copy-engine target/service pipe and shadow register configs
 * @host_version: Host driver version string copied into the request
 *
 * Marshals the host's copy-engine target config, service pipe config and
 * shadow register config (v2 for most devices, v3 for KIWI/MANGO) into a
 * QMI_WLFW_WLAN_CFG_REQ and sends it synchronously.  Each table is clamped
 * to its QMI maximum element count before copying.
 *
 * Return: 0 on success, negative errno or negated QMI result on failure.
 */
int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
				 struct cnss_wlan_enable_cfg *config,
				 const char *host_version)
{
	struct wlfw_wlan_cfg_req_msg_v01 *req;
	struct wlfw_wlan_cfg_resp_msg_v01 *resp;
	struct qmi_txn txn;
	u32 i;
	int ret = 0;

	if (!plat_priv)
		return -ENODEV;

	cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n",
		    plat_priv->driver_state);

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		kfree(req);
		return -ENOMEM;
	}

	req->host_version_valid = 1;
	strlcpy(req->host_version, host_version,
		QMI_WLFW_MAX_STR_LEN_V01 + 1);

	/* Copy-engine target config, clamped to the QMI array maximum */
	req->tgt_cfg_valid = 1;
	if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
		req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
	else
		req->tgt_cfg_len = config->num_ce_tgt_cfg;
	for (i = 0; i < req->tgt_cfg_len; i++) {
		req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
		req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
		req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
		req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
		req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
	}

	/* Service-to-pipe mapping, clamped to the QMI array maximum */
	req->svc_cfg_valid = 1;
	if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
		req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
	else
		req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
	for (i = 0; i < req->svc_cfg_len; i++) {
		req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
		req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
		req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
	}

	/* Shadow registers: v2 for most devices, v3 for KIWI/MANGO */
	if (plat_priv->device_id != KIWI_DEVICE_ID &&
	    plat_priv->device_id != MANGO_DEVICE_ID) {
		req->shadow_reg_v2_valid = 1;
		if (config->num_shadow_reg_v2_cfg >
		    QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01)
			req->shadow_reg_v2_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01;
		else
			req->shadow_reg_v2_len = config->num_shadow_reg_v2_cfg;

		memcpy(req->shadow_reg_v2, config->shadow_reg_v2_cfg,
		       sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01)
		       * req->shadow_reg_v2_len);
	} else {
		req->shadow_reg_v3_valid = 1;
		if (config->num_shadow_reg_v3_cfg >
		    MAX_NUM_SHADOW_REG_V3)
			req->shadow_reg_v3_len = MAX_NUM_SHADOW_REG_V3;
		else
			req->shadow_reg_v3_len = config->num_shadow_reg_v3_cfg;

		plat_priv->num_shadow_regs_v3 = req->shadow_reg_v3_len;

		cnss_pr_dbg("Shadow reg v3 len: %d\n",
			    plat_priv->num_shadow_regs_v3);

		memcpy(req->shadow_reg_v3, config->shadow_reg_v3_cfg,
		       sizeof(struct wlfw_shadow_reg_v3_cfg_s_v01)
		       * req->shadow_reg_v3_len);
	}

	ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
			   wlfw_wlan_cfg_resp_msg_v01_ei, resp);
	if (ret < 0) {
		cnss_pr_err("Failed to initialize txn for WLAN config request, err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
			       QMI_WLFW_WLAN_CFG_REQ_V01,
			       WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_wlan_cfg_req_msg_v01_ei, req);
	if (ret < 0) {
		/* Must cancel the txn we initialized above */
		qmi_txn_cancel(&txn);
		cnss_pr_err("Failed to send WLAN config request, err: %d\n",
			    ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
	if (ret < 0) {
		cnss_pr_err("Failed to wait for response of WLAN config request, err: %d\n",
			    ret);
		goto out;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		cnss_pr_err("WLAN config request failed, result: %d, err: %d\n",
			    resp->resp.result, resp->resp.error);
		ret = -resp->resp.result;
		goto out;
	}

	kfree(req);
	kfree(resp);
	return 0;

out:
	CNSS_QMI_ASSERT();
	kfree(req);
	kfree(resp);
	return ret;
}
  1432. int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv,
  1433. u32 offset, u32 mem_type,
  1434. u32 data_len, u8 *data)
  1435. {
  1436. struct wlfw_athdiag_read_req_msg_v01 *req;
  1437. struct wlfw_athdiag_read_resp_msg_v01 *resp;
  1438. struct qmi_txn txn;
  1439. int ret = 0;
  1440. if (!plat_priv)
  1441. return -ENODEV;
  1442. if (!data || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
  1443. cnss_pr_err("Invalid parameters for athdiag read: data %pK, data_len %u\n",
  1444. data, data_len);
  1445. return -EINVAL;
  1446. }
  1447. cnss_pr_dbg("athdiag read: state 0x%lx, offset %x, mem_type %x, data_len %u\n",
  1448. plat_priv->driver_state, offset, mem_type, data_len);
  1449. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1450. if (!req)
  1451. return -ENOMEM;
  1452. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1453. if (!resp) {
  1454. kfree(req);
  1455. return -ENOMEM;
  1456. }
  1457. req->offset = offset;
  1458. req->mem_type = mem_type;
  1459. req->data_len = data_len;
  1460. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1461. wlfw_athdiag_read_resp_msg_v01_ei, resp);
  1462. if (ret < 0) {
  1463. cnss_pr_err("Failed to initialize txn for athdiag read request, err: %d\n",
  1464. ret);
  1465. goto out;
  1466. }
  1467. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1468. QMI_WLFW_ATHDIAG_READ_REQ_V01,
  1469. WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN,
  1470. wlfw_athdiag_read_req_msg_v01_ei, req);
  1471. if (ret < 0) {
  1472. qmi_txn_cancel(&txn);
  1473. cnss_pr_err("Failed to send athdiag read request, err: %d\n",
  1474. ret);
  1475. goto out;
  1476. }
  1477. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1478. if (ret < 0) {
  1479. cnss_pr_err("Failed to wait for response of athdiag read request, err: %d\n",
  1480. ret);
  1481. goto out;
  1482. }
  1483. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1484. cnss_pr_err("Athdiag read request failed, result: %d, err: %d\n",
  1485. resp->resp.result, resp->resp.error);
  1486. ret = -resp->resp.result;
  1487. goto out;
  1488. }
  1489. if (!resp->data_valid || resp->data_len != data_len) {
  1490. cnss_pr_err("athdiag read data is invalid, data_valid = %u, data_len = %u\n",
  1491. resp->data_valid, resp->data_len);
  1492. ret = -EINVAL;
  1493. goto out;
  1494. }
  1495. memcpy(data, resp->data, resp->data_len);
  1496. kfree(req);
  1497. kfree(resp);
  1498. return 0;
  1499. out:
  1500. kfree(req);
  1501. kfree(resp);
  1502. return ret;
  1503. }
  1504. int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv,
  1505. u32 offset, u32 mem_type,
  1506. u32 data_len, u8 *data)
  1507. {
  1508. struct wlfw_athdiag_write_req_msg_v01 *req;
  1509. struct wlfw_athdiag_write_resp_msg_v01 *resp;
  1510. struct qmi_txn txn;
  1511. int ret = 0;
  1512. if (!plat_priv)
  1513. return -ENODEV;
  1514. if (!data || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
  1515. cnss_pr_err("Invalid parameters for athdiag write: data %pK, data_len %u\n",
  1516. data, data_len);
  1517. return -EINVAL;
  1518. }
  1519. cnss_pr_dbg("athdiag write: state 0x%lx, offset %x, mem_type %x, data_len %u, data %pK\n",
  1520. plat_priv->driver_state, offset, mem_type, data_len, data);
  1521. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1522. if (!req)
  1523. return -ENOMEM;
  1524. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1525. if (!resp) {
  1526. kfree(req);
  1527. return -ENOMEM;
  1528. }
  1529. req->offset = offset;
  1530. req->mem_type = mem_type;
  1531. req->data_len = data_len;
  1532. memcpy(req->data, data, data_len);
  1533. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1534. wlfw_athdiag_write_resp_msg_v01_ei, resp);
  1535. if (ret < 0) {
  1536. cnss_pr_err("Failed to initialize txn for athdiag write request, err: %d\n",
  1537. ret);
  1538. goto out;
  1539. }
  1540. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1541. QMI_WLFW_ATHDIAG_WRITE_REQ_V01,
  1542. WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN,
  1543. wlfw_athdiag_write_req_msg_v01_ei, req);
  1544. if (ret < 0) {
  1545. qmi_txn_cancel(&txn);
  1546. cnss_pr_err("Failed to send athdiag write request, err: %d\n",
  1547. ret);
  1548. goto out;
  1549. }
  1550. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1551. if (ret < 0) {
  1552. cnss_pr_err("Failed to wait for response of athdiag write request, err: %d\n",
  1553. ret);
  1554. goto out;
  1555. }
  1556. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1557. cnss_pr_err("Athdiag write request failed, result: %d, err: %d\n",
  1558. resp->resp.result, resp->resp.error);
  1559. ret = -resp->resp.result;
  1560. goto out;
  1561. }
  1562. kfree(req);
  1563. kfree(resp);
  1564. return 0;
  1565. out:
  1566. kfree(req);
  1567. kfree(resp);
  1568. return ret;
  1569. }
  1570. int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
  1571. u8 fw_log_mode)
  1572. {
  1573. struct wlfw_ini_req_msg_v01 *req;
  1574. struct wlfw_ini_resp_msg_v01 *resp;
  1575. struct qmi_txn txn;
  1576. int ret = 0;
  1577. if (!plat_priv)
  1578. return -ENODEV;
  1579. cnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log_mode: %d\n",
  1580. plat_priv->driver_state, fw_log_mode);
  1581. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1582. if (!req)
  1583. return -ENOMEM;
  1584. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1585. if (!resp) {
  1586. kfree(req);
  1587. return -ENOMEM;
  1588. }
  1589. req->enablefwlog_valid = 1;
  1590. req->enablefwlog = fw_log_mode;
  1591. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1592. wlfw_ini_resp_msg_v01_ei, resp);
  1593. if (ret < 0) {
  1594. cnss_pr_err("Failed to initialize txn for ini request, fw_log_mode: %d, err: %d\n",
  1595. fw_log_mode, ret);
  1596. goto out;
  1597. }
  1598. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1599. QMI_WLFW_INI_REQ_V01,
  1600. WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
  1601. wlfw_ini_req_msg_v01_ei, req);
  1602. if (ret < 0) {
  1603. qmi_txn_cancel(&txn);
  1604. cnss_pr_err("Failed to send ini request, fw_log_mode: %d, err: %d\n",
  1605. fw_log_mode, ret);
  1606. goto out;
  1607. }
  1608. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1609. if (ret < 0) {
  1610. cnss_pr_err("Failed to wait for response of ini request, fw_log_mode: %d, err: %d\n",
  1611. fw_log_mode, ret);
  1612. goto out;
  1613. }
  1614. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1615. cnss_pr_err("Ini request failed, fw_log_mode: %d, result: %d, err: %d\n",
  1616. fw_log_mode, resp->resp.result, resp->resp.error);
  1617. ret = -resp->resp.result;
  1618. goto out;
  1619. }
  1620. kfree(req);
  1621. kfree(resp);
  1622. return 0;
  1623. out:
  1624. kfree(req);
  1625. kfree(resp);
  1626. return ret;
  1627. }
  1628. int cnss_wlfw_send_pcie_gen_speed_sync(struct cnss_plat_data *plat_priv)
  1629. {
  1630. struct wlfw_pcie_gen_switch_req_msg_v01 req;
  1631. struct wlfw_pcie_gen_switch_resp_msg_v01 resp = {0};
  1632. struct qmi_txn txn;
  1633. int ret = 0;
  1634. if (!plat_priv)
  1635. return -ENODEV;
  1636. if (plat_priv->pcie_gen_speed == QMI_PCIE_GEN_SPEED_INVALID_V01 ||
  1637. !plat_priv->fw_pcie_gen_switch) {
  1638. cnss_pr_dbg("PCIE Gen speed not setup\n");
  1639. return 0;
  1640. }
  1641. cnss_pr_dbg("Sending PCIE Gen speed: %d state: 0x%lx\n",
  1642. plat_priv->pcie_gen_speed, plat_priv->driver_state);
  1643. req.pcie_speed = (enum wlfw_pcie_gen_speed_v01)
  1644. plat_priv->pcie_gen_speed;
  1645. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1646. wlfw_pcie_gen_switch_resp_msg_v01_ei, &resp);
  1647. if (ret < 0) {
  1648. cnss_pr_err("Failed to initialize txn for PCIE speed switch err: %d\n",
  1649. ret);
  1650. goto out;
  1651. }
  1652. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1653. QMI_WLFW_PCIE_GEN_SWITCH_REQ_V01,
  1654. WLFW_PCIE_GEN_SWITCH_REQ_MSG_V01_MAX_MSG_LEN,
  1655. wlfw_pcie_gen_switch_req_msg_v01_ei, &req);
  1656. if (ret < 0) {
  1657. qmi_txn_cancel(&txn);
  1658. cnss_pr_err("Failed to send PCIE speed switch, err: %d\n", ret);
  1659. goto out;
  1660. }
  1661. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1662. if (ret < 0) {
  1663. cnss_pr_err("Failed to wait for PCIE Gen switch resp, err: %d\n",
  1664. ret);
  1665. goto out;
  1666. }
  1667. if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
  1668. cnss_pr_err("PCIE Gen Switch req failed, Speed: %d, result: %d, err: %d\n",
  1669. plat_priv->pcie_gen_speed, resp.resp.result,
  1670. resp.resp.error);
  1671. ret = -resp.resp.result;
  1672. }
  1673. out:
  1674. /* Reset PCIE Gen speed after one time use */
  1675. plat_priv->pcie_gen_speed = QMI_PCIE_GEN_SPEED_INVALID_V01;
  1676. return ret;
  1677. }
  1678. int cnss_wlfw_antenna_switch_send_sync(struct cnss_plat_data *plat_priv)
  1679. {
  1680. struct wlfw_antenna_switch_req_msg_v01 *req;
  1681. struct wlfw_antenna_switch_resp_msg_v01 *resp;
  1682. struct qmi_txn txn;
  1683. int ret = 0;
  1684. if (!plat_priv)
  1685. return -ENODEV;
  1686. cnss_pr_dbg("Sending antenna switch sync request, state: 0x%lx\n",
  1687. plat_priv->driver_state);
  1688. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1689. if (!req)
  1690. return -ENOMEM;
  1691. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1692. if (!resp) {
  1693. kfree(req);
  1694. return -ENOMEM;
  1695. }
  1696. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1697. wlfw_antenna_switch_resp_msg_v01_ei, resp);
  1698. if (ret < 0) {
  1699. cnss_pr_err("Failed to initialize txn for antenna switch request, err: %d\n",
  1700. ret);
  1701. goto out;
  1702. }
  1703. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1704. QMI_WLFW_ANTENNA_SWITCH_REQ_V01,
  1705. WLFW_ANTENNA_SWITCH_REQ_MSG_V01_MAX_MSG_LEN,
  1706. wlfw_antenna_switch_req_msg_v01_ei, req);
  1707. if (ret < 0) {
  1708. qmi_txn_cancel(&txn);
  1709. cnss_pr_err("Failed to send antenna switch request, err: %d\n",
  1710. ret);
  1711. goto out;
  1712. }
  1713. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1714. if (ret < 0) {
  1715. cnss_pr_err("Failed to wait for response of antenna switch request, err: %d\n",
  1716. ret);
  1717. goto out;
  1718. }
  1719. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1720. cnss_pr_dbg("Antenna switch request failed, result: %d, err: %d\n",
  1721. resp->resp.result, resp->resp.error);
  1722. ret = -resp->resp.result;
  1723. goto out;
  1724. }
  1725. if (resp->antenna_valid)
  1726. plat_priv->antenna = resp->antenna;
  1727. cnss_pr_dbg("Antenna valid: %u, antenna 0x%llx\n",
  1728. resp->antenna_valid, resp->antenna);
  1729. kfree(req);
  1730. kfree(resp);
  1731. return 0;
  1732. out:
  1733. kfree(req);
  1734. kfree(resp);
  1735. return ret;
  1736. }
  1737. int cnss_wlfw_antenna_grant_send_sync(struct cnss_plat_data *plat_priv)
  1738. {
  1739. struct wlfw_antenna_grant_req_msg_v01 *req;
  1740. struct wlfw_antenna_grant_resp_msg_v01 *resp;
  1741. struct qmi_txn txn;
  1742. int ret = 0;
  1743. if (!plat_priv)
  1744. return -ENODEV;
  1745. cnss_pr_dbg("Sending antenna grant sync request, state: 0x%lx, grant 0x%llx\n",
  1746. plat_priv->driver_state, plat_priv->grant);
  1747. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1748. if (!req)
  1749. return -ENOMEM;
  1750. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1751. if (!resp) {
  1752. kfree(req);
  1753. return -ENOMEM;
  1754. }
  1755. req->grant_valid = 1;
  1756. req->grant = plat_priv->grant;
  1757. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1758. wlfw_antenna_grant_resp_msg_v01_ei, resp);
  1759. if (ret < 0) {
  1760. cnss_pr_err("Failed to initialize txn for antenna grant request, err: %d\n",
  1761. ret);
  1762. goto out;
  1763. }
  1764. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1765. QMI_WLFW_ANTENNA_GRANT_REQ_V01,
  1766. WLFW_ANTENNA_GRANT_REQ_MSG_V01_MAX_MSG_LEN,
  1767. wlfw_antenna_grant_req_msg_v01_ei, req);
  1768. if (ret < 0) {
  1769. qmi_txn_cancel(&txn);
  1770. cnss_pr_err("Failed to send antenna grant request, err: %d\n",
  1771. ret);
  1772. goto out;
  1773. }
  1774. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1775. if (ret < 0) {
  1776. cnss_pr_err("Failed to wait for response of antenna grant request, err: %d\n",
  1777. ret);
  1778. goto out;
  1779. }
  1780. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1781. cnss_pr_err("Antenna grant request failed, result: %d, err: %d\n",
  1782. resp->resp.result, resp->resp.error);
  1783. ret = -resp->resp.result;
  1784. goto out;
  1785. }
  1786. kfree(req);
  1787. kfree(resp);
  1788. return 0;
  1789. out:
  1790. kfree(req);
  1791. kfree(resp);
  1792. return ret;
  1793. }
  1794. int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv)
  1795. {
  1796. struct wlfw_qdss_trace_mem_info_req_msg_v01 *req;
  1797. struct wlfw_qdss_trace_mem_info_resp_msg_v01 *resp;
  1798. struct qmi_txn txn;
  1799. struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
  1800. int ret = 0;
  1801. int i;
  1802. cnss_pr_dbg("Sending QDSS trace mem info, state: 0x%lx\n",
  1803. plat_priv->driver_state);
  1804. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1805. if (!req)
  1806. return -ENOMEM;
  1807. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1808. if (!resp) {
  1809. kfree(req);
  1810. return -ENOMEM;
  1811. }
  1812. if (plat_priv->qdss_mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
  1813. cnss_pr_err("Invalid seg len %u\n", plat_priv->qdss_mem_seg_len);
  1814. ret = -EINVAL;
  1815. goto out;
  1816. }
  1817. req->mem_seg_len = plat_priv->qdss_mem_seg_len;
  1818. for (i = 0; i < req->mem_seg_len; i++) {
  1819. cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
  1820. qdss_mem[i].va, &qdss_mem[i].pa,
  1821. qdss_mem[i].size, qdss_mem[i].type);
  1822. req->mem_seg[i].addr = qdss_mem[i].pa;
  1823. req->mem_seg[i].size = qdss_mem[i].size;
  1824. req->mem_seg[i].type = qdss_mem[i].type;
  1825. }
  1826. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1827. wlfw_qdss_trace_mem_info_resp_msg_v01_ei, resp);
  1828. if (ret < 0) {
  1829. cnss_pr_err("Fail to initialize txn for QDSS trace mem request: err %d\n",
  1830. ret);
  1831. goto out;
  1832. }
  1833. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1834. QMI_WLFW_QDSS_TRACE_MEM_INFO_REQ_V01,
  1835. WLFW_QDSS_TRACE_MEM_INFO_REQ_MSG_V01_MAX_MSG_LEN,
  1836. wlfw_qdss_trace_mem_info_req_msg_v01_ei, req);
  1837. if (ret < 0) {
  1838. qmi_txn_cancel(&txn);
  1839. cnss_pr_err("Fail to send QDSS trace mem info request: err %d\n",
  1840. ret);
  1841. goto out;
  1842. }
  1843. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1844. if (ret < 0) {
  1845. cnss_pr_err("Fail to wait for response of QDSS trace mem info request, err %d\n",
  1846. ret);
  1847. goto out;
  1848. }
  1849. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1850. cnss_pr_err("QDSS trace mem info request failed, result: %d, err: %d\n",
  1851. resp->resp.result, resp->resp.error);
  1852. ret = -resp->resp.result;
  1853. goto out;
  1854. }
  1855. kfree(req);
  1856. kfree(resp);
  1857. return 0;
  1858. out:
  1859. kfree(req);
  1860. kfree(resp);
  1861. return ret;
  1862. }
  1863. int cnss_wlfw_send_host_wfc_call_status(struct cnss_plat_data *plat_priv,
  1864. struct cnss_wfc_cfg cfg)
  1865. {
  1866. struct wlfw_wfc_call_status_req_msg_v01 *req;
  1867. struct wlfw_wfc_call_status_resp_msg_v01 *resp;
  1868. struct qmi_txn txn;
  1869. int ret = 0;
  1870. if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
  1871. cnss_pr_err("Drop host WFC indication as FW not initialized\n");
  1872. return -EINVAL;
  1873. }
  1874. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1875. if (!req)
  1876. return -ENOMEM;
  1877. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1878. if (!resp) {
  1879. kfree(req);
  1880. return -ENOMEM;
  1881. }
  1882. req->wfc_call_active_valid = 1;
  1883. req->wfc_call_active = cfg.mode;
  1884. cnss_pr_dbg("CNSS->FW: WFC_CALL_REQ: state: 0x%lx\n",
  1885. plat_priv->driver_state);
  1886. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1887. wlfw_wfc_call_status_resp_msg_v01_ei, resp);
  1888. if (ret < 0) {
  1889. cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Txn Init: Err %d\n",
  1890. ret);
  1891. goto out;
  1892. }
  1893. cnss_pr_dbg("Send WFC Mode: %d\n", cfg.mode);
  1894. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1895. QMI_WLFW_WFC_CALL_STATUS_REQ_V01,
  1896. WLFW_WFC_CALL_STATUS_REQ_MSG_V01_MAX_MSG_LEN,
  1897. wlfw_wfc_call_status_req_msg_v01_ei, req);
  1898. if (ret < 0) {
  1899. qmi_txn_cancel(&txn);
  1900. cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Send Err: %d\n",
  1901. ret);
  1902. goto out;
  1903. }
  1904. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1905. if (ret < 0) {
  1906. cnss_pr_err("FW->CNSS: WFC_CALL_RSP: QMI Wait Err: %d\n",
  1907. ret);
  1908. goto out;
  1909. }
  1910. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1911. cnss_pr_err("FW->CNSS: WFC_CALL_RSP: Result: %d Err: %d\n",
  1912. resp->resp.result, resp->resp.error);
  1913. ret = -EINVAL;
  1914. goto out;
  1915. }
  1916. ret = 0;
  1917. out:
  1918. kfree(req);
  1919. kfree(resp);
  1920. return ret;
  1921. }
  1922. static int cnss_wlfw_wfc_call_status_send_sync
  1923. (struct cnss_plat_data *plat_priv,
  1924. const struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg)
  1925. {
  1926. struct wlfw_wfc_call_status_req_msg_v01 *req;
  1927. struct wlfw_wfc_call_status_resp_msg_v01 *resp;
  1928. struct qmi_txn txn;
  1929. int ret = 0;
  1930. if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
  1931. cnss_pr_err("Drop IMS WFC indication as FW not initialized\n");
  1932. return -EINVAL;
  1933. }
  1934. req = kzalloc(sizeof(*req), GFP_KERNEL);
  1935. if (!req)
  1936. return -ENOMEM;
  1937. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  1938. if (!resp) {
  1939. kfree(req);
  1940. return -ENOMEM;
  1941. }
  1942. /**
  1943. * WFC Call r1 design has CNSS as pass thru using opaque hex buffer.
  1944. * But in r2 update QMI structure is expanded and as an effect qmi
  1945. * decoded structures have padding. Thus we cannot use buffer design.
  1946. * For backward compatibility for r1 design copy only wfc_call_active
  1947. * value in hex buffer.
  1948. */
  1949. req->wfc_call_status_len = sizeof(ind_msg->wfc_call_active);
  1950. req->wfc_call_status[0] = ind_msg->wfc_call_active;
  1951. /* wfc_call_active is mandatory in IMS indication */
  1952. req->wfc_call_active_valid = 1;
  1953. req->wfc_call_active = ind_msg->wfc_call_active;
  1954. req->all_wfc_calls_held_valid = ind_msg->all_wfc_calls_held_valid;
  1955. req->all_wfc_calls_held = ind_msg->all_wfc_calls_held;
  1956. req->is_wfc_emergency_valid = ind_msg->is_wfc_emergency_valid;
  1957. req->is_wfc_emergency = ind_msg->is_wfc_emergency;
  1958. req->twt_ims_start_valid = ind_msg->twt_ims_start_valid;
  1959. req->twt_ims_start = ind_msg->twt_ims_start;
  1960. req->twt_ims_int_valid = ind_msg->twt_ims_int_valid;
  1961. req->twt_ims_int = ind_msg->twt_ims_int;
  1962. req->media_quality_valid = ind_msg->media_quality_valid;
  1963. req->media_quality =
  1964. (enum wlfw_wfc_media_quality_v01)ind_msg->media_quality;
  1965. cnss_pr_dbg("CNSS->FW: WFC_CALL_REQ: state: 0x%lx\n",
  1966. plat_priv->driver_state);
  1967. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  1968. wlfw_wfc_call_status_resp_msg_v01_ei, resp);
  1969. if (ret < 0) {
  1970. cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Txn Init: Err %d\n",
  1971. ret);
  1972. goto out;
  1973. }
  1974. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  1975. QMI_WLFW_WFC_CALL_STATUS_REQ_V01,
  1976. WLFW_WFC_CALL_STATUS_REQ_MSG_V01_MAX_MSG_LEN,
  1977. wlfw_wfc_call_status_req_msg_v01_ei, req);
  1978. if (ret < 0) {
  1979. qmi_txn_cancel(&txn);
  1980. cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Send Err: %d\n",
  1981. ret);
  1982. goto out;
  1983. }
  1984. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  1985. if (ret < 0) {
  1986. cnss_pr_err("FW->CNSS: WFC_CALL_RSP: QMI Wait Err: %d\n",
  1987. ret);
  1988. goto out;
  1989. }
  1990. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  1991. cnss_pr_err("FW->CNSS: WFC_CALL_RSP: Result: %d Err: %d\n",
  1992. resp->resp.result, resp->resp.error);
  1993. ret = -resp->resp.result;
  1994. goto out;
  1995. }
  1996. ret = 0;
  1997. out:
  1998. kfree(req);
  1999. kfree(resp);
  2000. return ret;
  2001. }
  2002. int cnss_wlfw_dynamic_feature_mask_send_sync(struct cnss_plat_data *plat_priv)
  2003. {
  2004. struct wlfw_dynamic_feature_mask_req_msg_v01 *req;
  2005. struct wlfw_dynamic_feature_mask_resp_msg_v01 *resp;
  2006. struct qmi_txn txn;
  2007. int ret = 0;
  2008. cnss_pr_dbg("Sending dynamic feature mask 0x%llx, state: 0x%lx\n",
  2009. plat_priv->dynamic_feature,
  2010. plat_priv->driver_state);
  2011. req = kzalloc(sizeof(*req), GFP_KERNEL);
  2012. if (!req)
  2013. return -ENOMEM;
  2014. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  2015. if (!resp) {
  2016. kfree(req);
  2017. return -ENOMEM;
  2018. }
  2019. req->mask_valid = 1;
  2020. req->mask = plat_priv->dynamic_feature;
  2021. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  2022. wlfw_dynamic_feature_mask_resp_msg_v01_ei, resp);
  2023. if (ret < 0) {
  2024. cnss_pr_err("Fail to initialize txn for dynamic feature mask request: err %d\n",
  2025. ret);
  2026. goto out;
  2027. }
  2028. ret = qmi_send_request
  2029. (&plat_priv->qmi_wlfw, NULL, &txn,
  2030. QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01,
  2031. WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN,
  2032. wlfw_dynamic_feature_mask_req_msg_v01_ei, req);
  2033. if (ret < 0) {
  2034. qmi_txn_cancel(&txn);
  2035. cnss_pr_err("Fail to send dynamic feature mask request: err %d\n",
  2036. ret);
  2037. goto out;
  2038. }
  2039. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  2040. if (ret < 0) {
  2041. cnss_pr_err("Fail to wait for response of dynamic feature mask request, err %d\n",
  2042. ret);
  2043. goto out;
  2044. }
  2045. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  2046. cnss_pr_err("Dynamic feature mask request failed, result: %d, err: %d\n",
  2047. resp->resp.result, resp->resp.error);
  2048. ret = -resp->resp.result;
  2049. goto out;
  2050. }
  2051. out:
  2052. kfree(req);
  2053. kfree(resp);
  2054. return ret;
  2055. }
  2056. int cnss_wlfw_get_info_send_sync(struct cnss_plat_data *plat_priv, int type,
  2057. void *cmd, int cmd_len)
  2058. {
  2059. struct wlfw_get_info_req_msg_v01 *req;
  2060. struct wlfw_get_info_resp_msg_v01 *resp;
  2061. struct qmi_txn txn;
  2062. int ret = 0;
  2063. cnss_pr_buf("Sending get info message, type: %d, cmd length: %d, state: 0x%lx\n",
  2064. type, cmd_len, plat_priv->driver_state);
  2065. if (cmd_len > QMI_WLFW_MAX_DATA_SIZE_V01)
  2066. return -EINVAL;
  2067. req = kzalloc(sizeof(*req), GFP_KERNEL);
  2068. if (!req)
  2069. return -ENOMEM;
  2070. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  2071. if (!resp) {
  2072. kfree(req);
  2073. return -ENOMEM;
  2074. }
  2075. req->type = type;
  2076. req->data_len = cmd_len;
  2077. memcpy(req->data, cmd, req->data_len);
  2078. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  2079. wlfw_get_info_resp_msg_v01_ei, resp);
  2080. if (ret < 0) {
  2081. cnss_pr_err("Failed to initialize txn for get info request, err: %d\n",
  2082. ret);
  2083. goto out;
  2084. }
  2085. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  2086. QMI_WLFW_GET_INFO_REQ_V01,
  2087. WLFW_GET_INFO_REQ_MSG_V01_MAX_MSG_LEN,
  2088. wlfw_get_info_req_msg_v01_ei, req);
  2089. if (ret < 0) {
  2090. qmi_txn_cancel(&txn);
  2091. cnss_pr_err("Failed to send get info request, err: %d\n",
  2092. ret);
  2093. goto out;
  2094. }
  2095. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  2096. if (ret < 0) {
  2097. cnss_pr_err("Failed to wait for response of get info request, err: %d\n",
  2098. ret);
  2099. goto out;
  2100. }
  2101. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  2102. cnss_pr_err("Get info request failed, result: %d, err: %d\n",
  2103. resp->resp.result, resp->resp.error);
  2104. ret = -resp->resp.result;
  2105. goto out;
  2106. }
  2107. kfree(req);
  2108. kfree(resp);
  2109. return 0;
  2110. out:
  2111. kfree(req);
  2112. kfree(resp);
  2113. return ret;
  2114. }
  2115. unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv)
  2116. {
  2117. return QMI_WLFW_TIMEOUT_MS;
  2118. }
  2119. static void cnss_wlfw_request_mem_ind_cb(struct qmi_handle *qmi_wlfw,
  2120. struct sockaddr_qrtr *sq,
  2121. struct qmi_txn *txn, const void *data)
  2122. {
  2123. struct cnss_plat_data *plat_priv =
  2124. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2125. const struct wlfw_request_mem_ind_msg_v01 *ind_msg = data;
  2126. int i;
  2127. cnss_pr_dbg("Received QMI WLFW request memory indication\n");
  2128. if (!txn) {
  2129. cnss_pr_err("Spurious indication\n");
  2130. return;
  2131. }
  2132. if (ind_msg->mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
  2133. cnss_pr_err("Invalid seg len %u\n", ind_msg->mem_seg_len);
  2134. return;
  2135. }
  2136. plat_priv->fw_mem_seg_len = ind_msg->mem_seg_len;
  2137. for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
  2138. cnss_pr_dbg("FW requests for memory, size: 0x%x, type: %u\n",
  2139. ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
  2140. plat_priv->fw_mem[i].type = ind_msg->mem_seg[i].type;
  2141. plat_priv->fw_mem[i].size = ind_msg->mem_seg[i].size;
  2142. if (!plat_priv->fw_mem[i].va &&
  2143. plat_priv->fw_mem[i].type == CNSS_MEM_TYPE_DDR)
  2144. plat_priv->fw_mem[i].attrs |=
  2145. DMA_ATTR_FORCE_CONTIGUOUS;
  2146. if (plat_priv->fw_mem[i].type == CNSS_MEM_CAL_V01)
  2147. plat_priv->cal_mem = &plat_priv->fw_mem[i];
  2148. }
  2149. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM,
  2150. 0, NULL);
  2151. }
  2152. static void cnss_wlfw_fw_mem_ready_ind_cb(struct qmi_handle *qmi_wlfw,
  2153. struct sockaddr_qrtr *sq,
  2154. struct qmi_txn *txn, const void *data)
  2155. {
  2156. struct cnss_plat_data *plat_priv =
  2157. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2158. cnss_pr_dbg("Received QMI WLFW FW memory ready indication\n");
  2159. if (!txn) {
  2160. cnss_pr_err("Spurious indication\n");
  2161. return;
  2162. }
  2163. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_MEM_READY,
  2164. 0, NULL);
  2165. }
  2166. /**
  2167. * cnss_wlfw_fw_ready_ind_cb: FW ready indication handler (Helium arch)
  2168. *
  2169. * This event is not required for HST/ HSP as FW calibration done is
  2170. * provided in QMI_WLFW_CAL_DONE_IND_V01
  2171. */
  2172. static void cnss_wlfw_fw_ready_ind_cb(struct qmi_handle *qmi_wlfw,
  2173. struct sockaddr_qrtr *sq,
  2174. struct qmi_txn *txn, const void *data)
  2175. {
  2176. struct cnss_plat_data *plat_priv =
  2177. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2178. struct cnss_cal_info *cal_info;
  2179. if (!txn) {
  2180. cnss_pr_err("Spurious indication\n");
  2181. return;
  2182. }
  2183. if (plat_priv->device_id == QCA6390_DEVICE_ID ||
  2184. plat_priv->device_id == QCA6490_DEVICE_ID) {
  2185. cnss_pr_dbg("Ignore FW Ready Indication for HST/HSP");
  2186. return;
  2187. }
  2188. cnss_pr_dbg("Received QMI WLFW FW ready indication.\n");
  2189. cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
  2190. if (!cal_info)
  2191. return;
  2192. cal_info->cal_status = CNSS_CAL_DONE;
  2193. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
  2194. 0, cal_info);
  2195. }
  2196. static void cnss_wlfw_fw_init_done_ind_cb(struct qmi_handle *qmi_wlfw,
  2197. struct sockaddr_qrtr *sq,
  2198. struct qmi_txn *txn, const void *data)
  2199. {
  2200. struct cnss_plat_data *plat_priv =
  2201. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2202. cnss_pr_dbg("Received QMI WLFW FW initialization done indication\n");
  2203. if (!txn) {
  2204. cnss_pr_err("Spurious indication\n");
  2205. return;
  2206. }
  2207. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_READY, 0, NULL);
  2208. }
  2209. static void cnss_wlfw_pin_result_ind_cb(struct qmi_handle *qmi_wlfw,
  2210. struct sockaddr_qrtr *sq,
  2211. struct qmi_txn *txn, const void *data)
  2212. {
  2213. struct cnss_plat_data *plat_priv =
  2214. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2215. const struct wlfw_pin_connect_result_ind_msg_v01 *ind_msg = data;
  2216. cnss_pr_dbg("Received QMI WLFW pin connect result indication\n");
  2217. if (!txn) {
  2218. cnss_pr_err("Spurious indication\n");
  2219. return;
  2220. }
  2221. if (ind_msg->pwr_pin_result_valid)
  2222. plat_priv->pin_result.fw_pwr_pin_result =
  2223. ind_msg->pwr_pin_result;
  2224. if (ind_msg->phy_io_pin_result_valid)
  2225. plat_priv->pin_result.fw_phy_io_pin_result =
  2226. ind_msg->phy_io_pin_result;
  2227. if (ind_msg->rf_pin_result_valid)
  2228. plat_priv->pin_result.fw_rf_pin_result = ind_msg->rf_pin_result;
  2229. cnss_pr_dbg("Pin connect Result: pwr_pin: 0x%x phy_io_pin: 0x%x rf_io_pin: 0x%x\n",
  2230. ind_msg->pwr_pin_result, ind_msg->phy_io_pin_result,
  2231. ind_msg->rf_pin_result);
  2232. }
  2233. int cnss_wlfw_cal_report_req_send_sync(struct cnss_plat_data *plat_priv,
  2234. u32 cal_file_download_size)
  2235. {
  2236. struct wlfw_cal_report_req_msg_v01 req = {0};
  2237. struct wlfw_cal_report_resp_msg_v01 resp = {0};
  2238. struct qmi_txn txn;
  2239. int ret = 0;
  2240. cnss_pr_dbg("Sending cal file report request. File size: %d, state: 0x%lx\n",
  2241. cal_file_download_size, plat_priv->driver_state);
  2242. req.cal_file_download_size_valid = 1;
  2243. req.cal_file_download_size = cal_file_download_size;
  2244. ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
  2245. wlfw_cal_report_resp_msg_v01_ei, &resp);
  2246. if (ret < 0) {
  2247. cnss_pr_err("Failed to initialize txn for Cal Report request, err: %d\n",
  2248. ret);
  2249. goto out;
  2250. }
  2251. ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
  2252. QMI_WLFW_CAL_REPORT_REQ_V01,
  2253. WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
  2254. wlfw_cal_report_req_msg_v01_ei, &req);
  2255. if (ret < 0) {
  2256. qmi_txn_cancel(&txn);
  2257. cnss_pr_err("Failed to send Cal Report request, err: %d\n",
  2258. ret);
  2259. goto out;
  2260. }
  2261. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  2262. if (ret < 0) {
  2263. cnss_pr_err("Failed to wait for response of Cal Report request, err: %d\n",
  2264. ret);
  2265. goto out;
  2266. }
  2267. if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
  2268. cnss_pr_err("Cal Report request failed, result: %d, err: %d\n",
  2269. resp.resp.result, resp.resp.error);
  2270. ret = -resp.resp.result;
  2271. goto out;
  2272. }
  2273. out:
  2274. return ret;
  2275. }
  2276. static void cnss_wlfw_cal_done_ind_cb(struct qmi_handle *qmi_wlfw,
  2277. struct sockaddr_qrtr *sq,
  2278. struct qmi_txn *txn, const void *data)
  2279. {
  2280. struct cnss_plat_data *plat_priv =
  2281. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2282. const struct wlfw_cal_done_ind_msg_v01 *ind = data;
  2283. struct cnss_cal_info *cal_info;
  2284. cnss_pr_dbg("Received Cal done indication. File size: %d\n",
  2285. ind->cal_file_upload_size);
  2286. cnss_pr_info("Calibration took %d ms\n",
  2287. jiffies_to_msecs(jiffies - plat_priv->cal_time));
  2288. if (!txn) {
  2289. cnss_pr_err("Spurious indication\n");
  2290. return;
  2291. }
  2292. if (ind->cal_file_upload_size_valid)
  2293. plat_priv->cal_file_size = ind->cal_file_upload_size;
  2294. cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
  2295. if (!cal_info)
  2296. return;
  2297. cal_info->cal_status = CNSS_CAL_DONE;
  2298. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
  2299. 0, cal_info);
  2300. }
  2301. static void cnss_wlfw_qdss_trace_req_mem_ind_cb(struct qmi_handle *qmi_wlfw,
  2302. struct sockaddr_qrtr *sq,
  2303. struct qmi_txn *txn,
  2304. const void *data)
  2305. {
  2306. struct cnss_plat_data *plat_priv =
  2307. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2308. const struct wlfw_qdss_trace_req_mem_ind_msg_v01 *ind_msg = data;
  2309. int i;
  2310. cnss_pr_dbg("Received QMI WLFW QDSS trace request mem indication\n");
  2311. if (!txn) {
  2312. cnss_pr_err("Spurious indication\n");
  2313. return;
  2314. }
  2315. if (plat_priv->qdss_mem_seg_len) {
  2316. cnss_pr_err("Ignore double allocation for QDSS trace, current len %u\n",
  2317. plat_priv->qdss_mem_seg_len);
  2318. return;
  2319. }
  2320. if (ind_msg->mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
  2321. cnss_pr_err("Invalid seg len %u\n", ind_msg->mem_seg_len);
  2322. return;
  2323. }
  2324. plat_priv->qdss_mem_seg_len = ind_msg->mem_seg_len;
  2325. for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
  2326. cnss_pr_dbg("QDSS requests for memory, size: 0x%x, type: %u\n",
  2327. ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
  2328. plat_priv->qdss_mem[i].type = ind_msg->mem_seg[i].type;
  2329. plat_priv->qdss_mem[i].size = ind_msg->mem_seg[i].size;
  2330. }
  2331. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM,
  2332. 0, NULL);
  2333. }
/**
 * cnss_wlfw_fw_mem_file_save_ind_cb: Save given FW mem to filesystem
 *
 * QDSS_TRACE_SAVE_IND feature is overloaded to provide any host allocated
 * fw memory segment for dumping to file system. Only one type of mem can be
 * saved per indication and is provided in mem seg index 0.
 *
 * The indication is repackaged into a cnss_qmi_event_fw_mem_file_save_data
 * (ownership transferred to the driver event thread) and posted either as a
 * QDSS_TRACE_REQ_DATA event (source == 1, i.e. ETB) or a FW_MEM_FILE_SAVE
 * event, with a default file name when firmware supplies none.
 *
 * Return: None
 */
static void cnss_wlfw_fw_mem_file_save_ind_cb(struct qmi_handle *qmi_wlfw,
					      struct sockaddr_qrtr *sq,
					      struct qmi_txn *txn,
					      const void *data)
{
	struct cnss_plat_data *plat_priv =
		container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
	const struct wlfw_qdss_trace_save_ind_msg_v01 *ind_msg = data;
	struct cnss_qmi_event_fw_mem_file_save_data *event_data;
	int i = 0;

	if (!txn || !data) {
		cnss_pr_err("Spurious indication\n");
		return;
	}
	cnss_pr_dbg("QMI fw_mem_file_save: source: %d mem_seg: %d type: %u len: %u\n",
		    ind_msg->source, ind_msg->mem_seg_valid,
		    ind_msg->mem_seg[0].type, ind_msg->mem_seg_len);
	event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
	if (!event_data)
		return;
	/* Per the contract above, seg index 0 defines the (single) mem type. */
	event_data->mem_type = ind_msg->mem_seg[0].type;
	event_data->mem_seg_len = ind_msg->mem_seg_len;
	event_data->total_size = ind_msg->total_size;
	if (ind_msg->mem_seg_valid) {
		/* NOTE(review): this bound uses QMI_WLFW_MAX_STR_LEN_V01, a
		 * string-length constant; confirm it does not exceed the
		 * mem_seg[] array capacity (QMI_WLFW_MAX_NUM_MEM_SEG_V01) —
		 * the struct definitions are not visible here.
		 */
		if (ind_msg->mem_seg_len > QMI_WLFW_MAX_STR_LEN_V01) {
			cnss_pr_err("Invalid seg len indication\n");
			goto free_event_data;
		}
		for (i = 0; i < ind_msg->mem_seg_len; i++) {
			event_data->mem_seg[i].addr = ind_msg->mem_seg[i].addr;
			event_data->mem_seg[i].size = ind_msg->mem_seg[i].size;
			/* All segments must share the type of seg 0. */
			if (event_data->mem_type != ind_msg->mem_seg[i].type) {
				cnss_pr_err("FW Mem file save ind cannot have multiple mem types\n");
				goto free_event_data;
			}
			cnss_pr_dbg("seg-%d: addr 0x%llx size 0x%x\n",
				    i, ind_msg->mem_seg[i].addr,
				    ind_msg->mem_seg[i].size);
		}
	}
	if (ind_msg->file_name_valid)
		strlcpy(event_data->file_name, ind_msg->file_name,
			QMI_WLFW_MAX_STR_LEN_V01 + 1);
	if (ind_msg->source == 1) {
		/* ETB source: fall back to a fixed name if none given. */
		if (!ind_msg->file_name_valid)
			strlcpy(event_data->file_name, "qdss_trace_wcss_etb",
				QMI_WLFW_MAX_STR_LEN_V01 + 1);
		cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA,
				       0, event_data);
	} else {
		/* DDR source: default name depends on the memory type. */
		if (event_data->mem_type == QMI_WLFW_MEM_QDSS_V01) {
			if (!ind_msg->file_name_valid)
				strlcpy(event_data->file_name, "qdss_trace_ddr",
					QMI_WLFW_MAX_STR_LEN_V01 + 1);
		} else {
			if (!ind_msg->file_name_valid)
				strlcpy(event_data->file_name, "fw_mem_dump",
					QMI_WLFW_MAX_STR_LEN_V01 + 1);
		}
		cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_MEM_FILE_SAVE,
				       0, event_data);
	}
	return;

free_event_data:
	kfree(event_data);
}
  2409. static void cnss_wlfw_qdss_trace_free_ind_cb(struct qmi_handle *qmi_wlfw,
  2410. struct sockaddr_qrtr *sq,
  2411. struct qmi_txn *txn,
  2412. const void *data)
  2413. {
  2414. struct cnss_plat_data *plat_priv =
  2415. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2416. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_FREE,
  2417. 0, NULL);
  2418. }
  2419. static void cnss_wlfw_respond_get_info_ind_cb(struct qmi_handle *qmi_wlfw,
  2420. struct sockaddr_qrtr *sq,
  2421. struct qmi_txn *txn,
  2422. const void *data)
  2423. {
  2424. struct cnss_plat_data *plat_priv =
  2425. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2426. const struct wlfw_respond_get_info_ind_msg_v01 *ind_msg = data;
  2427. cnss_pr_buf("Received QMI WLFW respond get info indication\n");
  2428. if (!txn) {
  2429. cnss_pr_err("Spurious indication\n");
  2430. return;
  2431. }
  2432. cnss_pr_buf("Extract message with event length: %d, type: %d, is last: %d, seq no: %d\n",
  2433. ind_msg->data_len, ind_msg->type,
  2434. ind_msg->is_last, ind_msg->seq_no);
  2435. if (plat_priv->get_info_cb_ctx && plat_priv->get_info_cb)
  2436. plat_priv->get_info_cb(plat_priv->get_info_cb_ctx,
  2437. (void *)ind_msg->data,
  2438. ind_msg->data_len);
  2439. }
  2440. static int cnss_ims_wfc_call_twt_cfg_send_sync
  2441. (struct cnss_plat_data *plat_priv,
  2442. const struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg)
  2443. {
  2444. struct ims_private_service_wfc_call_twt_config_req_msg_v01 *req;
  2445. struct ims_private_service_wfc_call_twt_config_rsp_msg_v01 *resp;
  2446. struct qmi_txn txn;
  2447. int ret = 0;
  2448. if (!test_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state)) {
  2449. cnss_pr_err("Drop FW WFC indication as IMS QMI not connected\n");
  2450. return -EINVAL;
  2451. }
  2452. req = kzalloc(sizeof(*req), GFP_KERNEL);
  2453. if (!req)
  2454. return -ENOMEM;
  2455. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  2456. if (!resp) {
  2457. kfree(req);
  2458. return -ENOMEM;
  2459. }
  2460. req->twt_sta_start_valid = ind_msg->twt_sta_start_valid;
  2461. req->twt_sta_start = ind_msg->twt_sta_start;
  2462. req->twt_sta_int_valid = ind_msg->twt_sta_int_valid;
  2463. req->twt_sta_int = ind_msg->twt_sta_int;
  2464. req->twt_sta_upo_valid = ind_msg->twt_sta_upo_valid;
  2465. req->twt_sta_upo = ind_msg->twt_sta_upo;
  2466. req->twt_sta_sp_valid = ind_msg->twt_sta_sp_valid;
  2467. req->twt_sta_sp = ind_msg->twt_sta_sp;
  2468. req->twt_sta_dl_valid = req->twt_sta_dl_valid;
  2469. req->twt_sta_dl = req->twt_sta_dl;
  2470. req->twt_sta_config_changed_valid =
  2471. ind_msg->twt_sta_config_changed_valid;
  2472. req->twt_sta_config_changed = ind_msg->twt_sta_config_changed;
  2473. cnss_pr_dbg("CNSS->IMS: TWT_CFG_REQ: state: 0x%lx\n",
  2474. plat_priv->driver_state);
  2475. ret =
  2476. qmi_txn_init(&plat_priv->ims_qmi, &txn,
  2477. ims_private_service_wfc_call_twt_config_rsp_msg_v01_ei,
  2478. resp);
  2479. if (ret < 0) {
  2480. cnss_pr_err("CNSS->IMS: TWT_CFG_REQ: QMI Txn Init Err: %d\n",
  2481. ret);
  2482. goto out;
  2483. }
  2484. ret =
  2485. qmi_send_request(&plat_priv->ims_qmi, NULL, &txn,
  2486. QMI_IMS_PRIVATE_SERVICE_WFC_CALL_TWT_CONFIG_REQ_V01,
  2487. IMS_PRIVATE_SERVICE_WFC_CALL_TWT_CONFIG_REQ_MSG_V01_MAX_MSG_LEN,
  2488. ims_private_service_wfc_call_twt_config_req_msg_v01_ei, req);
  2489. if (ret < 0) {
  2490. qmi_txn_cancel(&txn);
  2491. cnss_pr_err("CNSS->IMS: TWT_CFG_REQ: QMI Send Err: %d\n", ret);
  2492. goto out;
  2493. }
  2494. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  2495. if (ret < 0) {
  2496. cnss_pr_err("IMS->CNSS: TWT_CFG_RSP: QMI Wait Err: %d\n", ret);
  2497. goto out;
  2498. }
  2499. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  2500. cnss_pr_err("IMS->CNSS: TWT_CFG_RSP: Result: %d Err: %d\n",
  2501. resp->resp.result, resp->resp.error);
  2502. ret = -resp->resp.result;
  2503. goto out;
  2504. }
  2505. ret = 0;
  2506. out:
  2507. kfree(req);
  2508. kfree(resp);
  2509. return ret;
  2510. }
  2511. int cnss_process_twt_cfg_ind_event(struct cnss_plat_data *plat_priv,
  2512. void *data)
  2513. {
  2514. int ret;
  2515. struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg = data;
  2516. ret = cnss_ims_wfc_call_twt_cfg_send_sync(plat_priv, ind_msg);
  2517. kfree(data);
  2518. return ret;
  2519. }
  2520. static void cnss_wlfw_process_twt_cfg_ind(struct qmi_handle *qmi_wlfw,
  2521. struct sockaddr_qrtr *sq,
  2522. struct qmi_txn *txn,
  2523. const void *data)
  2524. {
  2525. struct cnss_plat_data *plat_priv =
  2526. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2527. const struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg = data;
  2528. struct wlfw_wfc_call_twt_config_ind_msg_v01 *event_data;
  2529. if (!txn) {
  2530. cnss_pr_err("FW->CNSS: TWT_CFG_IND: Spurious indication\n");
  2531. return;
  2532. }
  2533. if (!ind_msg) {
  2534. cnss_pr_err("FW->CNSS: TWT_CFG_IND: Invalid indication\n");
  2535. return;
  2536. }
  2537. cnss_pr_dbg("FW->CNSS: TWT_CFG_IND: %x %llx, %x %x, %x %x, %x %x, %x %x, %x %x\n",
  2538. ind_msg->twt_sta_start_valid, ind_msg->twt_sta_start,
  2539. ind_msg->twt_sta_int_valid, ind_msg->twt_sta_int,
  2540. ind_msg->twt_sta_upo_valid, ind_msg->twt_sta_upo,
  2541. ind_msg->twt_sta_sp_valid, ind_msg->twt_sta_sp,
  2542. ind_msg->twt_sta_dl_valid, ind_msg->twt_sta_dl,
  2543. ind_msg->twt_sta_config_changed_valid,
  2544. ind_msg->twt_sta_config_changed);
  2545. event_data = kmemdup(ind_msg, sizeof(*event_data), GFP_KERNEL);
  2546. if (!event_data)
  2547. return;
  2548. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND, 0,
  2549. event_data);
  2550. }
/* Dispatch table mapping WLFW QMI indication message IDs to their decode
 * tables and callbacks; registered with the handle in cnss_qmi_init().
 * Terminated by the empty sentinel entry required by the QMI core.
 */
static struct qmi_msg_handler qmi_wlfw_msg_handlers[] = {
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
		.ei = wlfw_request_mem_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_request_mem_ind_msg_v01),
		.fn = cnss_wlfw_request_mem_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
		.ei = wlfw_fw_mem_ready_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_fw_mem_ready_ind_msg_v01),
		.fn = cnss_wlfw_fw_mem_ready_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_FW_READY_IND_V01,
		.ei = wlfw_fw_ready_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
		.fn = cnss_wlfw_fw_ready_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_FW_INIT_DONE_IND_V01,
		.ei = wlfw_fw_init_done_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_fw_init_done_ind_msg_v01),
		.fn = cnss_wlfw_fw_init_done_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01,
		.ei = wlfw_pin_connect_result_ind_msg_v01_ei,
		.decoded_size =
		sizeof(struct wlfw_pin_connect_result_ind_msg_v01),
		.fn = cnss_wlfw_pin_result_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_CAL_DONE_IND_V01,
		.ei = wlfw_cal_done_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_cal_done_ind_msg_v01),
		.fn = cnss_wlfw_cal_done_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_QDSS_TRACE_REQ_MEM_IND_V01,
		.ei = wlfw_qdss_trace_req_mem_ind_msg_v01_ei,
		.decoded_size =
		sizeof(struct wlfw_qdss_trace_req_mem_ind_msg_v01),
		.fn = cnss_wlfw_qdss_trace_req_mem_ind_cb
	},
	{
		/* SAVE_IND is overloaded for generic FW mem file save. */
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_QDSS_TRACE_SAVE_IND_V01,
		.ei = wlfw_qdss_trace_save_ind_msg_v01_ei,
		.decoded_size =
		sizeof(struct wlfw_qdss_trace_save_ind_msg_v01),
		.fn = cnss_wlfw_fw_mem_file_save_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_QDSS_TRACE_FREE_IND_V01,
		.ei = wlfw_qdss_trace_free_ind_msg_v01_ei,
		.decoded_size =
		sizeof(struct wlfw_qdss_trace_free_ind_msg_v01),
		.fn = cnss_wlfw_qdss_trace_free_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_RESPOND_GET_INFO_IND_V01,
		.ei = wlfw_respond_get_info_ind_msg_v01_ei,
		.decoded_size =
		sizeof(struct wlfw_respond_get_info_ind_msg_v01),
		.fn = cnss_wlfw_respond_get_info_ind_cb
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_WFC_CALL_TWT_CONFIG_IND_V01,
		.ei = wlfw_wfc_call_twt_config_ind_msg_v01_ei,
		.decoded_size =
		sizeof(struct wlfw_wfc_call_twt_config_ind_msg_v01),
		.fn = cnss_wlfw_process_twt_cfg_ind
	},
	{}
};
  2637. static int cnss_wlfw_connect_to_server(struct cnss_plat_data *plat_priv,
  2638. void *data)
  2639. {
  2640. struct cnss_qmi_event_server_arrive_data *event_data = data;
  2641. struct qmi_handle *qmi_wlfw = &plat_priv->qmi_wlfw;
  2642. struct sockaddr_qrtr sq = { 0 };
  2643. int ret = 0;
  2644. if (!event_data)
  2645. return -EINVAL;
  2646. sq.sq_family = AF_QIPCRTR;
  2647. sq.sq_node = event_data->node;
  2648. sq.sq_port = event_data->port;
  2649. ret = kernel_connect(qmi_wlfw->sock, (struct sockaddr *)&sq,
  2650. sizeof(sq), 0);
  2651. if (ret < 0) {
  2652. cnss_pr_err("Failed to connect to QMI WLFW remote service port\n");
  2653. goto out;
  2654. }
  2655. set_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state);
  2656. cnss_pr_info("QMI WLFW service connected, state: 0x%lx\n",
  2657. plat_priv->driver_state);
  2658. kfree(data);
  2659. return 0;
  2660. out:
  2661. CNSS_QMI_ASSERT();
  2662. kfree(data);
  2663. return ret;
  2664. }
  2665. int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv, void *data)
  2666. {
  2667. int ret = 0;
  2668. if (!plat_priv)
  2669. return -ENODEV;
  2670. if (test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state)) {
  2671. cnss_pr_err("Unexpected WLFW server arrive\n");
  2672. CNSS_ASSERT(0);
  2673. return -EINVAL;
  2674. }
  2675. cnss_ignore_qmi_failure(false);
  2676. ret = cnss_wlfw_connect_to_server(plat_priv, data);
  2677. if (ret < 0)
  2678. goto out;
  2679. ret = cnss_wlfw_ind_register_send_sync(plat_priv);
  2680. if (ret < 0) {
  2681. if (ret == -EALREADY)
  2682. ret = 0;
  2683. goto out;
  2684. }
  2685. ret = cnss_wlfw_host_cap_send_sync(plat_priv);
  2686. if (ret < 0)
  2687. goto out;
  2688. return 0;
  2689. out:
  2690. return ret;
  2691. }
  2692. int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv)
  2693. {
  2694. int ret;
  2695. if (!plat_priv)
  2696. return -ENODEV;
  2697. clear_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state);
  2698. cnss_pr_info("QMI WLFW service disconnected, state: 0x%lx\n",
  2699. plat_priv->driver_state);
  2700. cnss_qmi_deinit(plat_priv);
  2701. clear_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state);
  2702. ret = cnss_qmi_init(plat_priv);
  2703. if (ret < 0) {
  2704. cnss_pr_err("QMI WLFW service registraton failed, ret\n", ret);
  2705. CNSS_ASSERT(0);
  2706. }
  2707. return 0;
  2708. }
  2709. static int wlfw_new_server(struct qmi_handle *qmi_wlfw,
  2710. struct qmi_service *service)
  2711. {
  2712. struct cnss_plat_data *plat_priv =
  2713. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2714. struct cnss_qmi_event_server_arrive_data *event_data;
  2715. if (plat_priv && test_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state)) {
  2716. cnss_pr_info("WLFW server delete in progress, Ignore server arrive, state: 0x%lx\n",
  2717. plat_priv->driver_state);
  2718. return 0;
  2719. }
  2720. cnss_pr_dbg("WLFW server arriving: node %u port %u\n",
  2721. service->node, service->port);
  2722. event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
  2723. if (!event_data)
  2724. return -ENOMEM;
  2725. event_data->node = service->node;
  2726. event_data->port = service->port;
  2727. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_SERVER_ARRIVE,
  2728. 0, event_data);
  2729. return 0;
  2730. }
  2731. static void wlfw_del_server(struct qmi_handle *qmi_wlfw,
  2732. struct qmi_service *service)
  2733. {
  2734. struct cnss_plat_data *plat_priv =
  2735. container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
  2736. if (plat_priv && test_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state)) {
  2737. cnss_pr_info("WLFW server delete in progress, Ignore server delete, state: 0x%lx\n",
  2738. plat_priv->driver_state);
  2739. return;
  2740. }
  2741. cnss_pr_dbg("WLFW server exiting\n");
  2742. if (plat_priv) {
  2743. cnss_ignore_qmi_failure(true);
  2744. set_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state);
  2745. }
  2746. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_SERVER_EXIT,
  2747. 0, NULL);
  2748. }
/* Server arrival/removal callbacks for the WLFW QMI client handle. */
static struct qmi_ops qmi_wlfw_ops = {
	.new_server = wlfw_new_server,
	.del_server = wlfw_del_server,
};
  2753. int cnss_qmi_init(struct cnss_plat_data *plat_priv)
  2754. {
  2755. int ret = 0;
  2756. ret = qmi_handle_init(&plat_priv->qmi_wlfw,
  2757. QMI_WLFW_MAX_RECV_BUF_SIZE,
  2758. &qmi_wlfw_ops, qmi_wlfw_msg_handlers);
  2759. if (ret < 0) {
  2760. cnss_pr_err("Failed to initialize WLFW QMI handle, err: %d\n",
  2761. ret);
  2762. goto out;
  2763. }
  2764. ret = qmi_add_lookup(&plat_priv->qmi_wlfw, WLFW_SERVICE_ID_V01,
  2765. WLFW_SERVICE_VERS_V01, WLFW_SERVICE_INS_ID_V01);
  2766. if (ret < 0)
  2767. cnss_pr_err("Failed to add WLFW QMI lookup, err: %d\n", ret);
  2768. out:
  2769. return ret;
  2770. }
/* Release the WLFW QMI handle; counterpart of cnss_qmi_init(). */
void cnss_qmi_deinit(struct cnss_plat_data *plat_priv)
{
	qmi_handle_release(&plat_priv->qmi_wlfw);
}
  2775. int cnss_qmi_get_dms_mac(struct cnss_plat_data *plat_priv)
  2776. {
  2777. struct dms_get_mac_address_req_msg_v01 req;
  2778. struct dms_get_mac_address_resp_msg_v01 resp;
  2779. struct qmi_txn txn;
  2780. int ret = 0;
  2781. if (!test_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state)) {
  2782. cnss_pr_err("DMS QMI connection not established\n");
  2783. return -EINVAL;
  2784. }
  2785. cnss_pr_dbg("Requesting DMS MAC address");
  2786. memset(&resp, 0, sizeof(resp));
  2787. ret = qmi_txn_init(&plat_priv->qmi_dms, &txn,
  2788. dms_get_mac_address_resp_msg_v01_ei, &resp);
  2789. if (ret < 0) {
  2790. cnss_pr_err("Failed to initialize txn for dms, err: %d\n",
  2791. ret);
  2792. goto out;
  2793. }
  2794. req.device = DMS_DEVICE_MAC_WLAN_V01;
  2795. ret = qmi_send_request(&plat_priv->qmi_dms, NULL, &txn,
  2796. QMI_DMS_GET_MAC_ADDRESS_REQ_V01,
  2797. DMS_GET_MAC_ADDRESS_REQ_MSG_V01_MAX_MSG_LEN,
  2798. dms_get_mac_address_req_msg_v01_ei, &req);
  2799. if (ret < 0) {
  2800. qmi_txn_cancel(&txn);
  2801. cnss_pr_err("Failed to send QMI_DMS_GET_MAC_ADDRESS_REQ_V01, err: %d\n",
  2802. ret);
  2803. goto out;
  2804. }
  2805. ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
  2806. if (ret < 0) {
  2807. cnss_pr_err("Failed to wait for QMI_DMS_GET_MAC_ADDRESS_RESP_V01, err: %d\n",
  2808. ret);
  2809. goto out;
  2810. }
  2811. if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
  2812. cnss_pr_err("QMI_DMS_GET_MAC_ADDRESS_REQ_V01 failed, result: %d, err: %d\n",
  2813. resp.resp.result, resp.resp.error);
  2814. ret = -resp.resp.result;
  2815. goto out;
  2816. }
  2817. if (!resp.mac_address_valid ||
  2818. resp.mac_address_len != QMI_WLFW_MAC_ADDR_SIZE_V01) {
  2819. cnss_pr_err("Invalid MAC address received from DMS\n");
  2820. plat_priv->dms.mac_valid = false;
  2821. goto out;
  2822. }
  2823. plat_priv->dms.mac_valid = true;
  2824. memcpy(plat_priv->dms.mac, resp.mac_address, QMI_WLFW_MAC_ADDR_SIZE_V01);
  2825. cnss_pr_info("Received DMS MAC: [%pM]\n", plat_priv->dms.mac);
  2826. out:
  2827. return ret;
  2828. }
  2829. static int cnss_dms_connect_to_server(struct cnss_plat_data *plat_priv,
  2830. unsigned int node, unsigned int port)
  2831. {
  2832. struct qmi_handle *qmi_dms = &plat_priv->qmi_dms;
  2833. struct sockaddr_qrtr sq = {0};
  2834. int ret = 0;
  2835. sq.sq_family = AF_QIPCRTR;
  2836. sq.sq_node = node;
  2837. sq.sq_port = port;
  2838. ret = kernel_connect(qmi_dms->sock, (struct sockaddr *)&sq,
  2839. sizeof(sq), 0);
  2840. if (ret < 0) {
  2841. cnss_pr_err("Failed to connect to QMI DMS remote service Node: %d Port: %d\n",
  2842. node, port);
  2843. goto out;
  2844. }
  2845. set_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state);
  2846. cnss_pr_info("QMI DMS service connected, state: 0x%lx\n",
  2847. plat_priv->driver_state);
  2848. out:
  2849. return ret;
  2850. }
  2851. static int dms_new_server(struct qmi_handle *qmi_dms,
  2852. struct qmi_service *service)
  2853. {
  2854. struct cnss_plat_data *plat_priv =
  2855. container_of(qmi_dms, struct cnss_plat_data, qmi_dms);
  2856. if (!service)
  2857. return -EINVAL;
  2858. return cnss_dms_connect_to_server(plat_priv, service->node,
  2859. service->port);
  2860. }
  2861. static void cnss_dms_server_exit_work(struct work_struct *work)
  2862. {
  2863. int ret;
  2864. struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL);
  2865. cnss_dms_deinit(plat_priv);
  2866. cnss_pr_info("QMI DMS Server Exit");
  2867. clear_bit(CNSS_DMS_DEL_SERVER, &plat_priv->driver_state);
  2868. ret = cnss_dms_init(plat_priv);
  2869. if (ret < 0)
  2870. cnss_pr_err("QMI DMS service registraton failed, ret\n", ret);
  2871. }
/* Deferred work: re-registers the DMS client after server removal. */
static DECLARE_WORK(cnss_dms_del_work, cnss_dms_server_exit_work);
  2873. static void dms_del_server(struct qmi_handle *qmi_dms,
  2874. struct qmi_service *service)
  2875. {
  2876. struct cnss_plat_data *plat_priv =
  2877. container_of(qmi_dms, struct cnss_plat_data, qmi_dms);
  2878. if (!plat_priv)
  2879. return;
  2880. if (test_bit(CNSS_DMS_DEL_SERVER, &plat_priv->driver_state)) {
  2881. cnss_pr_info("DMS server delete or cnss remove in progress, Ignore server delete: 0x%lx\n",
  2882. plat_priv->driver_state);
  2883. return;
  2884. }
  2885. set_bit(CNSS_DMS_DEL_SERVER, &plat_priv->driver_state);
  2886. clear_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state);
  2887. cnss_pr_info("QMI DMS service disconnected, state: 0x%lx\n",
  2888. plat_priv->driver_state);
  2889. schedule_work(&cnss_dms_del_work);
  2890. }
/* Flush any pending DMS server-exit work; call before driver teardown. */
void cnss_cancel_dms_work(void)
{
	cancel_work_sync(&cnss_dms_del_work);
}
/* Server arrival/removal callbacks for the DMS QMI client handle. */
static struct qmi_ops qmi_dms_ops = {
	.new_server = dms_new_server,
	.del_server = dms_del_server,
};
  2899. int cnss_dms_init(struct cnss_plat_data *plat_priv)
  2900. {
  2901. int ret = 0;
  2902. ret = qmi_handle_init(&plat_priv->qmi_dms, DMS_QMI_MAX_MSG_LEN,
  2903. &qmi_dms_ops, NULL);
  2904. if (ret < 0) {
  2905. cnss_pr_err("Failed to initialize DMS handle, err: %d\n", ret);
  2906. goto out;
  2907. }
  2908. ret = qmi_add_lookup(&plat_priv->qmi_dms, DMS_SERVICE_ID_V01,
  2909. DMS_SERVICE_VERS_V01, 0);
  2910. if (ret < 0)
  2911. cnss_pr_err("Failed to add DMS lookup, err: %d\n", ret);
  2912. out:
  2913. return ret;
  2914. }
/* Flag server-delete in progress, then release the DMS QMI handle. */
void cnss_dms_deinit(struct cnss_plat_data *plat_priv)
{
	set_bit(CNSS_DMS_DEL_SERVER, &plat_priv->driver_state);
	qmi_handle_release(&plat_priv->qmi_dms);
}
  2920. int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv)
  2921. {
  2922. int ret;
  2923. struct coex_antenna_switch_to_wlan_req_msg_v01 *req;
  2924. struct coex_antenna_switch_to_wlan_resp_msg_v01 *resp;
  2925. struct qmi_txn txn;
  2926. if (!plat_priv)
  2927. return -ENODEV;
  2928. cnss_pr_dbg("Sending coex antenna switch_to_wlan\n");
  2929. req = kzalloc(sizeof(*req), GFP_KERNEL);
  2930. if (!req)
  2931. return -ENOMEM;
  2932. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  2933. if (!resp) {
  2934. kfree(req);
  2935. return -ENOMEM;
  2936. }
  2937. req->antenna = plat_priv->antenna;
  2938. ret = qmi_txn_init(&plat_priv->coex_qmi, &txn,
  2939. coex_antenna_switch_to_wlan_resp_msg_v01_ei, resp);
  2940. if (ret < 0) {
  2941. cnss_pr_err("Fail to init txn for coex antenna switch_to_wlan resp %d\n",
  2942. ret);
  2943. goto out;
  2944. }
  2945. ret = qmi_send_request
  2946. (&plat_priv->coex_qmi, NULL, &txn,
  2947. QMI_COEX_SWITCH_ANTENNA_TO_WLAN_REQ_V01,
  2948. COEX_ANTENNA_SWITCH_TO_WLAN_REQ_MSG_V01_MAX_MSG_LEN,
  2949. coex_antenna_switch_to_wlan_req_msg_v01_ei, req);
  2950. if (ret < 0) {
  2951. qmi_txn_cancel(&txn);
  2952. cnss_pr_err("Fail to send coex antenna switch_to_wlan req %d\n",
  2953. ret);
  2954. goto out;
  2955. }
  2956. ret = qmi_txn_wait(&txn, COEX_TIMEOUT);
  2957. if (ret < 0) {
  2958. cnss_pr_err("Coex antenna switch_to_wlan resp wait failed with ret %d\n",
  2959. ret);
  2960. goto out;
  2961. } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  2962. cnss_pr_err("Coex antenna switch_to_wlan request rejected, result:%d error:%d\n",
  2963. resp->resp.result, resp->resp.error);
  2964. ret = -resp->resp.result;
  2965. goto out;
  2966. }
  2967. if (resp->grant_valid)
  2968. plat_priv->grant = resp->grant;
  2969. cnss_pr_dbg("Coex antenna grant: 0x%llx\n", resp->grant);
  2970. kfree(resp);
  2971. kfree(req);
  2972. return 0;
  2973. out:
  2974. kfree(resp);
  2975. kfree(req);
  2976. return ret;
  2977. }
  2978. int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv)
  2979. {
  2980. int ret;
  2981. struct coex_antenna_switch_to_mdm_req_msg_v01 *req;
  2982. struct coex_antenna_switch_to_mdm_resp_msg_v01 *resp;
  2983. struct qmi_txn txn;
  2984. if (!plat_priv)
  2985. return -ENODEV;
  2986. cnss_pr_dbg("Sending coex antenna switch_to_mdm\n");
  2987. req = kzalloc(sizeof(*req), GFP_KERNEL);
  2988. if (!req)
  2989. return -ENOMEM;
  2990. resp = kzalloc(sizeof(*resp), GFP_KERNEL);
  2991. if (!resp) {
  2992. kfree(req);
  2993. return -ENOMEM;
  2994. }
  2995. req->antenna = plat_priv->antenna;
  2996. ret = qmi_txn_init(&plat_priv->coex_qmi, &txn,
  2997. coex_antenna_switch_to_mdm_resp_msg_v01_ei, resp);
  2998. if (ret < 0) {
  2999. cnss_pr_err("Fail to init txn for coex antenna switch_to_mdm resp %d\n",
  3000. ret);
  3001. goto out;
  3002. }
  3003. ret = qmi_send_request
  3004. (&plat_priv->coex_qmi, NULL, &txn,
  3005. QMI_COEX_SWITCH_ANTENNA_TO_MDM_REQ_V01,
  3006. COEX_ANTENNA_SWITCH_TO_MDM_REQ_MSG_V01_MAX_MSG_LEN,
  3007. coex_antenna_switch_to_mdm_req_msg_v01_ei, req);
  3008. if (ret < 0) {
  3009. qmi_txn_cancel(&txn);
  3010. cnss_pr_err("Fail to send coex antenna switch_to_mdm req %d\n",
  3011. ret);
  3012. goto out;
  3013. }
  3014. ret = qmi_txn_wait(&txn, COEX_TIMEOUT);
  3015. if (ret < 0) {
  3016. cnss_pr_err("Coex antenna switch_to_mdm resp wait failed with ret %d\n",
  3017. ret);
  3018. goto out;
  3019. } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  3020. cnss_pr_err("Coex antenna switch_to_mdm request rejected, result:%d error:%d\n",
  3021. resp->resp.result, resp->resp.error);
  3022. ret = -resp->resp.result;
  3023. goto out;
  3024. }
  3025. kfree(resp);
  3026. kfree(req);
  3027. return 0;
  3028. out:
  3029. kfree(resp);
  3030. kfree(req);
  3031. return ret;
  3032. }
  3033. int cnss_send_subsys_restart_level_msg(struct cnss_plat_data *plat_priv)
  3034. {
  3035. int ret;
  3036. struct wlfw_subsys_restart_level_req_msg_v01 req;
  3037. struct wlfw_subsys_restart_level_resp_msg_v01 resp;
  3038. u8 pcss_enabled;
  3039. if (!plat_priv)
  3040. return -ENODEV;
  3041. if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
  3042. cnss_pr_dbg("Can't send pcss cmd before fw ready\n");
  3043. return 0;
  3044. }
  3045. pcss_enabled = plat_priv->recovery_pcss_enabled;
  3046. cnss_pr_dbg("Sending pcss recovery status: %d\n", pcss_enabled);
  3047. req.restart_level_type_valid = 1;
  3048. req.restart_level_type = pcss_enabled;
  3049. ret = qmi_send_wait(&plat_priv->qmi_wlfw, &req, &resp,
  3050. wlfw_subsys_restart_level_req_msg_v01_ei,
  3051. wlfw_subsys_restart_level_resp_msg_v01_ei,
  3052. QMI_WLFW_SUBSYS_RESTART_LEVEL_REQ_V01,
  3053. WLFW_SUBSYS_RESTART_LEVEL_REQ_MSG_V01_MAX_MSG_LEN,
  3054. QMI_WLFW_TIMEOUT_JF);
  3055. if (ret < 0)
  3056. cnss_pr_err("pcss recovery setting failed with ret %d\n", ret);
  3057. return ret;
  3058. }
  3059. static int coex_new_server(struct qmi_handle *qmi,
  3060. struct qmi_service *service)
  3061. {
  3062. struct cnss_plat_data *plat_priv =
  3063. container_of(qmi, struct cnss_plat_data, coex_qmi);
  3064. struct sockaddr_qrtr sq = { 0 };
  3065. int ret = 0;
  3066. cnss_pr_dbg("COEX server arrive: node %u port %u\n",
  3067. service->node, service->port);
  3068. sq.sq_family = AF_QIPCRTR;
  3069. sq.sq_node = service->node;
  3070. sq.sq_port = service->port;
  3071. ret = kernel_connect(qmi->sock, (struct sockaddr *)&sq, sizeof(sq), 0);
  3072. if (ret < 0) {
  3073. cnss_pr_err("Fail to connect to remote service port\n");
  3074. return ret;
  3075. }
  3076. set_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state);
  3077. cnss_pr_dbg("COEX Server Connected: 0x%lx\n",
  3078. plat_priv->driver_state);
  3079. return 0;
  3080. }
  3081. static void coex_del_server(struct qmi_handle *qmi,
  3082. struct qmi_service *service)
  3083. {
  3084. struct cnss_plat_data *plat_priv =
  3085. container_of(qmi, struct cnss_plat_data, coex_qmi);
  3086. cnss_pr_dbg("COEX server exit\n");
  3087. clear_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state);
  3088. }
/* Server arrival/removal callbacks for the COEX QMI client handle. */
static struct qmi_ops coex_qmi_ops = {
	.new_server = coex_new_server,
	.del_server = coex_del_server,
};
  3093. int cnss_register_coex_service(struct cnss_plat_data *plat_priv)
  3094. { int ret;
  3095. ret = qmi_handle_init(&plat_priv->coex_qmi,
  3096. COEX_SERVICE_MAX_MSG_LEN,
  3097. &coex_qmi_ops, NULL);
  3098. if (ret < 0)
  3099. return ret;
  3100. ret = qmi_add_lookup(&plat_priv->coex_qmi, COEX_SERVICE_ID_V01,
  3101. COEX_SERVICE_VERS_V01, 0);
  3102. return ret;
  3103. }
/* Release the COEX QMI handle; counterpart of cnss_register_coex_service(). */
void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv)
{
	qmi_handle_release(&plat_priv->coex_qmi);
}
  3108. /* IMS Service */
  3109. int ims_subscribe_for_indication_send_async(struct cnss_plat_data *plat_priv)
  3110. {
  3111. int ret;
  3112. struct ims_private_service_subscribe_for_indications_req_msg_v01 *req;
  3113. struct qmi_txn *txn;
  3114. if (!plat_priv)
  3115. return -ENODEV;
  3116. cnss_pr_dbg("Sending ASYNC ims subscribe for indication\n");
  3117. req = kzalloc(sizeof(*req), GFP_KERNEL);
  3118. if (!req)
  3119. return -ENOMEM;
  3120. req->wfc_call_status_valid = 1;
  3121. req->wfc_call_status = 1;
  3122. txn = &plat_priv->txn;
  3123. ret = qmi_txn_init(&plat_priv->ims_qmi, txn, NULL, NULL);
  3124. if (ret < 0) {
  3125. cnss_pr_err("Fail to init txn for ims subscribe for indication resp %d\n",
  3126. ret);
  3127. goto out;
  3128. }
  3129. ret = qmi_send_request
  3130. (&plat_priv->ims_qmi, NULL, txn,
  3131. QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_V01,
  3132. IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_MSG_V01_MAX_MSG_LEN,
  3133. ims_private_service_subscribe_for_indications_req_msg_v01_ei, req);
  3134. if (ret < 0) {
  3135. qmi_txn_cancel(txn);
  3136. cnss_pr_err("Fail to send ims subscribe for indication req %d\n",
  3137. ret);
  3138. goto out;
  3139. }
  3140. kfree(req);
  3141. return 0;
  3142. out:
  3143. kfree(req);
  3144. return ret;
  3145. }
  3146. static void ims_subscribe_for_indication_resp_cb(struct qmi_handle *qmi,
  3147. struct sockaddr_qrtr *sq,
  3148. struct qmi_txn *txn,
  3149. const void *data)
  3150. {
  3151. const
  3152. struct ims_private_service_subscribe_for_indications_rsp_msg_v01 *resp =
  3153. data;
  3154. cnss_pr_dbg("Received IMS subscribe indication response\n");
  3155. if (!txn) {
  3156. cnss_pr_err("spurious response\n");
  3157. return;
  3158. }
  3159. if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
  3160. cnss_pr_err("IMS subscribe for indication request rejected, result:%d error:%d\n",
  3161. resp->resp.result, resp->resp.error);
  3162. txn->result = -resp->resp.result;
  3163. }
  3164. }
  3165. int cnss_process_wfc_call_ind_event(struct cnss_plat_data *plat_priv,
  3166. void *data)
  3167. {
  3168. int ret;
  3169. struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg = data;
  3170. ret = cnss_wlfw_wfc_call_status_send_sync(plat_priv, ind_msg);
  3171. kfree(data);
  3172. return ret;
  3173. }
  3174. static void
  3175. cnss_ims_process_wfc_call_ind_cb(struct qmi_handle *ims_qmi,
  3176. struct sockaddr_qrtr *sq,
  3177. struct qmi_txn *txn, const void *data)
  3178. {
  3179. struct cnss_plat_data *plat_priv =
  3180. container_of(ims_qmi, struct cnss_plat_data, ims_qmi);
  3181. const
  3182. struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg = data;
  3183. struct ims_private_service_wfc_call_status_ind_msg_v01 *event_data;
  3184. if (!txn) {
  3185. cnss_pr_err("IMS->CNSS: WFC_CALL_IND: Spurious indication\n");
  3186. return;
  3187. }
  3188. if (!ind_msg) {
  3189. cnss_pr_err("IMS->CNSS: WFC_CALL_IND: Invalid indication\n");
  3190. return;
  3191. }
  3192. cnss_pr_dbg("IMS->CNSS: WFC_CALL_IND: %x, %x %x, %x %x, %x %llx, %x %x, %x %x\n",
  3193. ind_msg->wfc_call_active, ind_msg->all_wfc_calls_held_valid,
  3194. ind_msg->all_wfc_calls_held,
  3195. ind_msg->is_wfc_emergency_valid, ind_msg->is_wfc_emergency,
  3196. ind_msg->twt_ims_start_valid, ind_msg->twt_ims_start,
  3197. ind_msg->twt_ims_int_valid, ind_msg->twt_ims_int,
  3198. ind_msg->media_quality_valid, ind_msg->media_quality);
  3199. event_data = kmemdup(ind_msg, sizeof(*event_data), GFP_KERNEL);
  3200. if (!event_data)
  3201. return;
  3202. cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND,
  3203. 0, event_data);
  3204. }
  3205. static struct qmi_msg_handler qmi_ims_msg_handlers[] = {
  3206. {
  3207. .type = QMI_RESPONSE,
  3208. .msg_id =
  3209. QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_V01,
  3210. .ei =
  3211. ims_private_service_subscribe_for_indications_rsp_msg_v01_ei,
  3212. .decoded_size = sizeof(struct
  3213. ims_private_service_subscribe_for_indications_rsp_msg_v01),
  3214. .fn = ims_subscribe_for_indication_resp_cb
  3215. },
  3216. {
  3217. .type = QMI_INDICATION,
  3218. .msg_id = QMI_IMS_PRIVATE_SERVICE_WFC_CALL_STATUS_IND_V01,
  3219. .ei = ims_private_service_wfc_call_status_ind_msg_v01_ei,
  3220. .decoded_size =
  3221. sizeof(struct ims_private_service_wfc_call_status_ind_msg_v01),
  3222. .fn = cnss_ims_process_wfc_call_ind_cb
  3223. },
  3224. {}
  3225. };
  3226. static int ims_new_server(struct qmi_handle *qmi,
  3227. struct qmi_service *service)
  3228. {
  3229. struct cnss_plat_data *plat_priv =
  3230. container_of(qmi, struct cnss_plat_data, ims_qmi);
  3231. struct sockaddr_qrtr sq = { 0 };
  3232. int ret = 0;
  3233. cnss_pr_dbg("IMS server arrive: node %u port %u\n",
  3234. service->node, service->port);
  3235. sq.sq_family = AF_QIPCRTR;
  3236. sq.sq_node = service->node;
  3237. sq.sq_port = service->port;
  3238. ret = kernel_connect(qmi->sock, (struct sockaddr *)&sq, sizeof(sq), 0);
  3239. if (ret < 0) {
  3240. cnss_pr_err("Fail to connect to remote service port\n");
  3241. return ret;
  3242. }
  3243. set_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state);
  3244. cnss_pr_dbg("IMS Server Connected: 0x%lx\n",
  3245. plat_priv->driver_state);
  3246. ret = ims_subscribe_for_indication_send_async(plat_priv);
  3247. return ret;
  3248. }
  3249. static void ims_del_server(struct qmi_handle *qmi,
  3250. struct qmi_service *service)
  3251. {
  3252. struct cnss_plat_data *plat_priv =
  3253. container_of(qmi, struct cnss_plat_data, ims_qmi);
  3254. cnss_pr_dbg("IMS server exit\n");
  3255. clear_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state);
  3256. }
  3257. static struct qmi_ops ims_qmi_ops = {
  3258. .new_server = ims_new_server,
  3259. .del_server = ims_del_server,
  3260. };
  3261. int cnss_register_ims_service(struct cnss_plat_data *plat_priv)
  3262. { int ret;
  3263. ret = qmi_handle_init(&plat_priv->ims_qmi,
  3264. IMSPRIVATE_SERVICE_MAX_MSG_LEN,
  3265. &ims_qmi_ops, qmi_ims_msg_handlers);
  3266. if (ret < 0)
  3267. return ret;
  3268. ret = qmi_add_lookup(&plat_priv->ims_qmi, IMSPRIVATE_SERVICE_ID_V01,
  3269. IMSPRIVATE_SERVICE_VERS_V01, 0);
  3270. return ret;
  3271. }
  3272. void cnss_unregister_ims_service(struct cnss_plat_data *plat_priv)
  3273. {
  3274. qmi_handle_release(&plat_priv->ims_qmi);
  3275. }