firmware_if.c 80 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright 2016-2022 HabanaLabs, Ltd.
  4. * All Rights Reserved.
  5. */
  6. #include "habanalabs.h"
  7. #include "../include/common/hl_boot_if.h"
  8. #include <linux/firmware.h>
  9. #include <linux/crc32.h>
  10. #include <linux/slab.h>
  11. #include <linux/ctype.h>
  12. #define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
  13. static char *extract_fw_ver_from_str(const char *fw_str)
  14. {
  15. char *str, *fw_ver, *whitespace;
  16. u32 ver_offset;
  17. fw_ver = kmalloc(VERSION_MAX_LEN, GFP_KERNEL);
  18. if (!fw_ver)
  19. return NULL;
  20. str = strnstr(fw_str, "fw-", VERSION_MAX_LEN);
  21. if (!str)
  22. goto free_fw_ver;
  23. /* Skip the fw- part */
  24. str += 3;
  25. ver_offset = str - fw_str;
  26. /* Copy until the next whitespace */
  27. whitespace = strnstr(str, " ", VERSION_MAX_LEN - ver_offset);
  28. if (!whitespace)
  29. goto free_fw_ver;
  30. strscpy(fw_ver, str, whitespace - str + 1);
  31. return fw_ver;
  32. free_fw_ver:
  33. kfree(fw_ver);
  34. return NULL;
  35. }
  36. static int extract_fw_sub_versions(struct hl_device *hdev, char *preboot_ver)
  37. {
  38. char major[8], minor[8], *first_dot, *second_dot;
  39. int rc;
  40. first_dot = strnstr(preboot_ver, ".", 10);
  41. if (first_dot) {
  42. strscpy(major, preboot_ver, first_dot - preboot_ver + 1);
  43. rc = kstrtou32(major, 10, &hdev->fw_major_version);
  44. } else {
  45. rc = -EINVAL;
  46. }
  47. if (rc) {
  48. dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc);
  49. goto out;
  50. }
  51. /* skip the first dot */
  52. first_dot++;
  53. second_dot = strnstr(first_dot, ".", 10);
  54. if (second_dot) {
  55. strscpy(minor, first_dot, second_dot - first_dot + 1);
  56. rc = kstrtou32(minor, 10, &hdev->fw_minor_version);
  57. } else {
  58. rc = -EINVAL;
  59. }
  60. if (rc)
  61. dev_err(hdev->dev, "Error %d parsing preboot minor version\n", rc);
  62. out:
  63. kfree(preboot_ver);
  64. return rc;
  65. }
  66. static int hl_request_fw(struct hl_device *hdev,
  67. const struct firmware **firmware_p,
  68. const char *fw_name)
  69. {
  70. size_t fw_size;
  71. int rc;
  72. rc = request_firmware(firmware_p, fw_name, hdev->dev);
  73. if (rc) {
  74. dev_err(hdev->dev, "Firmware file %s is not found! (error %d)\n",
  75. fw_name, rc);
  76. goto out;
  77. }
  78. fw_size = (*firmware_p)->size;
  79. if ((fw_size % 4) != 0) {
  80. dev_err(hdev->dev, "Illegal %s firmware size %zu\n",
  81. fw_name, fw_size);
  82. rc = -EINVAL;
  83. goto release_fw;
  84. }
  85. dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
  86. if (fw_size > FW_FILE_MAX_SIZE) {
  87. dev_err(hdev->dev,
  88. "FW file size %zu exceeds maximum of %u bytes\n",
  89. fw_size, FW_FILE_MAX_SIZE);
  90. rc = -EINVAL;
  91. goto release_fw;
  92. }
  93. return 0;
  94. release_fw:
  95. release_firmware(*firmware_p);
  96. out:
  97. return rc;
  98. }
/**
 * hl_release_firmware() - release a firmware image obtained by hl_request_fw()
 *
 * @fw: firmware descriptor returned by hl_request_fw()
 *
 * Thin wrapper around release_firmware(); it exists only so that every
 * hl_request_fw() call has a symmetrically-named release counterpart.
 */
static inline void hl_release_firmware(const struct firmware *fw)
{
	release_firmware(fw);
}
  111. /**
  112. * hl_fw_copy_fw_to_device() - copy FW to device
  113. *
  114. * @hdev: pointer to hl_device structure.
  115. * @fw: fw descriptor
  116. * @dst: IO memory mapped address space to copy firmware to
  117. * @src_offset: offset in src FW to copy from
  118. * @size: amount of bytes to copy (0 to copy the whole binary)
  119. *
  120. * actual copy of FW binary data to device, shared by static and dynamic loaders
  121. */
  122. static int hl_fw_copy_fw_to_device(struct hl_device *hdev,
  123. const struct firmware *fw, void __iomem *dst,
  124. u32 src_offset, u32 size)
  125. {
  126. const void *fw_data;
  127. /* size 0 indicates to copy the whole file */
  128. if (!size)
  129. size = fw->size;
  130. if (src_offset + size > fw->size) {
  131. dev_err(hdev->dev,
  132. "size to copy(%u) and offset(%u) are invalid\n",
  133. size, src_offset);
  134. return -EINVAL;
  135. }
  136. fw_data = (const void *) fw->data;
  137. memcpy_toio(dst, fw_data + src_offset, size);
  138. return 0;
  139. }
  140. /**
  141. * hl_fw_copy_msg_to_device() - copy message to device
  142. *
  143. * @hdev: pointer to hl_device structure.
  144. * @msg: message
  145. * @dst: IO memory mapped address space to copy firmware to
  146. * @src_offset: offset in src message to copy from
  147. * @size: amount of bytes to copy (0 to copy the whole binary)
  148. *
  149. * actual copy of message data to device.
  150. */
  151. static int hl_fw_copy_msg_to_device(struct hl_device *hdev,
  152. struct lkd_msg_comms *msg, void __iomem *dst,
  153. u32 src_offset, u32 size)
  154. {
  155. void *msg_data;
  156. /* size 0 indicates to copy the whole file */
  157. if (!size)
  158. size = sizeof(struct lkd_msg_comms);
  159. if (src_offset + size > sizeof(struct lkd_msg_comms)) {
  160. dev_err(hdev->dev,
  161. "size to copy(%u) and offset(%u) are invalid\n",
  162. size, src_offset);
  163. return -EINVAL;
  164. }
  165. msg_data = (void *) msg;
  166. memcpy_toio(dst, msg_data + src_offset, size);
  167. return 0;
  168. }
  169. /**
  170. * hl_fw_load_fw_to_device() - Load F/W code to device's memory.
  171. *
  172. * @hdev: pointer to hl_device structure.
  173. * @fw_name: the firmware image name
  174. * @dst: IO memory mapped address space to copy firmware to
  175. * @src_offset: offset in src FW to copy from
  176. * @size: amount of bytes to copy (0 to copy the whole binary)
  177. *
  178. * Copy fw code from firmware file to device memory.
  179. *
  180. * Return: 0 on success, non-zero for failure.
  181. */
  182. int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
  183. void __iomem *dst, u32 src_offset, u32 size)
  184. {
  185. const struct firmware *fw;
  186. int rc;
  187. rc = hl_request_fw(hdev, &fw, fw_name);
  188. if (rc)
  189. return rc;
  190. rc = hl_fw_copy_fw_to_device(hdev, fw, dst, src_offset, size);
  191. hl_release_firmware(fw);
  192. return rc;
  193. }
  194. int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value)
  195. {
  196. struct cpucp_packet pkt = {};
  197. pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
  198. pkt.value = cpu_to_le64(value);
  199. return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
  200. }
  201. int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
  202. u16 len, u32 timeout, u64 *result)
  203. {
  204. struct hl_hw_queue *queue = &hdev->kernel_queues[hw_queue_id];
  205. struct asic_fixed_properties *prop = &hdev->asic_prop;
  206. struct cpucp_packet *pkt;
  207. dma_addr_t pkt_dma_addr;
  208. struct hl_bd *sent_bd;
  209. u32 tmp, expected_ack_val, pi, opcode;
  210. int rc;
  211. pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
  212. if (!pkt) {
  213. dev_err(hdev->dev,
  214. "Failed to allocate DMA memory for packet to CPU\n");
  215. return -ENOMEM;
  216. }
  217. memcpy(pkt, msg, len);
  218. mutex_lock(&hdev->send_cpu_message_lock);
  219. /* CPU-CP messages can be sent during soft-reset */
  220. if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
  221. rc = 0;
  222. goto out;
  223. }
  224. if (hdev->device_cpu_disabled) {
  225. rc = -EIO;
  226. goto out;
  227. }
  228. /* set fence to a non valid value */
  229. pkt->fence = cpu_to_le32(UINT_MAX);
  230. pi = queue->pi;
  231. /*
  232. * The CPU queue is a synchronous queue with an effective depth of
  233. * a single entry (although it is allocated with room for multiple
  234. * entries). We lock on it using 'send_cpu_message_lock' which
  235. * serializes accesses to the CPU queue.
  236. * Which means that we don't need to lock the access to the entire H/W
  237. * queues module when submitting a JOB to the CPU queue.
  238. */
  239. hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), len, pkt_dma_addr);
  240. if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
  241. expected_ack_val = queue->pi;
  242. else
  243. expected_ack_val = CPUCP_PACKET_FENCE_VAL;
  244. rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
  245. (tmp == expected_ack_val), 1000,
  246. timeout, true);
  247. hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
  248. if (rc == -ETIMEDOUT) {
  249. /* If FW performed reset just before sending it a packet, we will get a timeout.
  250. * This is expected behavior, hence no need for error message.
  251. */
  252. if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset)
  253. dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n",
  254. tmp);
  255. else
  256. dev_err(hdev->dev, "Device CPU packet timeout (0x%x)\n", tmp);
  257. hdev->device_cpu_disabled = true;
  258. goto out;
  259. }
  260. tmp = le32_to_cpu(pkt->ctl);
  261. rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
  262. if (rc) {
  263. opcode = (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT;
  264. if (!prop->supports_advanced_cpucp_rc) {
  265. dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n", rc, opcode);
  266. goto scrub_descriptor;
  267. }
  268. switch (rc) {
  269. case cpucp_packet_invalid:
  270. dev_err(hdev->dev,
  271. "CPU packet %d is not supported by F/W\n", opcode);
  272. break;
  273. case cpucp_packet_fault:
  274. dev_err(hdev->dev,
  275. "F/W failed processing CPU packet %d\n", opcode);
  276. break;
  277. case cpucp_packet_invalid_pkt:
  278. dev_dbg(hdev->dev,
  279. "CPU packet %d is not supported by F/W\n", opcode);
  280. break;
  281. case cpucp_packet_invalid_params:
  282. dev_err(hdev->dev,
  283. "F/W reports invalid parameters for CPU packet %d\n", opcode);
  284. break;
  285. default:
  286. dev_err(hdev->dev,
  287. "Unknown F/W ERROR %d for CPU packet %d\n", rc, opcode);
  288. }
  289. /* propagate the return code from the f/w to the callers who want to check it */
  290. if (result)
  291. *result = rc;
  292. rc = -EIO;
  293. } else if (result) {
  294. *result = le64_to_cpu(pkt->result);
  295. }
  296. scrub_descriptor:
  297. /* Scrub previous buffer descriptor 'ctl' field which contains the
  298. * previous PI value written during packet submission.
  299. * We must do this or else F/W can read an old value upon queue wraparound.
  300. */
  301. sent_bd = queue->kernel_address;
  302. sent_bd += hl_pi_2_offset(pi);
  303. sent_bd->ctl = cpu_to_le32(UINT_MAX);
  304. out:
  305. mutex_unlock(&hdev->send_cpu_message_lock);
  306. hl_cpu_accessible_dma_pool_free(hdev, len, pkt);
  307. return rc;
  308. }
  309. int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
  310. {
  311. struct cpucp_packet pkt;
  312. u64 result;
  313. int rc;
  314. memset(&pkt, 0, sizeof(pkt));
  315. pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
  316. CPUCP_PKT_CTL_OPCODE_SHIFT);
  317. pkt.value = cpu_to_le64(event_type);
  318. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
  319. 0, &result);
  320. if (rc)
  321. dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
  322. return rc;
  323. }
  324. int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
  325. size_t irq_arr_size)
  326. {
  327. struct cpucp_unmask_irq_arr_packet *pkt;
  328. size_t total_pkt_size;
  329. u64 result;
  330. int rc;
  331. total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
  332. irq_arr_size;
  333. /* data should be aligned to 8 bytes in order to CPU-CP to copy it */
  334. total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
  335. /* total_pkt_size is casted to u16 later on */
  336. if (total_pkt_size > USHRT_MAX) {
  337. dev_err(hdev->dev, "too many elements in IRQ array\n");
  338. return -EINVAL;
  339. }
  340. pkt = kzalloc(total_pkt_size, GFP_KERNEL);
  341. if (!pkt)
  342. return -ENOMEM;
  343. pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
  344. memcpy(&pkt->irqs, irq_arr, irq_arr_size);
  345. pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
  346. CPUCP_PKT_CTL_OPCODE_SHIFT);
  347. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
  348. total_pkt_size, 0, &result);
  349. if (rc)
  350. dev_err(hdev->dev, "failed to unmask IRQ array\n");
  351. kfree(pkt);
  352. return rc;
  353. }
  354. int hl_fw_test_cpu_queue(struct hl_device *hdev)
  355. {
  356. struct cpucp_packet test_pkt = {};
  357. u64 result;
  358. int rc;
  359. test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
  360. CPUCP_PKT_CTL_OPCODE_SHIFT);
  361. test_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
  362. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
  363. sizeof(test_pkt), 0, &result);
  364. if (!rc) {
  365. if (result != CPUCP_PACKET_FENCE_VAL)
  366. dev_err(hdev->dev,
  367. "CPU queue test failed (%#08llx)\n", result);
  368. } else {
  369. dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
  370. }
  371. return rc;
  372. }
  373. void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
  374. dma_addr_t *dma_handle)
  375. {
  376. u64 kernel_addr;
  377. kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
  378. *dma_handle = hdev->cpu_accessible_dma_address +
  379. (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
  380. return (void *) (uintptr_t) kernel_addr;
  381. }
/*
 * Return a buffer previously obtained from hl_fw_cpu_accessible_dma_pool_alloc()
 * back to the CPU-accessible DMA pool. @size must match the allocation size.
 */
void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
					void *vaddr)
{
	gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
			size);
}
  388. int hl_fw_send_device_activity(struct hl_device *hdev, bool open)
  389. {
  390. struct cpucp_packet pkt;
  391. int rc;
  392. memset(&pkt, 0, sizeof(pkt));
  393. pkt.ctl = cpu_to_le32(CPUCP_PACKET_ACTIVE_STATUS_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
  394. pkt.value = cpu_to_le64(open);
  395. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
  396. if (rc)
  397. dev_err(hdev->dev, "failed to send device activity msg(%u)\n", open);
  398. return rc;
  399. }
  400. int hl_fw_send_heartbeat(struct hl_device *hdev)
  401. {
  402. struct cpucp_packet hb_pkt;
  403. u64 result;
  404. int rc;
  405. memset(&hb_pkt, 0, sizeof(hb_pkt));
  406. hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
  407. CPUCP_PKT_CTL_OPCODE_SHIFT);
  408. hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
  409. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
  410. sizeof(hb_pkt), 0, &result);
  411. if ((rc) || (result != CPUCP_PACKET_FENCE_VAL))
  412. return -EIO;
  413. if (le32_to_cpu(hb_pkt.status_mask) &
  414. CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK) {
  415. dev_warn(hdev->dev, "FW reported EQ fault during heartbeat\n");
  416. rc = -EIO;
  417. }
  418. return rc;
  419. }
/*
 * fw_report_boot_dev0() - decode and log boot errors from the ERR0 register.
 * @hdev: habanalabs device structure.
 * @err_val: raw CPU_BOOT_ERR0 register value.
 * @sts_val: raw CPU_BOOT_DEV_STS0 register value (logged at debug level only).
 *
 * Logs one message per set bit. Bits that are informational or warnings are
 * cleared from the local err_val copy so they neither trigger the
 * unknown-error check nor count toward the final mask comparison.
 *
 * Return: true only when a real error exists AND its bit is enabled in the
 * lower 32 bits of hdev->boot_error_status_mask.
 */
static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
				u32 sts_val)
{
	bool err_exists = false;

	/* register contents are valid only after F/W sets the ENABLED bit */
	if (!(err_val & CPU_BOOT_ERR0_ENABLED))
		return false;

	if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL) {
		dev_err(hdev->dev,
			"Device boot error - DRAM initialization failed\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED) {
		dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL) {
		dev_err(hdev->dev,
			"Device boot error - Thermal Sensor initialization failed\n");
		err_exists = true;
	}

	/* severity of a skipped BMC wait depends on whether BMC is in use */
	if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) {
		if (hdev->bmc_enable) {
			dev_err(hdev->dev,
				"Device boot error - Skipped waiting for BMC\n");
			err_exists = true;
		} else {
			dev_info(hdev->dev,
				"Device boot message - Skipped waiting for BMC\n");
			/* This is an info so we don't want it to disable the
			 * device
			 */
			err_val &= ~CPU_BOOT_ERR0_BMC_WAIT_SKIPPED;
		}
	}

	if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY) {
		dev_err(hdev->dev,
			"Device boot error - Serdes data from BMC not available\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) {
		dev_err(hdev->dev,
			"Device boot error - NIC F/W initialization failed\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) {
		dev_err(hdev->dev,
			"Device boot warning - security not ready\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) {
		dev_err(hdev->dev, "Device boot error - security failure\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL) {
		dev_err(hdev->dev, "Device boot error - eFuse failure\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_SEC_IMG_VER_FAIL) {
		dev_err(hdev->dev, "Device boot error - Failed to load preboot secondary image\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_PLL_FAIL) {
		dev_err(hdev->dev, "Device boot error - PLL failure\n");
		err_exists = true;
	}

	if (err_val & CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL) {
		/* Ignore this bit, don't prevent driver loading */
		dev_dbg(hdev->dev, "device unusable status is set\n");
		err_val &= ~CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL;
	}

	if (err_val & CPU_BOOT_ERR0_BINNING_FAIL) {
		dev_err(hdev->dev, "Device boot error - binning failure\n");
		err_exists = true;
	}

	if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
		dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);

	/* All warnings should go here in order not to reach the unknown error validation */
	if (err_val & CPU_BOOT_ERR0_EEPROM_FAIL) {
		dev_warn(hdev->dev,
			"Device boot warning - EEPROM failure detected, default settings applied\n");
		/* This is a warning so we don't want it to disable the
		 * device
		 */
		err_val &= ~CPU_BOOT_ERR0_EEPROM_FAIL;
	}

	if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
		dev_warn(hdev->dev,
			"Device boot warning - Skipped DRAM initialization\n");
		/* This is a warning so we don't want it to disable the
		 * device
		 */
		err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED;
	}

	if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL) {
		dev_warn(hdev->dev,
			"Device boot warning - Failed to load preboot primary image\n");
		/* This is a warning so we don't want it to disable the
		 * device as we have a secondary preboot image
		 */
		err_val &= ~CPU_BOOT_ERR0_PRI_IMG_VER_FAIL;
	}

	if (err_val & CPU_BOOT_ERR0_TPM_FAIL) {
		dev_warn(hdev->dev,
			"Device boot warning - TPM failure\n");
		/* This is a warning so we don't want it to disable the
		 * device
		 */
		err_val &= ~CPU_BOOT_ERR0_TPM_FAIL;
	}

	/* any bit still set here was not recognized above (warnings were
	 * already cleared) - report it as an unknown error
	 */
	if (!err_exists && (err_val & ~CPU_BOOT_ERR0_ENABLED)) {
		dev_err(hdev->dev,
			"Device boot error - unknown ERR0 error 0x%08x\n", err_val);
		err_exists = true;
	}

	/* return error only if it's in the predefined mask */
	if (err_exists && ((err_val & ~CPU_BOOT_ERR0_ENABLED) &
			lower_32_bits(hdev->boot_error_status_mask)))
		return true;

	return false;
}
  540. /* placeholder for ERR1 as no errors defined there yet */
  541. static bool fw_report_boot_dev1(struct hl_device *hdev, u32 err_val,
  542. u32 sts_val)
  543. {
  544. /*
  545. * keep this variable to preserve the logic of the function.
  546. * this way it would require less modifications when error will be
  547. * added to DEV_ERR1
  548. */
  549. bool err_exists = false;
  550. if (!(err_val & CPU_BOOT_ERR1_ENABLED))
  551. return false;
  552. if (sts_val & CPU_BOOT_DEV_STS1_ENABLED)
  553. dev_dbg(hdev->dev, "Device status1 %#x\n", sts_val);
  554. if (!err_exists && (err_val & ~CPU_BOOT_ERR1_ENABLED)) {
  555. dev_err(hdev->dev,
  556. "Device boot error - unknown ERR1 error 0x%08x\n",
  557. err_val);
  558. err_exists = true;
  559. }
  560. /* return error only if it's in the predefined mask */
  561. if (err_exists && ((err_val & ~CPU_BOOT_ERR1_ENABLED) &
  562. upper_32_bits(hdev->boot_error_status_mask)))
  563. return true;
  564. return false;
  565. }
  566. static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
  567. u32 boot_err1_reg, u32 cpu_boot_dev_status0_reg,
  568. u32 cpu_boot_dev_status1_reg)
  569. {
  570. u32 err_val, status_val;
  571. bool err_exists = false;
  572. /* Some of the firmware status codes are deprecated in newer f/w
  573. * versions. In those versions, the errors are reported
  574. * in different registers. Therefore, we need to check those
  575. * registers and print the exact errors. Moreover, there
  576. * may be multiple errors, so we need to report on each error
  577. * separately. Some of the error codes might indicate a state
  578. * that is not an error per-se, but it is an error in production
  579. * environment
  580. */
  581. err_val = RREG32(boot_err0_reg);
  582. status_val = RREG32(cpu_boot_dev_status0_reg);
  583. err_exists = fw_report_boot_dev0(hdev, err_val, status_val);
  584. err_val = RREG32(boot_err1_reg);
  585. status_val = RREG32(cpu_boot_dev_status1_reg);
  586. err_exists |= fw_report_boot_dev1(hdev, err_val, status_val);
  587. if (err_exists)
  588. return -EIO;
  589. return 0;
  590. }
/**
 * hl_fw_cpucp_info_get() - fetch the cpucp_info structure from CPU-CP FW.
 * @hdev: pointer to the habanalabs device structure
 * @sts_boot_dev_sts0_reg: boot device status0 register address
 * @sts_boot_dev_sts1_reg: boot device status1 register address
 * @boot_err0_reg: boot error0 register address
 * @boot_err1_reg: boot error1 register address
 *
 * Allocates a CPU-accessible DMA buffer, asks the device CPU to fill it
 * with the cpucp_info contents, copies the result into the ASIC properties,
 * builds the hwmon channel info and re-reads the FW application status bits.
 *
 * Return: 0 on success, otherwise non-zero error code.
 */
int hl_fw_cpucp_info_get(struct hl_device *hdev,
			u32 sts_boot_dev_sts0_reg,
			u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
			u32 boot_err1_reg)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct cpucp_packet pkt = {};
	dma_addr_t cpucp_info_dma_addr;
	void *cpucp_info_cpu_addr;
	char *kernel_ver;
	u64 result;
	int rc;

	/* buffer must come from the pool the device CPU can access */
	cpucp_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, sizeof(struct cpucp_info),
								&cpucp_info_dma_addr);
	if (!cpucp_info_cpu_addr) {
		dev_err(hdev->dev,
			"Failed to allocate DMA memory for CPU-CP info packet\n");
		return -ENOMEM;
	}

	memset(cpucp_info_cpu_addr, 0, sizeof(struct cpucp_info));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_INFO_GET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.addr = cpu_to_le64(cpucp_info_dma_addr);
	pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_info));

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to handle CPU-CP info pkt, error %d\n", rc);
		goto out;
	}

	rc = fw_read_errors(hdev, boot_err0_reg, boot_err1_reg,
				sts_boot_dev_sts0_reg, sts_boot_dev_sts1_reg);
	if (rc) {
		dev_err(hdev->dev, "Errors in device boot\n");
		goto out;
	}

	memcpy(&prop->cpucp_info, cpucp_info_cpu_addr,
			sizeof(prop->cpucp_info));

	rc = hl_build_hwmon_channel_info(hdev, prop->cpucp_info.sensors);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to build hwmon channel info, error %d\n", rc);
		rc = -EFAULT;
		goto out;
	}

	/* extract_fw_ver_from_str() returns a kmalloc'ed copy (or NULL) */
	kernel_ver = extract_fw_ver_from_str(prop->cpucp_info.kernel_version);
	if (kernel_ver) {
		dev_info(hdev->dev, "Linux version %s", kernel_ver);
		kfree(kernel_ver);
	}

	/* assume EQ code doesn't need to check eqe index */
	hdev->event_queue.check_eqe_index = false;

	/* Read FW application security bits again */
	if (prop->fw_cpu_boot_dev_sts0_valid) {
		prop->fw_app_cpu_boot_dev_sts0 = RREG32(sts_boot_dev_sts0_reg);

		if (prop->fw_app_cpu_boot_dev_sts0 &
				CPU_BOOT_DEV_STS0_EQ_INDEX_EN)
			hdev->event_queue.check_eqe_index = true;
	}

	if (prop->fw_cpu_boot_dev_sts1_valid)
		prop->fw_app_cpu_boot_dev_sts1 = RREG32(sts_boot_dev_sts1_reg);

out:
	hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_info), cpucp_info_cpu_addr);

	return rc;
}
/* Send the host MSI information array to the CPU-CP FW.
 * Older FW that does not recognize the packet replies with
 * cpucp_packet_invalid; this is not treated as an error - the FW then uses
 * its default/hard-coded MSI values.
 */
static int hl_fw_send_msi_info_msg(struct hl_device *hdev)
{
	struct cpucp_array_data_packet *pkt;
	size_t total_pkt_size, data_size;
	u64 result;
	int rc;

	/* skip sending this info for unsupported ASICs */
	if (!hdev->asic_funcs->get_msi_info)
		return 0;

	data_size = CPUCP_NUM_OF_MSI_TYPES * sizeof(u32);
	total_pkt_size = sizeof(struct cpucp_array_data_packet) + data_size;

	/* data should be aligned to 8 bytes in order to CPU-CP to copy it */
	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;

	/* total_pkt_size is casted to u16 later on */
	if (total_pkt_size > USHRT_MAX) {
		dev_err(hdev->dev, "CPUCP array data is too big\n");
		return -EINVAL;
	}

	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	pkt->length = cpu_to_le32(CPUCP_NUM_OF_MSI_TYPES);

	/* pre-fill with 0xFF before the ASIC callback populates the array */
	memset((void *) &pkt->data, 0xFF, data_size);
	hdev->asic_funcs->get_msi_info(pkt->data);

	pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_MSI_INFO_SET <<
						CPUCP_PKT_CTL_OPCODE_SHIFT);
	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)pkt,
						total_pkt_size, 0, &result);

	/*
	 * in case packet result is invalid it means that FW does not support
	 * this feature and will use default/hard coded MSI values. no reason
	 * to stop the boot
	 */
	if (rc && result == cpucp_packet_invalid)
		rc = 0;

	if (rc)
		dev_err(hdev->dev, "failed to send CPUCP array data\n");

	kfree(pkt);

	return rc;
}
  697. int hl_fw_cpucp_handshake(struct hl_device *hdev,
  698. u32 sts_boot_dev_sts0_reg,
  699. u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
  700. u32 boot_err1_reg)
  701. {
  702. int rc;
  703. rc = hl_fw_cpucp_info_get(hdev, sts_boot_dev_sts0_reg,
  704. sts_boot_dev_sts1_reg, boot_err0_reg,
  705. boot_err1_reg);
  706. if (rc)
  707. return rc;
  708. return hl_fw_send_msi_info_msg(hdev);
  709. }
/**
 * hl_fw_get_eeprom_data() - fetch EEPROM data from the CPU-CP FW.
 * @hdev: pointer to the habanalabs device structure
 * @data: output buffer
 * @max_size: size of @data in bytes
 *
 * Return: 0 on success, otherwise non-zero error code.
 */
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
{
	struct cpucp_packet pkt = {};
	void *eeprom_info_cpu_addr;
	dma_addr_t eeprom_info_dma_addr;
	u64 result;
	int rc;

	/* buffer must come from the pool the device CPU can access */
	eeprom_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, max_size,
							&eeprom_info_dma_addr);
	if (!eeprom_info_cpu_addr) {
		dev_err(hdev->dev,
			"Failed to allocate DMA memory for CPU-CP EEPROM packet\n");
		return -ENOMEM;
	}

	memset(eeprom_info_cpu_addr, 0, max_size);

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_EEPROM_DATA_GET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.addr = cpu_to_le64(eeprom_info_dma_addr);
	pkt.data_max_size = cpu_to_le32(max_size);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
			HL_CPUCP_EEPROM_TIMEOUT_USEC, &result);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to handle CPU-CP EEPROM packet, error %d\n",
			rc);
		goto out;
	}

	/* result contains the actual size; never copy more than the
	 * caller's buffer can hold
	 */
	memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));

out:
	hl_cpu_accessible_dma_pool_free(hdev, max_size, eeprom_info_cpu_addr);

	return rc;
}
  743. int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
  744. {
  745. struct cpucp_monitor_dump *mon_dump_cpu_addr;
  746. dma_addr_t mon_dump_dma_addr;
  747. struct cpucp_packet pkt = {};
  748. size_t data_size;
  749. __le32 *src_ptr;
  750. u32 *dst_ptr;
  751. u64 result;
  752. int i, rc;
  753. data_size = sizeof(struct cpucp_monitor_dump);
  754. mon_dump_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, data_size, &mon_dump_dma_addr);
  755. if (!mon_dump_cpu_addr) {
  756. dev_err(hdev->dev,
  757. "Failed to allocate DMA memory for CPU-CP monitor-dump packet\n");
  758. return -ENOMEM;
  759. }
  760. memset(mon_dump_cpu_addr, 0, data_size);
  761. pkt.ctl = cpu_to_le32(CPUCP_PACKET_MONITOR_DUMP_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
  762. pkt.addr = cpu_to_le64(mon_dump_dma_addr);
  763. pkt.data_max_size = cpu_to_le32(data_size);
  764. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
  765. HL_CPUCP_MON_DUMP_TIMEOUT_USEC, &result);
  766. if (rc) {
  767. dev_err(hdev->dev, "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
  768. goto out;
  769. }
  770. /* result contains the actual size */
  771. src_ptr = (__le32 *) mon_dump_cpu_addr;
  772. dst_ptr = data;
  773. for (i = 0; i < (data_size / sizeof(u32)); i++) {
  774. *dst_ptr = le32_to_cpu(*src_ptr);
  775. src_ptr++;
  776. dst_ptr++;
  777. }
  778. out:
  779. hl_cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
  780. return rc;
  781. }
  782. int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
  783. struct hl_info_pci_counters *counters)
  784. {
  785. struct cpucp_packet pkt = {};
  786. u64 result;
  787. int rc;
  788. pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
  789. CPUCP_PKT_CTL_OPCODE_SHIFT);
  790. /* Fetch PCI rx counter */
  791. pkt.index = cpu_to_le32(cpucp_pcie_throughput_rx);
  792. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
  793. HL_CPUCP_INFO_TIMEOUT_USEC, &result);
  794. if (rc) {
  795. dev_err(hdev->dev,
  796. "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
  797. return rc;
  798. }
  799. counters->rx_throughput = result;
  800. memset(&pkt, 0, sizeof(pkt));
  801. pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
  802. CPUCP_PKT_CTL_OPCODE_SHIFT);
  803. /* Fetch PCI tx counter */
  804. pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
  805. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
  806. HL_CPUCP_INFO_TIMEOUT_USEC, &result);
  807. if (rc) {
  808. dev_err(hdev->dev,
  809. "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
  810. return rc;
  811. }
  812. counters->tx_throughput = result;
  813. /* Fetch PCI replay counter */
  814. memset(&pkt, 0, sizeof(pkt));
  815. pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
  816. CPUCP_PKT_CTL_OPCODE_SHIFT);
  817. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
  818. HL_CPUCP_INFO_TIMEOUT_USEC, &result);
  819. if (rc) {
  820. dev_err(hdev->dev,
  821. "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
  822. return rc;
  823. }
  824. counters->replay_cnt = (u32) result;
  825. return rc;
  826. }
  827. int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
  828. {
  829. struct cpucp_packet pkt = {};
  830. u64 result;
  831. int rc;
  832. pkt.ctl = cpu_to_le32(CPUCP_PACKET_TOTAL_ENERGY_GET <<
  833. CPUCP_PKT_CTL_OPCODE_SHIFT);
  834. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
  835. HL_CPUCP_INFO_TIMEOUT_USEC, &result);
  836. if (rc) {
  837. dev_err(hdev->dev,
  838. "Failed to handle CpuCP total energy pkt, error %d\n",
  839. rc);
  840. return rc;
  841. }
  842. *total_energy = result;
  843. return rc;
  844. }
/**
 * get_used_pll_index() - translate a driver PLL index to the FW PLL index.
 * @hdev: pointer to the habanalabs device structure
 * @input_pll_index: driver (ASIC-specific) PLL index
 * @pll_index: output; the index to use when talking to the FW
 *
 * Legacy FW uses the per-ASIC numbering as-is; FW with dynamic PLL support
 * needs the index mapped via the ASIC callback and validated against the
 * FW-provided pll_map bitmap of supported PLLs.
 *
 * Return: 0 on success, otherwise non-zero error code.
 */
int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
			enum pll_index *pll_index)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u8 pll_byte, pll_bit_off;
	bool dynamic_pll;
	int fw_pll_idx;

	dynamic_pll = !!(prop->fw_app_cpu_boot_dev_sts0 &
						CPU_BOOT_DEV_STS0_DYN_PLL_EN);

	if (!dynamic_pll) {
		/*
		 * in case we are working with legacy FW (each asic has unique
		 * PLL numbering) use the driver based index as they are
		 * aligned with fw legacy numbering
		 */
		*pll_index = input_pll_index;
		return 0;
	}

	/* retrieve a FW compatible PLL index based on
	 * ASIC specific user request
	 */
	fw_pll_idx = hdev->asic_funcs->map_pll_idx_to_fw_idx(input_pll_index);
	if (fw_pll_idx < 0) {
		dev_err(hdev->dev, "Invalid PLL index (%u) error %d\n",
			input_pll_index, fw_pll_idx);
		return -EINVAL;
	}

	/* PLL map is a u8 array - locate this PLL's byte and bit */
	pll_byte = prop->cpucp_info.pll_map[fw_pll_idx >> 3];
	pll_bit_off = fw_pll_idx & 0x7;

	if (!(pll_byte & BIT(pll_bit_off))) {
		dev_err(hdev->dev, "PLL index %d is not supported\n",
			fw_pll_idx);
		return -EINVAL;
	}

	*pll_index = fw_pll_idx;

	return 0;
}
  883. int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
  884. u16 *pll_freq_arr)
  885. {
  886. struct cpucp_packet pkt;
  887. enum pll_index used_pll_idx;
  888. u64 result;
  889. int rc;
  890. rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
  891. if (rc)
  892. return rc;
  893. memset(&pkt, 0, sizeof(pkt));
  894. pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_INFO_GET <<
  895. CPUCP_PKT_CTL_OPCODE_SHIFT);
  896. pkt.pll_type = __cpu_to_le16((u16)used_pll_idx);
  897. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
  898. HL_CPUCP_INFO_TIMEOUT_USEC, &result);
  899. if (rc) {
  900. dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc);
  901. return rc;
  902. }
  903. pll_freq_arr[0] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT0_MASK, result);
  904. pll_freq_arr[1] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT1_MASK, result);
  905. pll_freq_arr[2] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT2_MASK, result);
  906. pll_freq_arr[3] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT3_MASK, result);
  907. return 0;
  908. }
  909. int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
  910. {
  911. struct cpucp_packet pkt;
  912. u64 result;
  913. int rc;
  914. memset(&pkt, 0, sizeof(pkt));
  915. pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
  916. CPUCP_PKT_CTL_OPCODE_SHIFT);
  917. pkt.type = cpu_to_le16(CPUCP_POWER_INPUT);
  918. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
  919. HL_CPUCP_INFO_TIMEOUT_USEC, &result);
  920. if (rc) {
  921. dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
  922. return rc;
  923. }
  924. *power = result;
  925. return rc;
  926. }
/**
 * hl_fw_dram_replaced_row_get() - fetch the HBM replaced-rows info.
 * @hdev: pointer to the habanalabs device structure
 * @info: output replaced-rows info structure
 *
 * Return: 0 on success, otherwise non-zero error code.
 */
int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
				struct cpucp_hbm_row_info *info)
{
	struct cpucp_hbm_row_info *cpucp_repl_rows_info_cpu_addr;
	dma_addr_t cpucp_repl_rows_info_dma_addr;
	struct cpucp_packet pkt = {};
	u64 result;
	int rc;

	/* buffer must come from the pool the device CPU can access */
	cpucp_repl_rows_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev,
			sizeof(struct cpucp_hbm_row_info),
			&cpucp_repl_rows_info_dma_addr);
	if (!cpucp_repl_rows_info_cpu_addr) {
		dev_err(hdev->dev,
			"Failed to allocate DMA memory for CPU-CP replaced rows info packet\n");
		return -ENOMEM;
	}

	memset(cpucp_repl_rows_info_cpu_addr, 0, sizeof(struct cpucp_hbm_row_info));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET <<
					CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.addr = cpu_to_le64(cpucp_repl_rows_info_dma_addr);
	pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_hbm_row_info));

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
		goto out;
	}

	memcpy(info, cpucp_repl_rows_info_cpu_addr, sizeof(*info));

out:
	hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_hbm_row_info),
						cpucp_repl_rows_info_cpu_addr);

	return rc;
}
  961. int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num)
  962. {
  963. struct cpucp_packet pkt;
  964. u64 result;
  965. int rc;
  966. memset(&pkt, 0, sizeof(pkt));
  967. pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_PENDING_ROWS_STATUS << CPUCP_PKT_CTL_OPCODE_SHIFT);
  968. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
  969. if (rc) {
  970. dev_err(hdev->dev,
  971. "Failed to handle CPU-CP pending rows info pkt, error %d\n", rc);
  972. goto out;
  973. }
  974. *pend_rows_num = (u32) result;
  975. out:
  976. return rc;
  977. }
  978. int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid)
  979. {
  980. struct cpucp_packet pkt;
  981. int rc;
  982. memset(&pkt, 0, sizeof(pkt));
  983. pkt.ctl = cpu_to_le32(CPUCP_PACKET_ENGINE_CORE_ASID_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
  984. pkt.value = cpu_to_le64(asid);
  985. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
  986. HL_CPUCP_INFO_TIMEOUT_USEC, NULL);
  987. if (rc)
  988. dev_err(hdev->dev,
  989. "Failed on ASID configuration request for engine core, error %d\n",
  990. rc);
  991. return rc;
  992. }
/* Ask the device FW to hard-reset the device when no Linux runs on its CPU.
 * Dynamic load flow uses the COMMS protocol; the static flow writes the
 * reset message directly to the KMD-to-CPU message register.
 */
void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev)
{
	struct static_fw_load_mgr *static_loader =
			&hdev->fw_loader.static_loader;
	int rc;

	if (hdev->asic_prop.dynamic_fw_load) {
		rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
				COMMS_RST_DEV, 0, false,
				hdev->fw_loader.cpu_timeout);
		if (rc)
			dev_warn(hdev->dev, "Failed sending COMMS_RST_DEV\n");
	} else {
		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_RST_DEV);
	}
}
/* Halt the device CPU (send it into a WFE loop) when no Linux runs on it.
 * Idempotent: returns immediately if the CPU was already halted.
 */
void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)
{
	struct static_fw_load_mgr *static_loader =
			&hdev->fw_loader.static_loader;
	int rc;

	if (hdev->device_cpu_is_halted)
		return;

	/* Stop device CPU to make sure nothing bad happens */
	if (hdev->asic_prop.dynamic_fw_load) {
		rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
				COMMS_GOTO_WFE, 0, true,
				hdev->fw_loader.cpu_timeout);
		if (rc)
			dev_warn(hdev->dev, "Failed sending COMMS_GOTO_WFE\n");
	} else {
		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE);
		msleep(static_loader->cpu_reset_wait_msec);

		/* Must clear this register in order to prevent preboot
		 * from reading WFE after reboot
		 */
		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_NA);
	}

	hdev->device_cpu_is_halted = true;
}
/* Translate a CPU boot status code into a human-readable boot-progress
 * error message in the kernel log.
 */
static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
{
	/* Some of the status codes below are deprecated in newer f/w
	 * versions but we keep them here for backward compatibility
	 */
	switch (status) {
	case CPU_BOOT_STATUS_NA:
		dev_err(hdev->dev,
			"Device boot progress - BTL/ROM did NOT run\n");
		break;
	case CPU_BOOT_STATUS_IN_WFE:
		dev_err(hdev->dev,
			"Device boot progress - Stuck inside WFE loop\n");
		break;
	case CPU_BOOT_STATUS_IN_BTL:
		dev_err(hdev->dev,
			"Device boot progress - Stuck in BTL\n");
		break;
	case CPU_BOOT_STATUS_IN_PREBOOT:
		dev_err(hdev->dev,
			"Device boot progress - Stuck in Preboot\n");
		break;
	case CPU_BOOT_STATUS_IN_SPL:
		dev_err(hdev->dev,
			"Device boot progress - Stuck in SPL\n");
		break;
	case CPU_BOOT_STATUS_IN_UBOOT:
		dev_err(hdev->dev,
			"Device boot progress - Stuck in u-boot\n");
		break;
	case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
		dev_err(hdev->dev,
			"Device boot progress - DRAM initialization failed\n");
		break;
	case CPU_BOOT_STATUS_UBOOT_NOT_READY:
		dev_err(hdev->dev,
			"Device boot progress - Cannot boot\n");
		break;
	case CPU_BOOT_STATUS_TS_INIT_FAIL:
		dev_err(hdev->dev,
			"Device boot progress - Thermal Sensor initialization failed\n");
		break;
	case CPU_BOOT_STATUS_SECURITY_READY:
		dev_err(hdev->dev,
			"Device boot progress - Stuck in preboot after security initialization\n");
		break;
	default:
		dev_err(hdev->dev,
			"Device boot progress - Invalid status code %d\n",
			status);
		break;
	}
}
/* Poll the boot status register until preboot reports readiness (or that it
 * is waiting for a boot-fit) and mark the preboot component as loaded.
 * On timeout, dumps the boot status and any reported boot errors.
 */
static int hl_fw_wait_preboot_ready(struct hl_device *hdev)
{
	struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
	u32 status;
	int rc;

	/* Need to check two possible scenarios:
	 *
	 * CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT - for newer firmwares where
	 * the preboot is waiting for the boot fit
	 *
	 * All other status values - for older firmwares where the uboot was
	 * loaded from the FLASH
	 */
	rc = hl_poll_timeout(
		hdev,
		pre_fw_load->cpu_boot_status_reg,
		status,
		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
		(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
		hdev->fw_poll_interval_usec,
		pre_fw_load->wait_for_preboot_timeout);

	if (rc) {
		dev_err(hdev->dev, "CPU boot ready status timeout\n");
		detect_cpu_boot_status(hdev, status);

		/* If we read all FF, then something is totally wrong, no point
		 * of reading specific errors
		 */
		if (status != -1)
			fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
					pre_fw_load->boot_err1_reg,
					pre_fw_load->sts_boot_dev_sts0_reg,
					pre_fw_load->sts_boot_dev_sts1_reg);
		return -EIO;
	}

	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_PREBOOT_CPU;

	return 0;
}
/* Wait for preboot readiness, latch the FW capability (DEV_STS*) registers
 * when valid, and decide between the dynamic (COMMS) and legacy (static)
 * FW load protocols.
 */
static int hl_fw_read_preboot_caps(struct hl_device *hdev)
{
	struct pre_fw_load_props *pre_fw_load;
	struct asic_fixed_properties *prop;
	u32 reg_val;
	int rc;

	prop = &hdev->asic_prop;
	pre_fw_load = &hdev->fw_loader.pre_fw_load;

	rc = hl_fw_wait_preboot_ready(hdev);
	if (rc)
		return rc;

	/*
	 * the registers DEV_STS* contain FW capabilities/features.
	 * We can rely on this registers only if bit CPU_BOOT_DEV_STS*_ENABLED
	 * is set.
	 * In the first read of this register we store the value of this
	 * register ONLY if the register is enabled (which will be propagated
	 * to next stages) and also mark the register as valid.
	 * In case it is not enabled the stored value will be left 0- all
	 * caps/features are off
	 */
	reg_val = RREG32(pre_fw_load->sts_boot_dev_sts0_reg);
	if (reg_val & CPU_BOOT_DEV_STS0_ENABLED) {
		prop->fw_cpu_boot_dev_sts0_valid = true;
		prop->fw_preboot_cpu_boot_dev_sts0 = reg_val;
	}

	reg_val = RREG32(pre_fw_load->sts_boot_dev_sts1_reg);
	if (reg_val & CPU_BOOT_DEV_STS1_ENABLED) {
		prop->fw_cpu_boot_dev_sts1_valid = true;
		prop->fw_preboot_cpu_boot_dev_sts1 = reg_val;
	}

	prop->dynamic_fw_load = !!(prop->fw_preboot_cpu_boot_dev_sts0 &
						CPU_BOOT_DEV_STS0_FW_LD_COM_EN);

	/* initialize FW loader once we know what load protocol is used */
	hdev->asic_funcs->init_firmware_loader(hdev);

	dev_dbg(hdev->dev, "Attempting %s FW load\n",
		prop->dynamic_fw_load ? "dynamic" : "legacy");

	return 0;
}
/* Read a FW component (boot-fit / preboot) version string from device SRAM
 * in the static (legacy) load flow, store it in the ASIC properties and log
 * the extracted version(s).
 */
static int hl_fw_static_read_device_fw_version(struct hl_device *hdev,
					enum hl_fw_component fwc)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
	struct static_fw_load_mgr *static_loader;
	char *dest, *boot_ver, *preboot_ver;
	u32 ver_off, limit;
	const char *name;
	char btl_ver[32];

	static_loader = &hdev->fw_loader.static_loader;

	switch (fwc) {
	case FW_COMP_BOOT_FIT:
		ver_off = RREG32(static_loader->boot_fit_version_offset_reg);
		dest = prop->uboot_ver;
		name = "Boot-fit";
		limit = static_loader->boot_fit_version_max_off;
		break;
	case FW_COMP_PREBOOT:
		ver_off = RREG32(static_loader->preboot_version_offset_reg);
		dest = prop->preboot_ver;
		name = "Preboot";
		limit = static_loader->preboot_version_max_off;
		break;
	default:
		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
		return -EIO;
	}

	/* keep only the SRAM-offset bits of the version offset register */
	ver_off &= static_loader->sram_offset_mask;

	if (ver_off < limit) {
		memcpy_fromio(dest,
			hdev->pcie_bar[fw_loader->sram_bar_id] + ver_off,
			VERSION_MAX_LEN);
	} else {
		dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
								name, ver_off);
		strscpy(dest, "unavailable", VERSION_MAX_LEN);
		return -EIO;
	}

	if (fwc == FW_COMP_BOOT_FIT) {
		/* extract_fw_ver_from_str() returns a kmalloc'ed copy (or NULL) */
		boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
		if (boot_ver) {
			dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
			kfree(boot_ver);
		}
	} else if (fwc == FW_COMP_PREBOOT) {
		/* text preceding the "Preboot" substring, if present, is
		 * logged separately as the BTL version
		 */
		preboot_ver = strnstr(prop->preboot_ver, "Preboot",
						VERSION_MAX_LEN);
		if (preboot_ver && preboot_ver != prop->preboot_ver) {
			strscpy(btl_ver, prop->preboot_ver,
				min((int) (preboot_ver - prop->preboot_ver),
									31));
			dev_info(hdev->dev, "%s\n", btl_ver);
		}

		preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
		if (preboot_ver) {
			dev_info(hdev->dev, "preboot version %s\n",
								preboot_ver);
			kfree(preboot_ver);
		}
	}

	return 0;
}
/**
 * hl_fw_preboot_update_state - update internal data structures during
 * handshake with preboot
 *
 * @hdev: pointer to the habanalabs device structure
 */
static void hl_fw_preboot_update_state(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 cpu_boot_dev_sts0, cpu_boot_dev_sts1;

	cpu_boot_dev_sts0 = prop->fw_preboot_cpu_boot_dev_sts0;
	cpu_boot_dev_sts1 = prop->fw_preboot_cpu_boot_dev_sts1;

	/* We read boot_dev_sts registers multiple times during boot:
	 * 1. preboot - a. Check whether the security status bits are valid
	 *              b. Check whether fw security is enabled
	 *              c. Check whether hard reset is done by preboot
	 * 2. boot cpu - a. Fetch boot cpu security status
	 *               b. Check whether hard reset is done by boot cpu
	 * 3. FW application - a. Fetch fw application security status
	 *                     b. Check whether hard reset is done by fw app
	 */
	prop->hard_reset_done_by_fw = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);

	prop->fw_security_enabled = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_SECURITY_EN);

	dev_dbg(hdev->dev, "Firmware preboot boot device status0 %#x\n",
							cpu_boot_dev_sts0);

	dev_dbg(hdev->dev, "Firmware preboot boot device status1 %#x\n",
							cpu_boot_dev_sts1);

	dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
			prop->hard_reset_done_by_fw ? "enabled" : "disabled");

	dev_dbg(hdev->dev, "firmware-level security is %s\n",
			prop->fw_security_enabled ? "enabled" : "disabled");

	dev_dbg(hdev->dev, "GIC controller is %s\n",
			prop->gic_interrupts_enable ? "enabled" : "disabled");
}
  1262. static int hl_fw_static_read_preboot_status(struct hl_device *hdev)
  1263. {
  1264. int rc;
  1265. rc = hl_fw_static_read_device_fw_version(hdev, FW_COMP_PREBOOT);
  1266. if (rc)
  1267. return rc;
  1268. return 0;
  1269. }
  1270. int hl_fw_read_preboot_status(struct hl_device *hdev)
  1271. {
  1272. int rc;
  1273. if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
  1274. return 0;
  1275. /* get FW pre-load parameters */
  1276. hdev->asic_funcs->init_firmware_preload_params(hdev);
  1277. /*
  1278. * In order to determine boot method (static VS dynamic) we need to
  1279. * read the boot caps register
  1280. */
  1281. rc = hl_fw_read_preboot_caps(hdev);
  1282. if (rc)
  1283. return rc;
  1284. hl_fw_preboot_update_state(hdev);
  1285. /* no need to read preboot status in dynamic load */
  1286. if (hdev->asic_prop.dynamic_fw_load)
  1287. return 0;
  1288. return hl_fw_static_read_preboot_status(hdev);
  1289. }
/* associate string with COMM status, for error reporting in the dynamic
 * FW load protocol
 */
static char *hl_dynamic_fw_status_str[COMMS_STS_INVLD_LAST] = {
	[COMMS_STS_NOOP] = "NOOP",
	[COMMS_STS_ACK] = "ACK",
	[COMMS_STS_OK] = "OK",
	[COMMS_STS_ERR] = "ERR",
	[COMMS_STS_VALID_ERR] = "VALID_ERR",
	[COMMS_STS_TIMEOUT_ERR] = "TIMEOUT_ERR",
};
  1299. /**
  1300. * hl_fw_dynamic_report_error_status - report error status
  1301. *
  1302. * @hdev: pointer to the habanalabs device structure
  1303. * @status: value of FW status register
  1304. * @expected_status: the expected status
  1305. */
  1306. static void hl_fw_dynamic_report_error_status(struct hl_device *hdev,
  1307. u32 status,
  1308. enum comms_sts expected_status)
  1309. {
  1310. enum comms_sts comm_status =
  1311. FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
  1312. if (comm_status < COMMS_STS_INVLD_LAST)
  1313. dev_err(hdev->dev, "Device status %s, expected status: %s\n",
  1314. hl_dynamic_fw_status_str[comm_status],
  1315. hl_dynamic_fw_status_str[expected_status]);
  1316. else
  1317. dev_err(hdev->dev, "Device status unknown %d, expected status: %s\n",
  1318. comm_status,
  1319. hl_dynamic_fw_status_str[expected_status]);
  1320. }
  1321. /**
  1322. * hl_fw_dynamic_send_cmd - send LKD to FW cmd
  1323. *
  1324. * @hdev: pointer to the habanalabs device structure
  1325. * @fw_loader: managing structure for loading device's FW
  1326. * @cmd: LKD to FW cmd code
  1327. * @size: size of next FW component to be loaded (0 if not necessary)
  1328. *
  1329. * LDK to FW exact command layout is defined at struct comms_command.
  1330. * note: the size argument is used only when the next FW component should be
  1331. * loaded, otherwise it shall be 0. the size is used by the FW in later
  1332. * protocol stages and when sending only indicating the amount of memory
  1333. * to be allocated by the FW to receive the next boot component.
  1334. */
  1335. static void hl_fw_dynamic_send_cmd(struct hl_device *hdev,
  1336. struct fw_load_mgr *fw_loader,
  1337. enum comms_cmd cmd, unsigned int size)
  1338. {
  1339. struct cpu_dyn_regs *dyn_regs;
  1340. u32 val;
  1341. dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
  1342. val = FIELD_PREP(COMMS_COMMAND_CMD_MASK, cmd);
  1343. val |= FIELD_PREP(COMMS_COMMAND_SIZE_MASK, size);
  1344. WREG32(le32_to_cpu(dyn_regs->kmd_msg_to_cpu), val);
  1345. }
  1346. /**
  1347. * hl_fw_dynamic_extract_fw_response - update the FW response
  1348. *
  1349. * @hdev: pointer to the habanalabs device structure
  1350. * @fw_loader: managing structure for loading device's FW
  1351. * @response: FW response
  1352. * @status: the status read from CPU status register
  1353. *
  1354. * @return 0 on success, otherwise non-zero error code
  1355. */
  1356. static int hl_fw_dynamic_extract_fw_response(struct hl_device *hdev,
  1357. struct fw_load_mgr *fw_loader,
  1358. struct fw_response *response,
  1359. u32 status)
  1360. {
  1361. response->status = FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
  1362. response->ram_offset = FIELD_GET(COMMS_STATUS_OFFSET_MASK, status) <<
  1363. COMMS_STATUS_OFFSET_ALIGN_SHIFT;
  1364. response->ram_type = FIELD_GET(COMMS_STATUS_RAM_TYPE_MASK, status);
  1365. if ((response->ram_type != COMMS_SRAM) &&
  1366. (response->ram_type != COMMS_DRAM)) {
  1367. dev_err(hdev->dev, "FW status: invalid RAM type %u\n",
  1368. response->ram_type);
  1369. return -EIO;
  1370. }
  1371. return 0;
  1372. }
/**
 * hl_fw_dynamic_wait_for_status - wait for status in dynamic FW load
 *
 * @hdev: pointer to the habanalabs device structure
 * @fw_loader: managing structure for loading device's FW
 * @expected_status: expected status to wait for
 * @timeout: timeout for status wait
 *
 * @return 0 on success, otherwise non-zero error code
 *
 * waiting for status from FW include polling the FW status register until
 * expected status is received or timeout occurs (whatever occurs first).
 */
static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
						struct fw_load_mgr *fw_loader,
						enum comms_sts expected_status,
						u32 timeout)
{
	struct cpu_dyn_regs *dyn_regs;
	u32 status;
	int rc;

	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;

	/* Wait for expected status */
	rc = hl_poll_timeout(
		hdev,
		le32_to_cpu(dyn_regs->cpu_cmd_status_to_host),
		status,
		FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status,
		hdev->fw_comms_poll_interval_usec,
		timeout);

	if (rc) {
		hl_fw_dynamic_report_error_status(hdev, status,
							expected_status);
		return -EIO;
	}

	/*
	 * skip storing FW response for NOOP to preserve the actual desired
	 * FW status
	 */
	if (expected_status == COMMS_STS_NOOP)
		return 0;

	rc = hl_fw_dynamic_extract_fw_response(hdev, fw_loader,
					&fw_loader->dynamic_loader.response,
					status);

	return rc;
}
  1419. /**
  1420. * hl_fw_dynamic_send_clear_cmd - send clear command to FW
  1421. *
  1422. * @hdev: pointer to the habanalabs device structure
  1423. * @fw_loader: managing structure for loading device's FW
  1424. *
  1425. * @return 0 on success, otherwise non-zero error code
  1426. *
  1427. * after command cycle between LKD to FW CPU (i.e. LKD got an expected status
  1428. * from FW) we need to clear the CPU status register in order to avoid garbage
  1429. * between command cycles.
  1430. * This is done by sending clear command and polling the CPU to LKD status
  1431. * register to hold the status NOOP
  1432. */
  1433. static int hl_fw_dynamic_send_clear_cmd(struct hl_device *hdev,
  1434. struct fw_load_mgr *fw_loader)
  1435. {
  1436. hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_CLR_STS, 0);
  1437. return hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_NOOP,
  1438. fw_loader->cpu_timeout);
  1439. }
  1440. /**
  1441. * hl_fw_dynamic_send_protocol_cmd - send LKD to FW cmd and wait for ACK
  1442. *
  1443. * @hdev: pointer to the habanalabs device structure
  1444. * @fw_loader: managing structure for loading device's FW
  1445. * @cmd: LKD to FW cmd code
  1446. * @size: size of next FW component to be loaded (0 if not necessary)
  1447. * @wait_ok: if true also wait for OK response from FW
  1448. * @timeout: timeout for status wait
  1449. *
  1450. * @return 0 on success, otherwise non-zero error code
  1451. *
  1452. * brief:
  1453. * when sending protocol command we have the following steps:
  1454. * - send clear (clear command and verify clear status register)
  1455. * - send the actual protocol command
  1456. * - wait for ACK on the protocol command
  1457. * - send clear
  1458. * - send NOOP
  1459. * if, in addition, the specific protocol command should wait for OK then:
  1460. * - wait for OK
  1461. * - send clear
  1462. * - send NOOP
  1463. *
  1464. * NOTES:
  1465. * send clear: this is necessary in order to clear the status register to avoid
  1466. * leftovers between command
  1467. * NOOP command: necessary to avoid loop on the clear command by the FW
  1468. */
  1469. int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
  1470. struct fw_load_mgr *fw_loader,
  1471. enum comms_cmd cmd, unsigned int size,
  1472. bool wait_ok, u32 timeout)
  1473. {
  1474. int rc;
  1475. /* first send clear command to clean former commands */
  1476. rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
  1477. /* send the actual command */
  1478. hl_fw_dynamic_send_cmd(hdev, fw_loader, cmd, size);
  1479. /* wait for ACK for the command */
  1480. rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_ACK,
  1481. timeout);
  1482. if (rc)
  1483. return rc;
  1484. /* clear command to prepare for NOOP command */
  1485. rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
  1486. if (rc)
  1487. return rc;
  1488. /* send the actual NOOP command */
  1489. hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);
  1490. if (!wait_ok)
  1491. return 0;
  1492. rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_OK,
  1493. timeout);
  1494. if (rc)
  1495. return rc;
  1496. /* clear command to prepare for NOOP command */
  1497. rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
  1498. if (rc)
  1499. return rc;
  1500. /* send the actual NOOP command */
  1501. hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);
  1502. return 0;
  1503. }
  1504. /**
  1505. * hl_fw_compat_crc32 - CRC compatible with FW
  1506. *
  1507. * @data: pointer to the data
  1508. * @size: size of the data
  1509. *
  1510. * @return the CRC32 result
  1511. *
  1512. * NOTE: kernel's CRC32 differs from standard CRC32 calculation.
  1513. * in order to be aligned we need to flip the bits of both the input
  1514. * initial CRC and kernel's CRC32 result.
  1515. * in addition both sides use initial CRC of 0,
  1516. */
  1517. static u32 hl_fw_compat_crc32(u8 *data, size_t size)
  1518. {
  1519. return ~crc32_le(~((u32)0), data, size);
  1520. }
  1521. /**
  1522. * hl_fw_dynamic_validate_memory_bound - validate memory bounds for memory
  1523. * transfer (image or descriptor) between
  1524. * host and FW
  1525. *
  1526. * @hdev: pointer to the habanalabs device structure
  1527. * @addr: device address of memory transfer
  1528. * @size: memory transfer size
  1529. * @region: PCI memory region
  1530. *
  1531. * @return 0 on success, otherwise non-zero error code
  1532. */
  1533. static int hl_fw_dynamic_validate_memory_bound(struct hl_device *hdev,
  1534. u64 addr, size_t size,
  1535. struct pci_mem_region *region)
  1536. {
  1537. u64 end_addr;
  1538. /* now make sure that the memory transfer is within region's bounds */
  1539. end_addr = addr + size;
  1540. if (end_addr >= region->region_base + region->region_size) {
  1541. dev_err(hdev->dev,
  1542. "dynamic FW load: memory transfer end address out of memory region bounds. addr: %llx\n",
  1543. end_addr);
  1544. return -EIO;
  1545. }
  1546. /*
  1547. * now make sure memory transfer is within predefined BAR bounds.
  1548. * this is to make sure we do not need to set the bar (e.g. for DRAM
  1549. * memory transfers)
  1550. */
  1551. if (end_addr >= region->region_base - region->offset_in_bar +
  1552. region->bar_size) {
  1553. dev_err(hdev->dev,
  1554. "FW image beyond PCI BAR bounds\n");
  1555. return -EIO;
  1556. }
  1557. return 0;
  1558. }
  1559. /**
  1560. * hl_fw_dynamic_validate_descriptor - validate FW descriptor
  1561. *
  1562. * @hdev: pointer to the habanalabs device structure
  1563. * @fw_loader: managing structure for loading device's FW
  1564. * @fw_desc: the descriptor form FW
  1565. *
  1566. * @return 0 on success, otherwise non-zero error code
  1567. */
  1568. static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
  1569. struct fw_load_mgr *fw_loader,
  1570. struct lkd_fw_comms_desc *fw_desc)
  1571. {
  1572. struct pci_mem_region *region;
  1573. enum pci_region region_id;
  1574. size_t data_size;
  1575. u32 data_crc32;
  1576. u8 *data_ptr;
  1577. u64 addr;
  1578. int rc;
  1579. if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC)
  1580. dev_warn(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
  1581. fw_desc->header.magic);
  1582. if (fw_desc->header.version != HL_COMMS_DESC_VER)
  1583. dev_warn(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
  1584. fw_desc->header.version);
  1585. /*
  1586. * Calc CRC32 of data without header. use the size of the descriptor
  1587. * reported by firmware, without calculating it ourself, to allow adding
  1588. * more fields to the lkd_fw_comms_desc structure.
  1589. * note that no alignment/stride address issues here as all structures
  1590. * are 64 bit padded.
  1591. */
  1592. data_ptr = (u8 *)fw_desc + sizeof(struct comms_desc_header);
  1593. data_size = le16_to_cpu(fw_desc->header.size);
  1594. data_crc32 = hl_fw_compat_crc32(data_ptr, data_size);
  1595. if (data_crc32 != le32_to_cpu(fw_desc->header.crc32)) {
  1596. dev_err(hdev->dev, "CRC32 mismatch for dynamic FW descriptor (%x:%x)\n",
  1597. data_crc32, fw_desc->header.crc32);
  1598. return -EIO;
  1599. }
  1600. /* find memory region to which to copy the image */
  1601. addr = le64_to_cpu(fw_desc->img_addr);
  1602. region_id = hl_get_pci_memory_region(hdev, addr);
  1603. if ((region_id != PCI_REGION_SRAM) && ((region_id != PCI_REGION_DRAM))) {
  1604. dev_err(hdev->dev, "Invalid region to copy FW image address=%llx\n", addr);
  1605. return -EIO;
  1606. }
  1607. region = &hdev->pci_mem_region[region_id];
  1608. /* store the region for the copy stage */
  1609. fw_loader->dynamic_loader.image_region = region;
  1610. /*
  1611. * here we know that the start address is valid, now make sure that the
  1612. * image is within region's bounds
  1613. */
  1614. rc = hl_fw_dynamic_validate_memory_bound(hdev, addr,
  1615. fw_loader->dynamic_loader.fw_image_size,
  1616. region);
  1617. if (rc) {
  1618. dev_err(hdev->dev, "invalid mem transfer request for FW image\n");
  1619. return rc;
  1620. }
  1621. /* here we can mark the descriptor as valid as the content has been validated */
  1622. fw_loader->dynamic_loader.fw_desc_valid = true;
  1623. return 0;
  1624. }
  1625. static int hl_fw_dynamic_validate_response(struct hl_device *hdev,
  1626. struct fw_response *response,
  1627. struct pci_mem_region *region)
  1628. {
  1629. u64 device_addr;
  1630. int rc;
  1631. device_addr = region->region_base + response->ram_offset;
  1632. /*
  1633. * validate that the descriptor is within region's bounds
  1634. * Note that as the start address was supplied according to the RAM
  1635. * type- testing only the end address is enough
  1636. */
  1637. rc = hl_fw_dynamic_validate_memory_bound(hdev, device_addr,
  1638. sizeof(struct lkd_fw_comms_desc),
  1639. region);
  1640. return rc;
  1641. }
  1642. /**
  1643. * hl_fw_dynamic_read_and_validate_descriptor - read and validate FW descriptor
  1644. *
  1645. * @hdev: pointer to the habanalabs device structure
  1646. * @fw_loader: managing structure for loading device's FW
  1647. *
  1648. * @return 0 on success, otherwise non-zero error code
  1649. */
  1650. static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
  1651. struct fw_load_mgr *fw_loader)
  1652. {
  1653. struct lkd_fw_comms_desc *fw_desc;
  1654. struct pci_mem_region *region;
  1655. struct fw_response *response;
  1656. enum pci_region region_id;
  1657. void __iomem *src;
  1658. int rc;
  1659. fw_desc = &fw_loader->dynamic_loader.comm_desc;
  1660. response = &fw_loader->dynamic_loader.response;
  1661. region_id = (response->ram_type == COMMS_SRAM) ?
  1662. PCI_REGION_SRAM : PCI_REGION_DRAM;
  1663. region = &hdev->pci_mem_region[region_id];
  1664. rc = hl_fw_dynamic_validate_response(hdev, response, region);
  1665. if (rc) {
  1666. dev_err(hdev->dev,
  1667. "invalid mem transfer request for FW descriptor\n");
  1668. return rc;
  1669. }
  1670. /*
  1671. * extract address to copy the descriptor from
  1672. * in addition, as the descriptor value is going to be over-ridden by new data- we mark it
  1673. * as invalid.
  1674. * it will be marked again as valid once validated
  1675. */
  1676. fw_loader->dynamic_loader.fw_desc_valid = false;
  1677. src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
  1678. response->ram_offset;
  1679. memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
  1680. return hl_fw_dynamic_validate_descriptor(hdev, fw_loader, fw_desc);
  1681. }
  1682. /**
  1683. * hl_fw_dynamic_request_descriptor - handshake with CPU to get FW descriptor
  1684. *
  1685. * @hdev: pointer to the habanalabs device structure
  1686. * @fw_loader: managing structure for loading device's FW
  1687. * @next_image_size: size to allocate for next FW component
  1688. *
  1689. * @return 0 on success, otherwise non-zero error code
  1690. */
  1691. static int hl_fw_dynamic_request_descriptor(struct hl_device *hdev,
  1692. struct fw_load_mgr *fw_loader,
  1693. size_t next_image_size)
  1694. {
  1695. int rc;
  1696. rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_PREP_DESC,
  1697. next_image_size, true,
  1698. fw_loader->cpu_timeout);
  1699. if (rc)
  1700. return rc;
  1701. return hl_fw_dynamic_read_and_validate_descriptor(hdev, fw_loader);
  1702. }
/**
 * hl_fw_dynamic_read_device_fw_version - read FW version to exposed properties
 *
 * @hdev: pointer to the habanalabs device structure
 * @fwc: the firmware component
 * @fw_version: fw component's version string
 *
 * @return 0 on success, otherwise non-zero error code
 */
static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
					enum hl_fw_component fwc,
					const char *fw_version)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	char *preboot_ver, *boot_ver;
	char btl_ver[32];

	switch (fwc) {
	case FW_COMP_BOOT_FIT:
		strscpy(prop->uboot_ver, fw_version, VERSION_MAX_LEN);
		boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
		if (boot_ver) {
			dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
			/* extract_fw_ver_from_str() returns an allocated copy */
			kfree(boot_ver);
		}

		break;
	case FW_COMP_PREBOOT:
		strscpy(prop->preboot_ver, fw_version, VERSION_MAX_LEN);
		/* the BTL (boot-loader) banner precedes the "Preboot" token */
		preboot_ver = strnstr(prop->preboot_ver, "Preboot",
						VERSION_MAX_LEN);
		if (preboot_ver && preboot_ver != prop->preboot_ver) {
			/*
			 * NOTE(review): strscpy() copies at most size-1 chars
			 * plus the NUL, so passing the prefix length itself
			 * drops the last prefix character — confirm whether
			 * that boundary is intended.
			 */
			strscpy(btl_ver, prop->preboot_ver,
				min((int) (preboot_ver - prop->preboot_ver), 31));
			dev_info(hdev->dev, "%s\n", btl_ver);
		}

		preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
		if (preboot_ver) {
			int rc;

			dev_info(hdev->dev, "preboot version %s\n", preboot_ver);

			/* This function takes care of freeing preboot_ver */
			rc = extract_fw_sub_versions(hdev, preboot_ver);
			if (rc)
				return rc;
		}

		break;
	default:
		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
		return -EINVAL;
	}

	return 0;
}
  1751. /**
  1752. * hl_fw_dynamic_copy_image - copy image to memory allocated by the FW
  1753. *
  1754. * @hdev: pointer to the habanalabs device structure
  1755. * @fw: fw descriptor
  1756. * @fw_loader: managing structure for loading device's FW
  1757. */
  1758. static int hl_fw_dynamic_copy_image(struct hl_device *hdev,
  1759. const struct firmware *fw,
  1760. struct fw_load_mgr *fw_loader)
  1761. {
  1762. struct lkd_fw_comms_desc *fw_desc;
  1763. struct pci_mem_region *region;
  1764. void __iomem *dest;
  1765. u64 addr;
  1766. int rc;
  1767. fw_desc = &fw_loader->dynamic_loader.comm_desc;
  1768. addr = le64_to_cpu(fw_desc->img_addr);
  1769. /* find memory region to which to copy the image */
  1770. region = fw_loader->dynamic_loader.image_region;
  1771. dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
  1772. (addr - region->region_base);
  1773. rc = hl_fw_copy_fw_to_device(hdev, fw, dest,
  1774. fw_loader->boot_fit_img.src_off,
  1775. fw_loader->boot_fit_img.copy_size);
  1776. return rc;
  1777. }
  1778. /**
  1779. * hl_fw_dynamic_copy_msg - copy msg to memory allocated by the FW
  1780. *
  1781. * @hdev: pointer to the habanalabs device structure
  1782. * @msg: message
  1783. * @fw_loader: managing structure for loading device's FW
  1784. */
  1785. static int hl_fw_dynamic_copy_msg(struct hl_device *hdev,
  1786. struct lkd_msg_comms *msg, struct fw_load_mgr *fw_loader)
  1787. {
  1788. struct lkd_fw_comms_desc *fw_desc;
  1789. struct pci_mem_region *region;
  1790. void __iomem *dest;
  1791. u64 addr;
  1792. int rc;
  1793. fw_desc = &fw_loader->dynamic_loader.comm_desc;
  1794. addr = le64_to_cpu(fw_desc->img_addr);
  1795. /* find memory region to which to copy the image */
  1796. region = fw_loader->dynamic_loader.image_region;
  1797. dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
  1798. (addr - region->region_base);
  1799. rc = hl_fw_copy_msg_to_device(hdev, msg, dest, 0, 0);
  1800. return rc;
  1801. }
/**
 * hl_fw_boot_fit_update_state - update internal data structures after boot-fit
 *                               is loaded
 *
 * @hdev: pointer to the habanalabs device structure
 * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
 * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
 */
static void hl_fw_boot_fit_update_state(struct hl_device *hdev,
						u32 cpu_boot_dev_sts0_reg,
						u32 cpu_boot_dev_sts1_reg)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_BOOT_CPU;

	/* Read boot_cpu status bits */
	if (prop->fw_preboot_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_ENABLED) {
		prop->fw_bootfit_cpu_boot_dev_sts0 =
				RREG32(cpu_boot_dev_sts0_reg);

		/* FW may take responsibility for hard reset from here on */
		prop->hard_reset_done_by_fw = !!(prop->fw_bootfit_cpu_boot_dev_sts0 &
							CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);

		dev_dbg(hdev->dev, "Firmware boot CPU status0 %#x\n",
			prop->fw_bootfit_cpu_boot_dev_sts0);
	}

	/*
	 * NOTE(review): sts0 is gated on the preboot ENABLED bit while sts1 is
	 * gated on the cached fw_cpu_boot_dev_sts1_valid flag — confirm this
	 * asymmetry is intended.
	 */
	if (prop->fw_cpu_boot_dev_sts1_valid) {
		prop->fw_bootfit_cpu_boot_dev_sts1 =
				RREG32(cpu_boot_dev_sts1_reg);

		dev_dbg(hdev->dev, "Firmware boot CPU status1 %#x\n",
			prop->fw_bootfit_cpu_boot_dev_sts1);
	}

	dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
		prop->hard_reset_done_by_fw ? "enabled" : "disabled");
}
  1836. static void hl_fw_dynamic_update_linux_interrupt_if(struct hl_device *hdev)
  1837. {
  1838. struct cpu_dyn_regs *dyn_regs =
  1839. &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
  1840. /* Check whether all 3 interrupt interfaces are set, if not use a
  1841. * single interface
  1842. */
  1843. if (!hdev->asic_prop.gic_interrupts_enable &&
  1844. !(hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
  1845. CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN)) {
  1846. dyn_regs->gic_host_halt_irq = dyn_regs->gic_host_pi_upd_irq;
  1847. dyn_regs->gic_host_ints_irq = dyn_regs->gic_host_pi_upd_irq;
  1848. dev_warn(hdev->dev,
  1849. "Using a single interrupt interface towards cpucp");
  1850. }
  1851. }
/**
 * hl_fw_dynamic_load_image - load FW image using dynamic protocol
 *
 * @hdev: pointer to the habanalabs device structure
 * @fw_loader: managing structure for loading device's FW
 * @load_fwc: the FW component to be loaded
 * @img_ld_timeout: image load timeout
 *
 * @return 0 on success, otherwise non-zero error code
 */
static int hl_fw_dynamic_load_image(struct hl_device *hdev,
						struct fw_load_mgr *fw_loader,
						enum hl_fw_component load_fwc,
						u32 img_ld_timeout)
{
	enum hl_fw_component cur_fwc;
	const struct firmware *fw;
	char *fw_name;
	int rc = 0;

	/*
	 * when loading image we have one of 2 scenarios:
	 * 1. current FW component is preboot and we want to load boot-fit
	 * 2. current FW component is boot-fit and we want to load linux
	 */
	if (load_fwc == FW_COMP_BOOT_FIT) {
		cur_fwc = FW_COMP_PREBOOT;
		fw_name = fw_loader->boot_fit_img.image_name;
	} else {
		cur_fwc = FW_COMP_BOOT_FIT;
		fw_name = fw_loader->linux_img.image_name;
	}

	/* request FW in order to communicate to FW the size to be allocated */
	rc = hl_request_fw(hdev, &fw, fw_name);
	if (rc)
		return rc;

	/* store the image size for future validation */
	fw_loader->dynamic_loader.fw_image_size = fw->size;

	/* handshake: FW allocates space and returns the descriptor */
	rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, fw->size);
	if (rc)
		goto release_fw;

	/* read the version string of the currently running FW component */
	rc = hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc,
				fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
	if (rc)
		goto release_fw;

	/* update state according to boot stage */
	if (cur_fwc == FW_COMP_BOOT_FIT) {
		struct cpu_dyn_regs *dyn_regs;

		dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
		hl_fw_boot_fit_update_state(hdev,
					le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
					le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
	}

	/* copy the image to the space allocated by the FW */
	rc = hl_fw_dynamic_copy_image(hdev, fw, fw_loader);
	if (rc)
		goto release_fw;

	/* notify FW the data is in place and wait for its OK */
	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
						0, true,
						fw_loader->cpu_timeout);
	if (rc)
		goto release_fw;

	/* kick off execution of the newly loaded component (no OK-wait) */
	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
						0, false,
						img_ld_timeout);

release_fw:
	hl_release_firmware(fw);
	return rc;
}
/*
 * Poll the CPU boot-status register until the boot-loader reports it is
 * running (or SRAM_AVAIL in the uboot-without-Linux debug flow), up to the
 * configured boot-loader timeout.
 */
static int hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device *hdev,
					struct fw_load_mgr *fw_loader)
{
	struct dynamic_fw_load_mgr *dyn_loader;
	u32 status;
	int rc;

	dyn_loader = &fw_loader->dynamic_loader;

	/*
	 * Make sure CPU boot-loader is running
	 * Note that the CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux
	 * yet there is a debug scenario in which we loading uboot (without Linux)
	 * which at later stage is relocated to DRAM. In this case we expect
	 * uboot to set the CPU_BOOT_STATUS_SRAM_AVAIL and so we add it to the
	 * poll flags
	 */
	rc = hl_poll_timeout(
		hdev,
		le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
		status,
		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
		hdev->fw_poll_interval_usec,
		dyn_loader->wait_for_bl_timeout);
	if (rc) {
		dev_err(hdev->dev, "failed to wait for boot\n");
		return rc;
	}

	dev_dbg(hdev->dev, "uboot status = %d\n", status);
	return 0;
}
/*
 * Poll the CPU boot-status register until Linux reports SRAM_AVAIL,
 * i.e. the device-CPU Linux is up, bounded by the CPU timeout.
 */
static int hl_fw_dynamic_wait_for_linux_active(struct hl_device *hdev,
						struct fw_load_mgr *fw_loader)
{
	struct dynamic_fw_load_mgr *dyn_loader;
	u32 status;
	int rc;

	dyn_loader = &fw_loader->dynamic_loader;

	/* Make sure CPU linux is running */
	rc = hl_poll_timeout(
		hdev,
		le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
		status,
		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
		hdev->fw_poll_interval_usec,
		fw_loader->cpu_timeout);
	if (rc) {
		dev_err(hdev->dev, "failed to wait for Linux\n");
		return rc;
	}

	dev_dbg(hdev->dev, "Boot status = %d\n", status);
	return 0;
}
/**
 * hl_fw_linux_update_state - update internal data structures after Linux
 *                            is loaded.
 *                            Note: Linux initialization is comprised mainly
 *                            of two stages - loading kernel (SRAM_AVAIL)
 *                            & loading ARMCP.
 *                            Therefore reading boot device status in any of
 *                            these stages might result in different values.
 *
 * @hdev: pointer to the habanalabs device structure
 * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
 * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
 */
static void hl_fw_linux_update_state(struct hl_device *hdev,
						u32 cpu_boot_dev_sts0_reg,
						u32 cpu_boot_dev_sts1_reg)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_LINUX;

	/* Read FW application security bits */
	if (prop->fw_cpu_boot_dev_sts0_valid) {
		prop->fw_app_cpu_boot_dev_sts0 = RREG32(cpu_boot_dev_sts0_reg);

		/* FW may take responsibility for hard reset from here on */
		prop->hard_reset_done_by_fw = !!(prop->fw_app_cpu_boot_dev_sts0 &
						CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);

		/* a privileged GIC means the driver must not use GIC interrupts */
		if (prop->fw_app_cpu_boot_dev_sts0 &
				CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN)
			prop->gic_interrupts_enable = false;

		dev_dbg(hdev->dev,
			"Firmware application CPU status0 %#x\n",
			prop->fw_app_cpu_boot_dev_sts0);

		dev_dbg(hdev->dev, "GIC controller is %s\n",
				prop->gic_interrupts_enable ?
						"enabled" : "disabled");
	}

	if (prop->fw_cpu_boot_dev_sts1_valid) {
		prop->fw_app_cpu_boot_dev_sts1 = RREG32(cpu_boot_dev_sts1_reg);

		dev_dbg(hdev->dev,
			"Firmware application CPU status1 %#x\n",
			prop->fw_app_cpu_boot_dev_sts1);
	}

	dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
			prop->hard_reset_done_by_fw ? "enabled" : "disabled");

	dev_info(hdev->dev, "Successfully loaded firmware to device\n");
}
  2019. /**
  2020. * hl_fw_dynamic_send_msg - send a COMMS message with attached data
  2021. *
  2022. * @hdev: pointer to the habanalabs device structure
  2023. * @fw_loader: managing structure for loading device's FW
  2024. * @msg_type: message type
  2025. * @data: data to be sent
  2026. *
  2027. * @return 0 on success, otherwise non-zero error code
  2028. */
  2029. static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
  2030. struct fw_load_mgr *fw_loader, u8 msg_type, void *data)
  2031. {
  2032. struct lkd_msg_comms msg;
  2033. int rc;
  2034. memset(&msg, 0, sizeof(msg));
  2035. /* create message to be sent */
  2036. msg.header.type = msg_type;
  2037. msg.header.size = cpu_to_le16(sizeof(struct comms_msg_header));
  2038. msg.header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC);
  2039. switch (msg_type) {
  2040. case HL_COMMS_RESET_CAUSE_TYPE:
  2041. msg.reset_cause = *(__u8 *) data;
  2042. break;
  2043. default:
  2044. dev_err(hdev->dev,
  2045. "Send COMMS message - invalid message type %u\n",
  2046. msg_type);
  2047. return -EINVAL;
  2048. }
  2049. rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
  2050. sizeof(struct lkd_msg_comms));
  2051. if (rc)
  2052. return rc;
  2053. /* copy message to space allocated by FW */
  2054. rc = hl_fw_dynamic_copy_msg(hdev, &msg, fw_loader);
  2055. if (rc)
  2056. return rc;
  2057. rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
  2058. 0, true,
  2059. fw_loader->cpu_timeout);
  2060. if (rc)
  2061. return rc;
  2062. rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
  2063. 0, true,
  2064. fw_loader->cpu_timeout);
  2065. if (rc)
  2066. return rc;
  2067. return 0;
  2068. }
  2069. /**
  2070. * hl_fw_dynamic_init_cpu - initialize the device CPU using dynamic protocol
  2071. *
  2072. * @hdev: pointer to the habanalabs device structure
  2073. * @fw_loader: managing structure for loading device's FW
  2074. *
  2075. * @return 0 on success, otherwise non-zero error code
  2076. *
  2077. * brief: the dynamic protocol is master (LKD) slave (FW CPU) protocol.
  2078. * the communication is done using registers:
  2079. * - LKD command register
  2080. * - FW status register
  2081. * the protocol is race free. this goal is achieved by splitting the requests
  2082. * and response to known synchronization points between the LKD and the FW.
  2083. * each response to LKD request is known and bound to a predefined timeout.
  2084. * in case of timeout expiration without the desired status from FW- the
  2085. * protocol (and hence the boot) will fail.
  2086. */
  2087. static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
  2088. struct fw_load_mgr *fw_loader)
  2089. {
  2090. struct cpu_dyn_regs *dyn_regs;
  2091. int rc;
  2092. dev_info(hdev->dev,
  2093. "Loading %sfirmware to device, may take some time...\n",
  2094. hdev->asic_prop.fw_security_enabled ? "secured " : "");
  2095. /* initialize FW descriptor as invalid */
  2096. fw_loader->dynamic_loader.fw_desc_valid = false;
  2097. /*
  2098. * In this stage, "cpu_dyn_regs" contains only LKD's hard coded values!
  2099. * It will be updated from FW after hl_fw_dynamic_request_descriptor().
  2100. */
  2101. dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
  2102. rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
  2103. 0, true,
  2104. fw_loader->cpu_timeout);
  2105. if (rc)
  2106. goto protocol_err;
  2107. if (hdev->reset_info.curr_reset_cause) {
  2108. rc = hl_fw_dynamic_send_msg(hdev, fw_loader,
  2109. HL_COMMS_RESET_CAUSE_TYPE, &hdev->reset_info.curr_reset_cause);
  2110. if (rc)
  2111. goto protocol_err;
  2112. /* Clear current reset cause */
  2113. hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
  2114. }
  2115. if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
  2116. rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, 0);
  2117. if (rc)
  2118. goto protocol_err;
  2119. /* read preboot version */
  2120. return hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
  2121. fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
  2122. }
  2123. /* load boot fit to FW */
  2124. rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_BOOT_FIT,
  2125. fw_loader->boot_fit_timeout);
  2126. if (rc) {
  2127. dev_err(hdev->dev, "failed to load boot fit\n");
  2128. goto protocol_err;
  2129. }
  2130. /*
  2131. * when testing FW load (without Linux) on PLDM we don't want to
  2132. * wait until boot fit is active as it may take several hours.
  2133. * instead, we load the bootfit and let it do all initialization in
  2134. * the background.
  2135. */
  2136. if (hdev->pldm && !(hdev->fw_components & FW_TYPE_LINUX))
  2137. return 0;
  2138. rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader);
  2139. if (rc)
  2140. goto protocol_err;
  2141. /* Enable DRAM scrambling before Linux boot and after successful
  2142. * UBoot
  2143. */
  2144. hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
  2145. if (!(hdev->fw_components & FW_TYPE_LINUX)) {
  2146. dev_info(hdev->dev, "Skip loading Linux F/W\n");
  2147. return 0;
  2148. }
  2149. if (fw_loader->skip_bmc) {
  2150. rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader,
  2151. COMMS_SKIP_BMC, 0,
  2152. true,
  2153. fw_loader->cpu_timeout);
  2154. if (rc) {
  2155. dev_err(hdev->dev, "failed to load boot fit\n");
  2156. goto protocol_err;
  2157. }
  2158. }
  2159. /* load Linux image to FW */
  2160. rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_LINUX,
  2161. fw_loader->cpu_timeout);
  2162. if (rc) {
  2163. dev_err(hdev->dev, "failed to load Linux\n");
  2164. goto protocol_err;
  2165. }
  2166. rc = hl_fw_dynamic_wait_for_linux_active(hdev, fw_loader);
  2167. if (rc)
  2168. goto protocol_err;
  2169. hl_fw_linux_update_state(hdev, le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
  2170. le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
  2171. hl_fw_dynamic_update_linux_interrupt_if(hdev);
  2172. return 0;
  2173. protocol_err:
  2174. if (fw_loader->dynamic_loader.fw_desc_valid)
  2175. fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
  2176. le32_to_cpu(dyn_regs->cpu_boot_err1),
  2177. le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
  2178. le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
  2179. return rc;
  2180. }
/**
 * hl_fw_static_init_cpu - initialize the device CPU using static protocol
 *
 * @hdev: pointer to the habanalabs device structure
 * @fw_loader: managing structure for loading device's FW
 *
 * @return 0 on success, otherwise non-zero error code
 *
 * Static protocol sequence: wait for a boot-fit request, hand the boot fit
 * to the device and ack it, wait for the boot loader to come up, then
 * (unless only the boot CPU was requested) load the Linux FW image and wait
 * for it to report SRAM availability. Every failure path funnels through
 * the "out" label so boot errors are read from the device before returning.
 */
static int hl_fw_static_init_cpu(struct hl_device *hdev,
				struct fw_load_mgr *fw_loader)
{
	u32 cpu_msg_status_reg, cpu_timeout, msg_to_cpu_reg, status;
	u32 cpu_boot_dev_status0_reg, cpu_boot_dev_status1_reg;
	struct static_fw_load_mgr *static_loader;
	u32 cpu_boot_status_reg;
	int rc;

	/* Nothing to do if the caller did not ask for boot-CPU FW loading */
	if (!(hdev->fw_components & FW_TYPE_BOOT_CPU))
		return 0;

	/* init common loader parameters */
	cpu_timeout = fw_loader->cpu_timeout;

	/* init static loader parameters */
	static_loader = &fw_loader->static_loader;
	cpu_msg_status_reg = static_loader->cpu_cmd_status_to_host_reg;
	msg_to_cpu_reg = static_loader->kmd_msg_to_cpu_reg;
	cpu_boot_dev_status0_reg = static_loader->cpu_boot_dev_status0_reg;
	cpu_boot_dev_status1_reg = static_loader->cpu_boot_dev_status1_reg;
	cpu_boot_status_reg = static_loader->cpu_boot_status_reg;

	dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
		cpu_timeout / USEC_PER_SEC);

	/* Wait for boot FIT request */
	rc = hl_poll_timeout(
		hdev,
		cpu_boot_status_reg,
		status,
		status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT,
		hdev->fw_poll_interval_usec,
		fw_loader->boot_fit_timeout);

	if (rc) {
		/* No request is not fatal - the device may boot without it */
		dev_dbg(hdev->dev,
			"No boot fit request received, resuming boot\n");
	} else {
		rc = hdev->asic_funcs->load_boot_fit_to_device(hdev);
		if (rc)
			goto out;

		/* Clear device CPU message status */
		WREG32(cpu_msg_status_reg, CPU_MSG_CLR);

		/* Signal device CPU that boot loader is ready */
		WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);

		/* Poll for CPU device ack */
		rc = hl_poll_timeout(
			hdev,
			cpu_msg_status_reg,
			status,
			status == CPU_MSG_OK,
			hdev->fw_poll_interval_usec,
			fw_loader->boot_fit_timeout);

		if (rc) {
			dev_err(hdev->dev,
				"Timeout waiting for boot fit load ack\n");
			goto out;
		}

		/* Clear message */
		WREG32(msg_to_cpu_reg, KMD_MSG_NA);
	}

	/*
	 * Make sure CPU boot-loader is running
	 * Note that the CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux
	 * yet there is a debug scenario in which we loading uboot (without Linux)
	 * which at later stage is relocated to DRAM. In this case we expect
	 * uboot to set the CPU_BOOT_STATUS_SRAM_AVAIL and so we add it to the
	 * poll flags
	 */
	rc = hl_poll_timeout(
		hdev,
		cpu_boot_status_reg,
		status,
		(status == CPU_BOOT_STATUS_DRAM_RDY) ||
		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
		hdev->fw_poll_interval_usec,
		cpu_timeout);

	dev_dbg(hdev->dev, "uboot status = %d\n", status);

	/* Read U-Boot version now in case we will later fail */
	hl_fw_static_read_device_fw_version(hdev, FW_COMP_BOOT_FIT);

	/* update state according to boot stage */
	hl_fw_boot_fit_update_state(hdev, cpu_boot_dev_status0_reg,
						cpu_boot_dev_status1_reg);

	if (rc) {
		detect_cpu_boot_status(hdev, status);
		rc = -EIO;
		goto out;
	}

	/* Enable DRAM scrambling before Linux boot and after successful
	 * UBoot
	 */
	hdev->asic_funcs->init_cpu_scrambler_dram(hdev);

	if (!(hdev->fw_components & FW_TYPE_LINUX)) {
		dev_info(hdev->dev, "Skip loading Linux F/W\n");
		rc = 0;
		goto out;
	}

	/* SRAM already available means Linux is up - nothing left to load */
	if (status == CPU_BOOT_STATUS_SRAM_AVAIL) {
		rc = 0;
		goto out;
	}

	dev_info(hdev->dev,
		"Loading firmware to device, may take some time...\n");

	rc = hdev->asic_funcs->load_firmware_to_device(hdev);
	if (rc)
		goto out;

	if (fw_loader->skip_bmc) {
		/* Tell the CPU to skip waiting for the BMC and wait for ack */
		WREG32(msg_to_cpu_reg, KMD_MSG_SKIP_BMC);

		rc = hl_poll_timeout(
			hdev,
			cpu_boot_status_reg,
			status,
			(status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED),
			hdev->fw_poll_interval_usec,
			cpu_timeout);

		if (rc) {
			dev_err(hdev->dev,
				"Failed to get ACK on skipping BMC, %d\n",
				status);
			WREG32(msg_to_cpu_reg, KMD_MSG_NA);
			rc = -EIO;
			goto out;
		}
	}

	/* Signal FW image ready and wait for Linux to report SRAM available */
	WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);

	rc = hl_poll_timeout(
		hdev,
		cpu_boot_status_reg,
		status,
		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
		hdev->fw_poll_interval_usec,
		cpu_timeout);

	/* Clear message */
	WREG32(msg_to_cpu_reg, KMD_MSG_NA);

	if (rc) {
		if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
			dev_err(hdev->dev,
				"Device reports FIT image is corrupted\n");
		else
			dev_err(hdev->dev,
				"Failed to load firmware to device, %d\n",
				status);

		rc = -EIO;
		goto out;
	}

	rc = fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
					fw_loader->static_loader.boot_err1_reg,
					cpu_boot_dev_status0_reg,
					cpu_boot_dev_status1_reg);
	if (rc)
		return rc;

	hl_fw_linux_update_state(hdev, cpu_boot_dev_status0_reg,
					cpu_boot_dev_status1_reg);

	return 0;

out:
	/* best-effort dump of boot errors on any failure path */
	fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
				fw_loader->static_loader.boot_err1_reg,
				cpu_boot_dev_status0_reg,
				cpu_boot_dev_status1_reg);

	return rc;
}
  2347. /**
  2348. * hl_fw_init_cpu - initialize the device CPU
  2349. *
  2350. * @hdev: pointer to the habanalabs device structure
  2351. *
  2352. * @return 0 on success, otherwise non-zero error code
  2353. *
  2354. * perform necessary initializations for device's CPU. takes into account if
  2355. * init protocol is static or dynamic.
  2356. */
  2357. int hl_fw_init_cpu(struct hl_device *hdev)
  2358. {
  2359. struct asic_fixed_properties *prop = &hdev->asic_prop;
  2360. struct fw_load_mgr *fw_loader = &hdev->fw_loader;
  2361. return prop->dynamic_fw_load ?
  2362. hl_fw_dynamic_init_cpu(hdev, fw_loader) :
  2363. hl_fw_static_init_cpu(hdev, fw_loader);
  2364. }
  2365. void hl_fw_set_pll_profile(struct hl_device *hdev)
  2366. {
  2367. hl_fw_set_frequency(hdev, hdev->asic_prop.clk_pll_index,
  2368. hdev->asic_prop.max_freq_value);
  2369. }
  2370. int hl_fw_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
  2371. {
  2372. long value;
  2373. if (!hl_device_operational(hdev, NULL))
  2374. return -ENODEV;
  2375. if (!hdev->pdev) {
  2376. *cur_clk = 0;
  2377. *max_clk = 0;
  2378. return 0;
  2379. }
  2380. value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, false);
  2381. if (value < 0) {
  2382. dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n", value);
  2383. return value;
  2384. }
  2385. *max_clk = (value / 1000 / 1000);
  2386. value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, true);
  2387. if (value < 0) {
  2388. dev_err(hdev->dev, "Failed to retrieve device current clock %ld\n", value);
  2389. return value;
  2390. }
  2391. *cur_clk = (value / 1000 / 1000);
  2392. return 0;
  2393. }
  2394. long hl_fw_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
  2395. {
  2396. struct cpucp_packet pkt;
  2397. u32 used_pll_idx;
  2398. u64 result;
  2399. int rc;
  2400. rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
  2401. if (rc)
  2402. return rc;
  2403. memset(&pkt, 0, sizeof(pkt));
  2404. if (curr)
  2405. pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_CURR_GET <<
  2406. CPUCP_PKT_CTL_OPCODE_SHIFT);
  2407. else
  2408. pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
  2409. pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
  2410. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
  2411. if (rc) {
  2412. dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n",
  2413. used_pll_idx, rc);
  2414. return rc;
  2415. }
  2416. return (long) result;
  2417. }
  2418. void hl_fw_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
  2419. {
  2420. struct cpucp_packet pkt;
  2421. u32 used_pll_idx;
  2422. int rc;
  2423. rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
  2424. if (rc)
  2425. return;
  2426. memset(&pkt, 0, sizeof(pkt));
  2427. pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
  2428. pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
  2429. pkt.value = cpu_to_le64(freq);
  2430. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
  2431. if (rc)
  2432. dev_err(hdev->dev, "Failed to set frequency to PLL %d, error %d\n",
  2433. used_pll_idx, rc);
  2434. }
  2435. long hl_fw_get_max_power(struct hl_device *hdev)
  2436. {
  2437. struct cpucp_packet pkt;
  2438. u64 result;
  2439. int rc;
  2440. memset(&pkt, 0, sizeof(pkt));
  2441. pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
  2442. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
  2443. if (rc) {
  2444. dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
  2445. return rc;
  2446. }
  2447. return result;
  2448. }
  2449. void hl_fw_set_max_power(struct hl_device *hdev)
  2450. {
  2451. struct cpucp_packet pkt;
  2452. int rc;
  2453. /* TODO: remove this after simulator supports this packet */
  2454. if (!hdev->pdev)
  2455. return;
  2456. memset(&pkt, 0, sizeof(pkt));
  2457. pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
  2458. pkt.value = cpu_to_le64(hdev->max_power);
  2459. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
  2460. if (rc)
  2461. dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
  2462. }
  2463. static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void *data, u32 size,
  2464. u32 nonce, u32 timeout)
  2465. {
  2466. struct cpucp_packet pkt = {};
  2467. dma_addr_t req_dma_addr;
  2468. void *req_cpu_addr;
  2469. int rc;
  2470. req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr);
  2471. if (!req_cpu_addr) {
  2472. dev_err(hdev->dev,
  2473. "Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id);
  2474. return -ENOMEM;
  2475. }
  2476. memset(data, 0, size);
  2477. pkt.ctl = cpu_to_le32(packet_id << CPUCP_PKT_CTL_OPCODE_SHIFT);
  2478. pkt.addr = cpu_to_le64(req_dma_addr);
  2479. pkt.data_max_size = cpu_to_le32(size);
  2480. pkt.nonce = cpu_to_le32(nonce);
  2481. rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
  2482. timeout, NULL);
  2483. if (rc) {
  2484. dev_err(hdev->dev,
  2485. "Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
  2486. goto out;
  2487. }
  2488. memcpy(data, req_cpu_addr, size);
  2489. out:
  2490. hl_cpu_accessible_dma_pool_free(hdev, size, req_cpu_addr);
  2491. return rc;
  2492. }
/**
 * hl_fw_get_sec_attest_info - fetch security attestation info from firmware
 *
 * @hdev: pointer to the habanalabs device structure
 * @sec_attest_info: output buffer, filled with the attestation data on success
 * @nonce: caller-supplied nonce forwarded to the firmware
 *
 * @return 0 on success, otherwise non-zero error code
 *
 * Thin wrapper around hl_fw_get_sec_attest_data() with the SEC_ATTEST_GET
 * opcode. NOTE(review): "TINEOUT" in the timeout constant name looks like a
 * typo, but it must match the spelling of the external declaration - do not
 * rename it here alone.
 */
int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_info *sec_attest_info,
				u32 nonce)
{
	return hl_fw_get_sec_attest_data(hdev, CPUCP_PACKET_SEC_ATTEST_GET, sec_attest_info,
					sizeof(struct cpucp_sec_attest_info), nonce,
					HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC);
}