debugfs.c

// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>

#define MMU_ADDR_BUF_SIZE	40
#define MMU_ASID_BUF_SIZE	10
#define MMU_KBUF_SIZE		(MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)

#define I2C_MAX_TRANSACTION_LEN	8

static struct dentry *hl_debug_root;
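
/*
 * Forward an I2C read request to the device CPU (CPU-CP firmware) and return
 * the value read. Fails if the device is not operational or the transaction
 * length exceeds I2C_MAX_TRANSACTION_LEN.
 */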
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
				u8 i2c_reg, u8 i2c_len, u64 *val)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return -EBUSY;

	if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
		dev_err(hdev->dev, "I2C transaction length %u exceeds maximum of %u\n",
				i2c_len, I2C_MAX_TRANSACTION_LEN);
		return -EINVAL;
	}

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.i2c_bus = i2c_bus;
	pkt.i2c_addr = i2c_addr;
	pkt.i2c_reg = i2c_reg;
	pkt.i2c_len = i2c_len;

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, val);
	if (rc)
		dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);

	return rc;
}
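
/* Forward an I2C write request to the device CPU, mirroring the read path. */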
static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
				u8 i2c_reg, u8 i2c_len, u64 val)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return -EBUSY;

	if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
		dev_err(hdev->dev, "I2C transaction length %u exceeds maximum of %u\n",
				i2c_len, I2C_MAX_TRANSACTION_LEN);
		return -EINVAL;
	}

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.i2c_bus = i2c_bus;
	pkt.i2c_addr = i2c_addr;
	pkt.i2c_reg = i2c_reg;
	pkt.i2c_len = i2c_len;
	pkt.value = cpu_to_le64(val);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, NULL);
	if (rc)
		dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);

	return rc;
}
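
/* Set the state of one of the board LEDs through a CPU-CP LED_SET packet. */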
static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_LED_SET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.led_index = cpu_to_le32(led);
	pkt.value = cpu_to_le64(state);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, NULL);
	if (rc)
		dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
}
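
/* seq_file show callback: dump the device's live command buffers as a table. */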
static int command_buffers_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_cb *cb;
	bool first = true;

	spin_lock(&dev_entry->cb_spinlock);

	list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " CB ID   CTX ID   CB size   CB RefCnt   mmap?   CS counter\n");
			seq_puts(s, "---------------------------------------------------------------\n");
		}
		seq_printf(s,
			"   %03llu     %d     0x%08x     %d        %d        %d\n",
			cb->buf->handle, cb->ctx->asid, cb->size,
			kref_read(&cb->buf->refcount),
			atomic_read(&cb->buf->mmap), atomic_read(&cb->cs_cnt));
	}

	spin_unlock(&dev_entry->cb_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}
static int command_submission_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_cs *cs;
	bool first = true;

	spin_lock(&dev_entry->cs_spinlock);

	list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " CS ID   CS TYPE   CTX ASID   CS RefCnt   Submitted   Completed\n");
			seq_puts(s, "----------------------------------------------------------------\n");
		}
		seq_printf(s,
			"   %llu      %d        %d         %d          %d           %d\n",
			cs->sequence, cs->type, cs->ctx->asid,
			kref_read(&cs->refcount),
			cs->submitted, cs->completed);
	}

	spin_unlock(&dev_entry->cs_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

static int command_submission_jobs_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_cs_job *job;
	bool first = true;

	spin_lock(&dev_entry->cs_job_spinlock);

	list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " JOB ID   CS ID   CS TYPE   CTX ASID   JOB RefCnt   H/W Queue\n");
			seq_puts(s, "---------------------------------------------------------------\n");
		}
		if (job->cs)
			seq_printf(s,
				"   %02d      %llu       %d        %d          %d           %d\n",
				job->id, job->cs->sequence, job->cs->type,
				job->cs->ctx->asid, kref_read(&job->refcount),
				job->hw_queue_id);
		else
			seq_printf(s,
				"   %02d      0       0        %d          %d           %d\n",
				job->id, HL_KERNEL_ASID_ID,
				kref_read(&job->refcount), job->hw_queue_id);
	}

	spin_unlock(&dev_entry->cs_job_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

static int userptr_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_userptr *userptr;
	char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				"DMA_FROM_DEVICE", "DMA_NONE"};
	bool first = true;

	spin_lock(&dev_entry->userptr_spinlock);

	list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " pid      user virtual address     size        dma dir\n");
			seq_puts(s, "----------------------------------------------------------\n");
		}
		seq_printf(s, " %-7d  0x%-14llx  %-10llu  %-30s\n",
				userptr->pid, userptr->addr, userptr->size,
				dma_dir[userptr->dir]);
	}

	spin_unlock(&dev_entry->userptr_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}
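
/*
 * seq_file show callback: per-context dump of memory mappings, HW-block
 * mappings, physical-page-pack allocations and the compute context's VA
 * ranges.
 */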
static int vm_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_vm_hw_block_list_node *lnode;
	struct hl_ctx *ctx;
	struct hl_vm *vm;
	struct hl_vm_hash_node *hnode;
	struct hl_userptr *userptr;
	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
	struct hl_va_range *va_range;
	struct hl_vm_va_block *va_block;
	enum vm_type *vm_type;
	bool once = true;
	u64 j;
	int i;

	if (!dev_entry->hdev->mmu_enable)
		return 0;

	spin_lock(&dev_entry->ctx_mem_hash_spinlock);

	list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
		once = false;
		seq_puts(s, "\n\n----------------------------------------------------");
		seq_puts(s, "\n----------------------------------------------------\n\n");
		seq_printf(s, "ctx asid: %u\n", ctx->asid);

		seq_puts(s, "\nmappings:\n\n");
		seq_puts(s, "    virtual address        size          handle\n");
		seq_puts(s, "----------------------------------------------------\n");
		mutex_lock(&ctx->mem_hash_lock);
		hash_for_each(ctx->mem_hash, i, hnode, node) {
			vm_type = hnode->ptr;

			if (*vm_type == VM_TYPE_USERPTR) {
				userptr = hnode->ptr;
				seq_printf(s,
					"    0x%-14llx      %-10llu\n",
					hnode->vaddr, userptr->size);
			} else {
				phys_pg_pack = hnode->ptr;
				seq_printf(s,
					"    0x%-14llx      %-10llu       %-4u\n",
					hnode->vaddr, phys_pg_pack->total_size,
					phys_pg_pack->handle);
			}
		}
		mutex_unlock(&ctx->mem_hash_lock);

		if (ctx->asid != HL_KERNEL_ASID_ID &&
		    !list_empty(&ctx->hw_block_mem_list)) {
			seq_puts(s, "\nhw_block mappings:\n\n");
			seq_puts(s,
				"    virtual address    block size    mapped size    HW block id\n");
			seq_puts(s,
				"---------------------------------------------------------------\n");
			mutex_lock(&ctx->hw_block_list_lock);
			list_for_each_entry(lnode, &ctx->hw_block_mem_list, node) {
				seq_printf(s,
					"    0x%-14lx   %-6u        %-6u         %-9u\n",
					lnode->vaddr, lnode->block_size, lnode->mapped_size,
					lnode->id);
			}
			mutex_unlock(&ctx->hw_block_list_lock);
		}

		vm = &ctx->hdev->vm;
		spin_lock(&vm->idr_lock);

		if (!idr_is_empty(&vm->phys_pg_pack_handles))
			seq_puts(s, "\n\nallocations:\n");

		idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
			if (phys_pg_pack->asid != ctx->asid)
				continue;

			seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle);
			seq_printf(s, "page size: %u\n\n",
						phys_pg_pack->page_size);
			seq_puts(s, "   physical address\n");
			seq_puts(s, "---------------------\n");
			for (j = 0 ; j < phys_pg_pack->npages ; j++) {
				seq_printf(s, "    0x%-14llx\n",
						phys_pg_pack->pages[j]);
			}
		}
		spin_unlock(&vm->idr_lock);
	}

	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);

	ctx = hl_get_compute_ctx(dev_entry->hdev);
	if (ctx) {
		seq_puts(s, "\nVA ranges:\n\n");
		for (i = HL_VA_RANGE_TYPE_HOST ; i < HL_VA_RANGE_TYPE_MAX ; ++i) {
			va_range = ctx->va_range[i];
			seq_printf(s, "   va_range %d\n", i);
			seq_puts(s, "---------------------\n");
			mutex_lock(&va_range->lock);
			list_for_each_entry(va_block, &va_range->list, node) {
				seq_printf(s, "%#16llx - %#16llx (%#llx)\n",
					   va_block->start, va_block->end,
					   va_block->size);
			}
			mutex_unlock(&va_range->lock);
			seq_puts(s, "\n");
		}
		hl_ctx_put(ctx);
	}

	if (!once)
		seq_puts(s, "\n");

	return 0;
}
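
/*
 * seq_file show callback: translate the user virtual address previously
 * written to "userptr_lookup" into its DMA address by walking the matching
 * userptr's scatter-gather table.
 */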
static int userptr_lookup_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct scatterlist *sg;
	struct hl_userptr *userptr;
	bool first = true;
	u64 total_npages, npages, sg_start, sg_end;
	dma_addr_t dma_addr;
	int i;

	spin_lock(&dev_entry->userptr_spinlock);

	list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
		if (dev_entry->userptr_lookup >= userptr->addr &&
		    dev_entry->userptr_lookup < userptr->addr + userptr->size) {
			total_npages = 0;
			for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
				npages = hl_get_sg_info(sg, &dma_addr);
				sg_start = userptr->addr +
					total_npages * PAGE_SIZE;
				sg_end = userptr->addr +
					(total_npages + npages) * PAGE_SIZE;

				if (dev_entry->userptr_lookup >= sg_start &&
				    dev_entry->userptr_lookup < sg_end) {
					dma_addr += (dev_entry->userptr_lookup -
							sg_start);
					if (first) {
						first = false;
						seq_puts(s, "\n");
						seq_puts(s, " user virtual address         dma address       pid        region start     region size\n");
						seq_puts(s, "---------------------------------------------------------------------------------------\n");
					}
					seq_printf(s, " 0x%-18llx  0x%-16llx  %-8u   0x%-16llx %-12llu\n",
						dev_entry->userptr_lookup,
						(u64)dma_addr, userptr->pid,
						userptr->addr, userptr->size);
				}
				total_npages += npages;
			}
		}
	}

	spin_unlock(&dev_entry->userptr_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

static ssize_t userptr_lookup_write(struct file *file, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct seq_file *s = file->private_data;
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	ssize_t rc;
	u64 value;

	rc = kstrtoull_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	dev_entry->userptr_lookup = value;

	return count;
}
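
/*
 * seq_file show callback: print the MMU page-walk (hop) info for the
 * ASID/VA pair previously written to the "mmu" node.
 */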
static int mmu_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	struct hl_ctx *ctx;
	struct hl_mmu_hop_info hops_info = {0};
	u64 virt_addr = dev_entry->mmu_addr, phys_addr;
	int i;

	if (!hdev->mmu_enable)
		return 0;

	if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
		ctx = hdev->kernel_ctx;
	else
		ctx = hl_get_compute_ctx(hdev);

	if (!ctx) {
		dev_err(hdev->dev, "no ctx available\n");
		return 0;
	}

	if (hl_mmu_get_tlb_info(ctx, virt_addr, &hops_info)) {
		dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
				virt_addr);
		goto put_ctx;
	}

	hl_mmu_va_to_pa(ctx, virt_addr, &phys_addr);

	if (hops_info.scrambled_vaddr &&
		(dev_entry->mmu_addr != hops_info.scrambled_vaddr))
		seq_printf(s,
			"asid: %u, virt_addr: 0x%llx, scrambled virt_addr: 0x%llx,\nphys_addr: 0x%llx, scrambled_phys_addr: 0x%llx\n",
			dev_entry->mmu_asid, dev_entry->mmu_addr,
			hops_info.scrambled_vaddr,
			hops_info.unscrambled_paddr, phys_addr);
	else
		seq_printf(s,
			"asid: %u, virt_addr: 0x%llx, phys_addr: 0x%llx\n",
			dev_entry->mmu_asid, dev_entry->mmu_addr, phys_addr);

	for (i = 0 ; i < hops_info.used_hops ; i++) {
		seq_printf(s, "hop%d_addr: 0x%llx\n",
				i, hops_info.hop_info[i].hop_addr);
		seq_printf(s, "hop%d_pte_addr: 0x%llx\n",
				i, hops_info.hop_info[i].hop_pte_addr);
		seq_printf(s, "hop%d_pte: 0x%llx\n",
				i, hops_info.hop_info[i].hop_pte_val);
	}

put_ctx:
	if (dev_entry->mmu_asid != HL_KERNEL_ASID_ID)
		hl_ctx_put(ctx);

	return 0;
}
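
/* Parse "<asid> <0xaddr>" written to the "mmu" node and store it for mmu_show(). */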
static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct seq_file *s = file->private_data;
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	char kbuf[MMU_KBUF_SIZE];
	char *c;
	ssize_t rc;

	if (!hdev->mmu_enable)
		return count;

	if (count > sizeof(kbuf) - 1)
		goto err;
	if (copy_from_user(kbuf, buf, count))
		goto err;
	kbuf[count] = 0;

	c = strchr(kbuf, ' ');
	if (!c)
		goto err;
	*c = '\0';

	rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
	if (rc)
		goto err;

	if (strncmp(c+1, "0x", 2))
		goto err;
	rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
	if (rc)
		goto err;

	return count;

err:
	dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n");

	return -EINVAL;
}
static int mmu_ack_error(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	int rc;

	if (!hdev->mmu_enable)
		return 0;

	if (!dev_entry->mmu_cap_mask) {
		dev_err(hdev->dev, "mmu_cap_mask is not set\n");
		goto err;
	}

	rc = hdev->asic_funcs->ack_mmu_errors(hdev, dev_entry->mmu_cap_mask);
	if (rc)
		goto err;

	return 0;
err:
	return -EINVAL;
}

static ssize_t mmu_ack_error_value_write(struct file *file,
		const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct seq_file *s = file->private_data;
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	char kbuf[MMU_KBUF_SIZE];
	ssize_t rc;

	if (!hdev->mmu_enable)
		return count;

	if (count > sizeof(kbuf) - 1)
		goto err;

	if (copy_from_user(kbuf, buf, count))
		goto err;

	kbuf[count] = 0;

	if (strncmp(kbuf, "0x", 2))
		goto err;

	rc = kstrtoull(kbuf, 16, &dev_entry->mmu_cap_mask);
	if (rc)
		goto err;

	return count;
err:
	dev_err(hdev->dev, "usage: echo <0xmmu_cap_mask> > mmu_error\n");

	return -EINVAL;
}
static int engines_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	struct engines_data eng_data;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev,
				"Can't check device idle during reset\n");
		return 0;
	}

	eng_data.actual_size = 0;
	eng_data.allocated_buf_size = HL_ENGINES_DATA_MAX_SIZE;
	eng_data.buf = vmalloc(eng_data.allocated_buf_size);
	if (!eng_data.buf)
		return -ENOMEM;

	hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);

	if (eng_data.actual_size > eng_data.allocated_buf_size) {
		dev_err(hdev->dev,
			"Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
			eng_data.actual_size, eng_data.allocated_buf_size);
		vfree(eng_data.buf);
		return -ENOMEM;
	}

	seq_write(s, eng_data.buf, eng_data.actual_size);

	vfree(eng_data.buf);

	return 0;
}
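
/*
 * Write handler for "memory_scrub": scrub the device DRAM with the value in
 * "memory_scrub_val", but only while no compute context is active.
 */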
static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 val = hdev->memory_scrub_val;
	int rc;

	if (!hl_device_operational(hdev, NULL)) {
		dev_warn_ratelimited(hdev->dev, "Can't scrub memory, device is not operational\n");
		return -EIO;
	}

	mutex_lock(&hdev->fpriv_list_lock);
	if (hdev->is_compute_ctx_active) {
		mutex_unlock(&hdev->fpriv_list_lock);
		dev_err(hdev->dev, "can't scrub dram, context exists\n");
		return -EBUSY;
	}
	hdev->is_in_dram_scrub = true;
	mutex_unlock(&hdev->fpriv_list_lock);

	rc = hdev->asic_funcs->scrub_device_dram(hdev, val);

	mutex_lock(&hdev->fpriv_list_lock);
	hdev->is_in_dram_scrub = false;
	mutex_unlock(&hdev->fpriv_list_lock);

	if (rc)
		return rc;
	return count;
}
static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	if (!hdev->mmu_enable)
		goto out;

	if (prop->dram_supports_virtual_memory &&
		(addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
		return true;

	if (addr >= prop->pmmu.start_addr &&
		addr < prop->pmmu.end_addr)
		return true;

	if (addr >= prop->pmmu_huge.start_addr &&
		addr < prop->pmmu_huge.end_addr)
		return true;
out:
	return false;
}

static bool hl_is_device_internal_memory_va(struct hl_device *hdev, u64 addr,
						u32 size)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 dram_start_addr, dram_end_addr;

	if (!hdev->mmu_enable)
		return false;

	if (prop->dram_supports_virtual_memory) {
		dram_start_addr = prop->dmmu.start_addr;
		dram_end_addr = prop->dmmu.end_addr;
	} else {
		dram_start_addr = prop->dram_base_address;
		dram_end_addr = prop->dram_end_address;
	}

	if (hl_mem_area_inside_range(addr, size, dram_start_addr,
					dram_end_addr))
		return true;

	if (hl_mem_area_inside_range(addr, size, prop->sram_base_address,
					prop->sram_end_address))
		return true;

	return false;
}
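
/*
 * Translate a device virtual address to a physical address, after verifying
 * that the whole [virt_addr, virt_addr + size) range is mapped in the
 * compute context.
 */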
static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
			u64 *phys_addr)
{
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct hl_ctx *ctx;
	struct hl_vm_hash_node *hnode;
	u64 end_address, range_size;
	struct hl_userptr *userptr;
	enum vm_type *vm_type;
	bool valid = false;
	int i, rc = 0;

	ctx = hl_get_compute_ctx(hdev);

	if (!ctx) {
		dev_err(hdev->dev, "no ctx available\n");
		return -EINVAL;
	}

	/* Verify address is mapped */
	mutex_lock(&ctx->mem_hash_lock);
	hash_for_each(ctx->mem_hash, i, hnode, node) {
		vm_type = hnode->ptr;

		if (*vm_type == VM_TYPE_USERPTR) {
			userptr = hnode->ptr;
			range_size = userptr->size;
		} else {
			phys_pg_pack = hnode->ptr;
			range_size = phys_pg_pack->total_size;
		}

		end_address = virt_addr + size;
		if ((virt_addr >= hnode->vaddr) &&
				(end_address <= hnode->vaddr + range_size)) {
			valid = true;
			break;
		}
	}
	mutex_unlock(&ctx->mem_hash_lock);

	if (!valid) {
		dev_err(hdev->dev,
			"virt addr 0x%llx is not mapped\n",
			virt_addr);
		rc = -EINVAL;
		goto put_ctx;
	}

	rc = hl_mmu_va_to_pa(ctx, virt_addr, phys_addr);
	if (rc) {
		dev_err(hdev->dev,
			"virt addr 0x%llx is not mapped to phys addr\n",
			virt_addr);
		rc = -EINVAL;
	}

put_ctx:
	hl_ctx_put(ctx);

	return rc;
}
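
/*
 * Access device memory through the PCI memory region (BAR) that contains the
 * address, if any; *found reports whether a matching region was used.
 */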
static int hl_access_dev_mem_by_region(struct hl_device *hdev, u64 addr,
		u64 *val, enum debugfs_access_type acc_type, bool *found)
{
	size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
		sizeof(u64) : sizeof(u32);
	struct pci_mem_region *mem_reg;
	int i;

	for (i = 0; i < PCI_REGION_NUMBER; i++) {
		mem_reg = &hdev->pci_mem_region[i];
		if (!mem_reg->used)
			continue;
		if (addr >= mem_reg->region_base &&
			addr <= mem_reg->region_base + mem_reg->region_size - acc_size) {
			*found = true;
			return hdev->asic_funcs->access_dev_mem(hdev, i, addr, val, acc_type);
		}
	}
	return 0;
}
static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
		enum debugfs_access_type acc_type)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 offset = prop->device_dma_offset_for_host_access;

	switch (acc_type) {
	case DEBUGFS_READ32:
		*val = *(u32 *) phys_to_virt(addr - offset);
		break;
	case DEBUGFS_WRITE32:
		*(u32 *) phys_to_virt(addr - offset) = *val;
		break;
	case DEBUGFS_READ64:
		*val = *(u64 *) phys_to_virt(addr - offset);
		break;
	case DEBUGFS_WRITE64:
		*(u64 *) phys_to_virt(addr - offset) = *val;
		break;
	default:
		dev_err(hdev->dev, "hostmem access-type %d is not supported\n", acc_type);
		break;
	}
}
static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
				enum debugfs_access_type acc_type)
{
	size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
		sizeof(u64) : sizeof(u32);
	u64 host_start = hdev->asic_prop.host_base_address;
	u64 host_end = hdev->asic_prop.host_end_address;
	bool user_address, found = false;
	int rc;

	user_address = hl_is_device_va(hdev, addr);
	if (user_address) {
		rc = device_va_to_pa(hdev, addr, acc_size, &addr);
		if (rc)
			return rc;
	}

	rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
	if (rc) {
		dev_err(hdev->dev,
			"Failed reading addr %#llx from dev mem (%d)\n",
			addr, rc);
		return rc;
	}

	if (found)
		return 0;

	if (!user_address || device_iommu_mapped(&hdev->pdev->dev)) {
		rc = -EINVAL;
		goto err;
	}

	if (addr >= host_start && addr <= host_end - acc_size) {
		hl_access_host_mem(hdev, addr, val, acc_type);
	} else {
		rc = -EINVAL;
		goto err;
	}

	return 0;
err:
	dev_err(hdev->dev, "invalid addr %#llx\n", addr);
	return rc;
}
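
/*
 * Read/write handlers for the "data32" and "data64" nodes: access the
 * device or host address currently stored in the "addr" node.
 */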
static ssize_t hl_data_read32(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 value64, addr = entry->addr;
	char tmp_buf[32];
	ssize_t rc;
	u32 val;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
		return 0;
	}

	if (*ppos)
		return 0;

	rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_READ32);
	if (rc)
		return rc;

	val = value64; /* downcast back to 32 */

	sprintf(tmp_buf, "0x%08x\n", val);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}

static ssize_t hl_data_write32(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 value64, addr = entry->addr;
	u32 value;
	ssize_t rc;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
		return 0;
	}

	rc = kstrtouint_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	value64 = value;
	rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_WRITE32);
	if (rc)
		return rc;

	return count;
}

static ssize_t hl_data_read64(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 addr = entry->addr;
	char tmp_buf[32];
	ssize_t rc;
	u64 val;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
		return 0;
	}

	if (*ppos)
		return 0;

	rc = hl_access_mem(hdev, addr, &val, DEBUGFS_READ64);
	if (rc)
		return rc;

	sprintf(tmp_buf, "0x%016llx\n", val);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}

static ssize_t hl_data_write64(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 addr = entry->addr;
	u64 value;
	ssize_t rc;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
		return 0;
	}

	rc = kstrtoull_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	rc = hl_access_mem(hdev, addr, &value, DEBUGFS_WRITE64);
	if (rc)
		return rc;

	return count;
}
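
/*
 * Write handler for "dma_size": DMA up to 128MB from device memory at "addr"
 * into a blob exposed via the read-only "data_dma" node.
 */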
static ssize_t hl_dma_size_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 addr = entry->addr;
	ssize_t rc;
	u32 size;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't DMA during reset\n");
		return 0;
	}
	rc = kstrtouint_from_user(buf, count, 16, &size);
	if (rc)
		return rc;

	if (!size) {
		dev_err(hdev->dev, "DMA read failed. size can't be 0\n");
		return -EINVAL;
	}

	if (size > SZ_128M) {
		dev_err(hdev->dev,
			"DMA read failed. size can't be larger than 128MB\n");
		return -EINVAL;
	}

	if (!hl_is_device_internal_memory_va(hdev, addr, size)) {
		dev_err(hdev->dev,
			"DMA read failed. Invalid 0x%010llx + 0x%08x\n",
			addr, size);
		return -EINVAL;
	}

	/* Free the previous allocation, if there was any */
	entry->data_dma_blob_desc.size = 0;
	vfree(entry->data_dma_blob_desc.data);

	entry->data_dma_blob_desc.data = vmalloc(size);
	if (!entry->data_dma_blob_desc.data)
		return -ENOMEM;

	rc = hdev->asic_funcs->debugfs_read_dma(hdev, addr, size,
						entry->data_dma_blob_desc.data);
	if (rc) {
		dev_err(hdev->dev, "Failed to DMA from 0x%010llx\n", addr);
		vfree(entry->data_dma_blob_desc.data);
		entry->data_dma_blob_desc.data = NULL;
		return -EIO;
	}

	entry->data_dma_blob_desc.size = size;

	return count;
}
static ssize_t hl_monitor_dump_trigger(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 size, trig;
	ssize_t rc;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't dump monitors during reset\n");
		return 0;
	}
	rc = kstrtouint_from_user(buf, count, 10, &trig);
	if (rc)
		return rc;

	if (trig != 1) {
		dev_err(hdev->dev, "Must write 1 to trigger monitor dump\n");
		return -EINVAL;
	}

	size = sizeof(struct cpucp_monitor_dump);

	/* Free the previous allocation, if there was any */
	entry->mon_dump_blob_desc.size = 0;
	vfree(entry->mon_dump_blob_desc.data);

	entry->mon_dump_blob_desc.data = vmalloc(size);
	if (!entry->mon_dump_blob_desc.data)
		return -ENOMEM;

	rc = hdev->asic_funcs->get_monitor_dump(hdev, entry->mon_dump_blob_desc.data);
	if (rc) {
		dev_err(hdev->dev, "Failed to dump monitors\n");
		vfree(entry->mon_dump_blob_desc.data);
		entry->mon_dump_blob_desc.data = NULL;
		return -EIO;
	}

	entry->mon_dump_blob_desc.size = size;

	return count;
}
static ssize_t hl_get_power_state(struct file *f, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	int i;

	if (*ppos)
		return 0;

	if (hdev->pdev->current_state == PCI_D0)
		i = 1;
	else if (hdev->pdev->current_state == PCI_D3hot)
		i = 2;
	else
		i = 3;

	sprintf(tmp_buf,
		"current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}

static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	if (value == 1) {
		pci_set_power_state(hdev->pdev, PCI_D0);
		pci_restore_state(hdev->pdev);
		rc = pci_enable_device(hdev->pdev);
		if (rc < 0)
			return rc;
	} else if (value == 2) {
		pci_save_state(hdev->pdev);
		pci_disable_device(hdev->pdev);
		pci_set_power_state(hdev->pdev, PCI_D3hot);
	} else {
		dev_dbg(hdev->dev, "invalid power state value %u\n", value);
		return -EINVAL;
	}

	return count;
}
static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[32];
	u64 val;
	ssize_t rc;

	if (*ppos)
		return 0;

	rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
			entry->i2c_reg, entry->i2c_len, &val);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to read from I2C bus %d, addr %d, reg %d, len %d\n",
			entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
		return rc;
	}

	sprintf(tmp_buf, "%#02llx\n", val);
	rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));

	return rc;
}

static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 value;
	ssize_t rc;

	rc = kstrtou64_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
			entry->i2c_reg, entry->i2c_len, value);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to write %#02llx to I2C bus %d, addr %d, reg %d, len %d\n",
			value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
		return rc;
	}

	return count;
}
static ssize_t hl_led0_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 0, value);

	return count;
}

static ssize_t hl_led1_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 1, value);

	return count;
}

static ssize_t hl_led2_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 2, value);

	return count;
}
static ssize_t hl_device_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	static const char *help =
		"Valid values: disable, enable, suspend, resume, cpu_timeout\n";
	return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
}

static ssize_t hl_device_write(struct file *f, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char data[30] = {0};

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;

	simple_write_to_buffer(data, 29, ppos, buf, count);

	if (strncmp("disable", data, strlen("disable")) == 0) {
		hdev->disabled = true;
	} else if (strncmp("enable", data, strlen("enable")) == 0) {
		hdev->disabled = false;
	} else if (strncmp("suspend", data, strlen("suspend")) == 0) {
		hdev->asic_funcs->suspend(hdev);
	} else if (strncmp("resume", data, strlen("resume")) == 0) {
		hdev->asic_funcs->resume(hdev);
	} else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) {
		hdev->device_cpu_disabled = true;
	} else {
		dev_err(hdev->dev,
			"Valid values: disable, enable, suspend, resume, cpu_timeout\n");
		count = -EINVAL;
	}

	return count;
}
static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t hl_stop_on_err_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	ssize_t rc;

	if (!hdev->asic_prop.configurable_stop_on_err)
		return -EOPNOTSUPP;

	if (*ppos)
		return 0;

	sprintf(tmp_buf, "%d\n", hdev->stop_on_err);
	rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
			strlen(tmp_buf) + 1);

	return rc;
}

static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	if (!hdev->asic_prop.configurable_stop_on_err)
		return -EOPNOTSUPP;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev,
				"Can't change stop on error during reset\n");
		return 0;
	}

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	hdev->stop_on_err = value ? 1 : 0;

	hl_device_reset(hdev, 0);

	return count;
}

static ssize_t hl_security_violations_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;

	hdev->asic_funcs->ack_protection_bits_errors(hdev);

	return 0;
}
static ssize_t hl_state_dump_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	ssize_t rc;

	down_read(&entry->state_dump_sem);
	if (!entry->state_dump[entry->state_dump_head])
		rc = 0;
	else
		rc = simple_read_from_buffer(
			buf, count, ppos,
			entry->state_dump[entry->state_dump_head],
			strlen(entry->state_dump[entry->state_dump_head]));
	up_read(&entry->state_dump_sem);

	return rc;
}

static ssize_t hl_state_dump_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	ssize_t rc;
	u32 size;
	int i;

	rc = kstrtouint_from_user(buf, count, 10, &size);
	if (rc)
		return rc;

	if (size <= 0 || size >= ARRAY_SIZE(entry->state_dump)) {
		dev_err(hdev->dev, "Invalid number of dumps to skip\n");
		return -EINVAL;
	}

	if (entry->state_dump[entry->state_dump_head]) {
		down_write(&entry->state_dump_sem);
		for (i = 0; i < size; ++i) {
			vfree(entry->state_dump[entry->state_dump_head]);
			entry->state_dump[entry->state_dump_head] = NULL;
			if (entry->state_dump_head > 0)
				entry->state_dump_head--;
			else
				entry->state_dump_head =
					ARRAY_SIZE(entry->state_dump) - 1;
		}
		up_write(&entry->state_dump_sem);
	}

	return count;
}
static ssize_t hl_timeout_locked_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	ssize_t rc;

	if (*ppos)
		return 0;

	sprintf(tmp_buf, "%d\n",
		jiffies_to_msecs(hdev->timeout_jiffies) / 1000);
	rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
			strlen(tmp_buf) + 1);

	return rc;
}

static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	if (value)
		hdev->timeout_jiffies = msecs_to_jiffies(value * 1000);
	else
		hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;

	return count;
}

static ssize_t hl_check_razwi_happened(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;

	hdev->asic_funcs->check_if_razwi_happened(hdev);

	return 0;
}
static const struct file_operations hl_mem_scrub_fops = {
	.owner = THIS_MODULE,
	.write = hl_memory_scrub,
};

static const struct file_operations hl_data32b_fops = {
	.owner = THIS_MODULE,
	.read = hl_data_read32,
	.write = hl_data_write32
};

static const struct file_operations hl_data64b_fops = {
	.owner = THIS_MODULE,
	.read = hl_data_read64,
	.write = hl_data_write64
};

static const struct file_operations hl_dma_size_fops = {
	.owner = THIS_MODULE,
	.write = hl_dma_size_write
};

static const struct file_operations hl_monitor_dump_fops = {
	.owner = THIS_MODULE,
	.write = hl_monitor_dump_trigger
};

static const struct file_operations hl_i2c_data_fops = {
	.owner = THIS_MODULE,
	.read = hl_i2c_data_read,
	.write = hl_i2c_data_write
};

static const struct file_operations hl_power_fops = {
	.owner = THIS_MODULE,
	.read = hl_get_power_state,
	.write = hl_set_power_state
};

static const struct file_operations hl_led0_fops = {
	.owner = THIS_MODULE,
	.write = hl_led0_write
};

static const struct file_operations hl_led1_fops = {
	.owner = THIS_MODULE,
	.write = hl_led1_write
};

static const struct file_operations hl_led2_fops = {
	.owner = THIS_MODULE,
	.write = hl_led2_write
};

static const struct file_operations hl_device_fops = {
	.owner = THIS_MODULE,
	.read = hl_device_read,
	.write = hl_device_write
};

static const struct file_operations hl_clk_gate_fops = {
	.owner = THIS_MODULE,
	.read = hl_clk_gate_read,
	.write = hl_clk_gate_write
};

static const struct file_operations hl_stop_on_err_fops = {
	.owner = THIS_MODULE,
	.read = hl_stop_on_err_read,
	.write = hl_stop_on_err_write
};

static const struct file_operations hl_security_violations_fops = {
	.owner = THIS_MODULE,
	.read = hl_security_violations_read
};

static const struct file_operations hl_state_dump_fops = {
	.owner = THIS_MODULE,
	.read = hl_state_dump_read,
	.write = hl_state_dump_write
};

static const struct file_operations hl_timeout_locked_fops = {
	.owner = THIS_MODULE,
	.read = hl_timeout_locked_read,
	.write = hl_timeout_locked_write
};

static const struct file_operations hl_razwi_check_fops = {
	.owner = THIS_MODULE,
	.read = hl_check_razwi_happened
};

static const struct hl_info_list hl_debugfs_list[] = {
	{"command_buffers", command_buffers_show, NULL},
	{"command_submission", command_submission_show, NULL},
	{"command_submission_jobs", command_submission_jobs_show, NULL},
	{"userptr", userptr_show, NULL},
	{"vm", vm_show, NULL},
	{"userptr_lookup", userptr_lookup_show, userptr_lookup_write},
	{"mmu", mmu_show, mmu_asid_va_write},
	{"mmu_error", mmu_ack_error, mmu_ack_error_value_write},
	{"engines", engines_show, NULL},
};
static int hl_debugfs_open(struct inode *inode, struct file *file)
{
	struct hl_debugfs_entry *node = inode->i_private;

	return single_open(file, node->info_ent->show, node);
}

static ssize_t hl_debugfs_write(struct file *file, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct hl_debugfs_entry *node = file->f_inode->i_private;

	if (node->info_ent->write)
		return node->info_ent->write(file, buf, count, f_pos);
	else
		return -EINVAL;
}

static const struct file_operations hl_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = hl_debugfs_open,
	.read = seq_read,
	.write = hl_debugfs_write,
	.llseek = seq_lseek,
	.release = single_release,
};
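
/*
 * Nodes that expose raw hardware (I2C bus, LEDs); created only when firmware
 * security is disabled.
 */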
static void add_secured_nodes(struct hl_dbg_device_entry *dev_entry)
{
	debugfs_create_u8("i2c_bus",
				0644,
				dev_entry->root,
				&dev_entry->i2c_bus);

	debugfs_create_u8("i2c_addr",
				0644,
				dev_entry->root,
				&dev_entry->i2c_addr);

	debugfs_create_u8("i2c_reg",
				0644,
				dev_entry->root,
				&dev_entry->i2c_reg);

	debugfs_create_u8("i2c_len",
				0644,
				dev_entry->root,
				&dev_entry->i2c_len);

	debugfs_create_file("i2c_data",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_i2c_data_fops);

	debugfs_create_file("led0",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_led0_fops);

	debugfs_create_file("led1",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_led1_fops);

	debugfs_create_file("led2",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_led2_fops);
}
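
/* Create the per-device debugfs directory and populate all of its nodes. */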
void hl_debugfs_add_device(struct hl_device *hdev)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
	int count = ARRAY_SIZE(hl_debugfs_list);
	struct hl_debugfs_entry *entry;
	int i;

	dev_entry->hdev = hdev;
	dev_entry->entry_arr = kmalloc_array(count,
					sizeof(struct hl_debugfs_entry),
					GFP_KERNEL);
	if (!dev_entry->entry_arr)
		return;

	dev_entry->data_dma_blob_desc.size = 0;
	dev_entry->data_dma_blob_desc.data = NULL;
	dev_entry->mon_dump_blob_desc.size = 0;
	dev_entry->mon_dump_blob_desc.data = NULL;

	INIT_LIST_HEAD(&dev_entry->file_list);
	INIT_LIST_HEAD(&dev_entry->cb_list);
	INIT_LIST_HEAD(&dev_entry->cs_list);
	INIT_LIST_HEAD(&dev_entry->cs_job_list);
	INIT_LIST_HEAD(&dev_entry->userptr_list);
	INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
	mutex_init(&dev_entry->file_mutex);
	init_rwsem(&dev_entry->state_dump_sem);
	spin_lock_init(&dev_entry->cb_spinlock);
	spin_lock_init(&dev_entry->cs_spinlock);
	spin_lock_init(&dev_entry->cs_job_spinlock);
	spin_lock_init(&dev_entry->userptr_spinlock);
	spin_lock_init(&dev_entry->ctx_mem_hash_spinlock);

	dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
						hl_debug_root);

	debugfs_create_x64("memory_scrub_val",
				0644,
				dev_entry->root,
				&hdev->memory_scrub_val);

	debugfs_create_file("memory_scrub",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_mem_scrub_fops);

	debugfs_create_x64("addr",
				0644,
				dev_entry->root,
				&dev_entry->addr);

	debugfs_create_file("data32",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_data32b_fops);

	debugfs_create_file("data64",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_data64b_fops);

	debugfs_create_file("set_power_state",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_power_fops);

	debugfs_create_file("device",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_device_fops);

	debugfs_create_file("clk_gate",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_clk_gate_fops);

	debugfs_create_file("stop_on_err",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_stop_on_err_fops);

	debugfs_create_file("dump_security_violations",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_security_violations_fops);

	debugfs_create_file("dump_razwi_events",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_razwi_check_fops);

	debugfs_create_file("dma_size",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_dma_size_fops);

	debugfs_create_blob("data_dma",
				0400,
				dev_entry->root,
				&dev_entry->data_dma_blob_desc);

	debugfs_create_file("monitor_dump_trig",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_monitor_dump_fops);

	debugfs_create_blob("monitor_dump",
				0400,
				dev_entry->root,
				&dev_entry->mon_dump_blob_desc);

	debugfs_create_x8("skip_reset_on_timeout",
				0644,
				dev_entry->root,
				&hdev->reset_info.skip_reset_on_timeout);

	debugfs_create_file("state_dump",
				0600,
				dev_entry->root,
				dev_entry,
				&hl_state_dump_fops);

	debugfs_create_file("timeout_locked",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_timeout_locked_fops);

	for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
		debugfs_create_file(hl_debugfs_list[i].name,
					0444,
					dev_entry->root,
					entry,
					&hl_debugfs_fops);
		entry->info_ent = &hl_debugfs_list[i];
		entry->dev_entry = dev_entry;
	}

	if (!hdev->asic_prop.fw_security_enabled)
		add_secured_nodes(dev_entry);
}
void hl_debugfs_remove_device(struct hl_device *hdev)
{
	struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
	int i;

	debugfs_remove_recursive(entry->root);

	mutex_destroy(&entry->file_mutex);

	vfree(entry->data_dma_blob_desc.data);
	vfree(entry->mon_dump_blob_desc.data);

	for (i = 0; i < ARRAY_SIZE(entry->state_dump); ++i)
		vfree(entry->state_dump[i]);

	kfree(entry->entry_arr);
}
void hl_debugfs_add_file(struct hl_fpriv *hpriv)
{
	struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

	mutex_lock(&dev_entry->file_mutex);
	list_add(&hpriv->debugfs_list, &dev_entry->file_list);
	mutex_unlock(&dev_entry->file_mutex);
}

void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
{
	struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

	mutex_lock(&dev_entry->file_mutex);
	list_del(&hpriv->debugfs_list);
	mutex_unlock(&dev_entry->file_mutex);
}

void hl_debugfs_add_cb(struct hl_cb *cb)
{
	struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;

	spin_lock(&dev_entry->cb_spinlock);
	list_add(&cb->debugfs_list, &dev_entry->cb_list);
	spin_unlock(&dev_entry->cb_spinlock);
}

void hl_debugfs_remove_cb(struct hl_cb *cb)
{
	struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;

	spin_lock(&dev_entry->cb_spinlock);
	list_del(&cb->debugfs_list);
	spin_unlock(&dev_entry->cb_spinlock);
}

void hl_debugfs_add_cs(struct hl_cs *cs)
{
	struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_spinlock);
	list_add(&cs->debugfs_list, &dev_entry->cs_list);
	spin_unlock(&dev_entry->cs_spinlock);
}

void hl_debugfs_remove_cs(struct hl_cs *cs)
{
	struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_spinlock);
	list_del(&cs->debugfs_list);
	spin_unlock(&dev_entry->cs_spinlock);
}

void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_job_spinlock);
	list_add(&job->debugfs_list, &dev_entry->cs_job_list);
	spin_unlock(&dev_entry->cs_job_spinlock);
}

void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_job_spinlock);
	list_del(&job->debugfs_list);
	spin_unlock(&dev_entry->cs_job_spinlock);
}

void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->userptr_spinlock);
	list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
	spin_unlock(&dev_entry->userptr_spinlock);
}

void hl_debugfs_remove_userptr(struct hl_device *hdev,
				struct hl_userptr *userptr)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->userptr_spinlock);
	list_del(&userptr->debugfs_list);
	spin_unlock(&dev_entry->userptr_spinlock);
}

void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->ctx_mem_hash_spinlock);
	list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
}

void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->ctx_mem_hash_spinlock);
	list_del(&ctx->debugfs_list);
	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
}
/**
 * hl_debugfs_set_state_dump - register state dump making it accessible via
 *  debugfs
 * @hdev: pointer to the device structure
 * @data: the actual dump data
 * @length: the length of the data
 */
void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
					unsigned long length)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	down_write(&dev_entry->state_dump_sem);

	dev_entry->state_dump_head = (dev_entry->state_dump_head + 1) %
					ARRAY_SIZE(dev_entry->state_dump);
	vfree(dev_entry->state_dump[dev_entry->state_dump_head]);
	dev_entry->state_dump[dev_entry->state_dump_head] = data;

	up_write(&dev_entry->state_dump_sem);
}
void __init hl_debugfs_init(void)
{
	hl_debug_root = debugfs_create_dir("habanalabs", NULL);
}

void hl_debugfs_fini(void)
{
	debugfs_remove_recursive(hl_debug_root);
}