qcom-iommu-debug-user.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
  5. *
  6. */
  7. #include <linux/bitfield.h>
  8. #include <linux/dma-mapping.h>
  9. #include <linux/kernel.h>
  10. #include <linux/slab.h>
  11. #include <linux/qcom-dma-mapping.h>
  12. #include <linux/qcom-iommu-util.h>
  13. #include "qcom-iommu-debug.h"
  14. #ifdef CONFIG_64BIT
  15. #define kstrtoux kstrtou64
  16. #define kstrtox_from_user kstrtoull_from_user
  17. #define kstrtosize_t kstrtoul
  18. #else
  19. #define kstrtoux kstrtou32
  20. #define kstrtox_from_user kstrtouint_from_user
  21. #define kstrtosize_t kstrtouint
  22. #endif
  23. static void *test_virt_addr;
  24. static DEFINE_MUTEX(test_virt_addr_lock);
  25. static ssize_t iommu_debug_dma_atos_read(struct file *file, char __user *ubuf,
  26. size_t count, loff_t *offset)
  27. {
  28. struct iommu_debug_device *ddev = file->private_data;
  29. struct iommu_fwspec *fwspec;
  30. phys_addr_t phys;
  31. char buf[100] = {0};
  32. struct qcom_iommu_atos_txn txn;
  33. int len;
  34. if (*offset)
  35. return 0;
  36. mutex_lock(&ddev->state_lock);
  37. if (!ddev->domain) {
  38. pr_err("%s: No domain. Have you selected a usecase?\n", __func__);
  39. mutex_unlock(&ddev->state_lock);
  40. return -EINVAL;
  41. }
  42. fwspec = dev_iommu_fwspec_get(ddev->test_dev);
  43. if (!fwspec) {
  44. pr_err("%s: No fwspec.\n", __func__);
  45. mutex_unlock(&ddev->state_lock);
  46. return 0;
  47. }
  48. txn.addr = ddev->iova;
  49. txn.flags = IOMMU_TRANS_DEFAULT;
  50. txn.id = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[0]);
  51. phys = qcom_iommu_iova_to_phys_hard(ddev->domain, &txn);
  52. if (!phys)
  53. len = strscpy(buf, "FAIL\n", sizeof(buf));
  54. else
  55. len = scnprintf(buf, sizeof(buf), "%pa\n", &phys);
  56. mutex_unlock(&ddev->state_lock);
  57. return simple_read_from_buffer(ubuf, count, offset, buf, len);
  58. }
  59. static ssize_t iommu_debug_atos_write(struct file *file,
  60. const char __user *ubuf,
  61. size_t count, loff_t *offset)
  62. {
  63. struct iommu_debug_device *ddev = file->private_data;
  64. dma_addr_t iova;
  65. phys_addr_t phys;
  66. unsigned long pfn;
  67. mutex_lock(&ddev->state_lock);
  68. if (!ddev->domain) {
  69. pr_err("%s: No domain. Have you selected a usecase?\n", __func__);
  70. mutex_unlock(&ddev->state_lock);
  71. return -EINVAL;
  72. }
  73. if (kstrtox_from_user(ubuf, count, 0, &iova)) {
  74. dev_err(ddev->test_dev, "Invalid format for iova\n");
  75. ddev->iova = 0;
  76. mutex_unlock(&ddev->state_lock);
  77. return -EINVAL;
  78. }
  79. phys = iommu_iova_to_phys(ddev->domain, iova);
  80. pfn = __phys_to_pfn(phys);
  81. if (!pfn_valid(pfn)) {
  82. dev_err(ddev->test_dev, "Invalid ATOS operation page %pa\n", &phys);
  83. mutex_unlock(&ddev->state_lock);
  84. return -EINVAL;
  85. }
  86. ddev->iova = iova;
  87. mutex_unlock(&ddev->state_lock);
  88. pr_info("Saved iova=%pa for future ATOS commands\n", &iova);
  89. return count;
  90. }
  91. const struct file_operations iommu_debug_atos_fops = {
  92. .open = simple_open,
  93. .write = iommu_debug_atos_write,
  94. .read = iommu_debug_dma_atos_read,
  95. };
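/*
 * Illustrative usage sketch (assumes the debugfs node for these fops is
 * named "atos" and lives in the directory this driver creates elsewhere;
 * values are hypothetical):
 *
 *   echo 0x10000 > atos    # save an IOVA whose current mapping passes pfn_valid()
 *   cat atos               # run the hardware ATOS lookup; prints the physical
 *                          # address, or "FAIL" if the translation misses
 */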
  96. static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
  97. size_t count, loff_t *offset)
  98. {
  99. ssize_t retval = -EINVAL;
  100. int ret;
  101. char *comma1, *comma2, *comma3;
  102. char buf[100] = {0};
  103. dma_addr_t iova;
  104. phys_addr_t phys;
  105. size_t size;
  106. int prot;
  107. struct iommu_debug_device *ddev = file->private_data;
  108. if (count >= 100) {
  109. pr_err_ratelimited("Value too large\n");
  110. return -EINVAL;
  111. }
  112. if (copy_from_user(buf, ubuf, count)) {
  113. pr_err_ratelimited("Couldn't copy from user\n");
  114. return -EFAULT;
  115. }
  116. comma1 = strnchr(buf, count, ',');
  117. if (!comma1)
  118. goto invalid_format;
  119. comma2 = strnchr(comma1 + 1, count, ',');
  120. if (!comma2)
  121. goto invalid_format;
  122. comma3 = strnchr(comma2 + 1, count, ',');
  123. if (!comma3)
  124. goto invalid_format;
  125. /* split up the words */
  126. *comma1 = *comma2 = *comma3 = '\0';
  127. if (kstrtoux(buf, 0, &iova))
  128. goto invalid_format;
  129. if (kstrtoux(comma1 + 1, 0, &phys))
  130. goto invalid_format;
  131. if (kstrtosize_t(comma2 + 1, 0, &size))
  132. goto invalid_format;
  133. if (kstrtoint(comma3 + 1, 0, &prot))
  134. goto invalid_format;
  135. mutex_lock(&ddev->state_lock);
  136. if (!ddev->domain) {
  137. pr_err_ratelimited("%s: No domain. Have you selected a usecase?\n", __func__);
  138. mutex_unlock(&ddev->state_lock);
  139. return -EINVAL;
  140. }
  141. ret = iommu_map(ddev->domain, iova, phys, size, prot);
  142. if (ret) {
  143. pr_err_ratelimited("iommu_map failed with %d\n", ret);
  144. retval = -EIO;
  145. goto out;
  146. }
  147. retval = count;
  148. pr_info("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n", &iova, &phys, size, prot);
  149. out:
  150. mutex_unlock(&ddev->state_lock);
  151. return retval;
  152. invalid_format:
  153. pr_err_ratelimited("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
  154. return -EINVAL;
  155. }
  156. const struct file_operations iommu_debug_map_fops = {
  157. .open = simple_open,
  158. .write = iommu_debug_map_write,
  159. };
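/*
 * Illustrative map request (hypothetical values; "prot" is the bitwise OR of
 * IOMMU_READ (0x1), IOMMU_WRITE (0x2), etc., as the error message above says):
 *
 *   echo "0x10000,0x80000000,0x1000,0x3" > map   # 4K at IOVA 0x10000 -> PA
 *                                                # 0x80000000, read/write
 */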
  160. static ssize_t iommu_debug_unmap_write(struct file *file,
  161. const char __user *ubuf,
  162. size_t count, loff_t *offset)
  163. {
  164. ssize_t retval = 0;
  165. char *comma1;
  166. char buf[100] = {0};
  167. dma_addr_t iova;
  168. size_t size;
  169. size_t unmapped;
  170. struct iommu_debug_device *ddev = file->private_data;
  171. if (count >= 100) {
  172. pr_err_ratelimited("Value too large\n");
  173. return -EINVAL;
  174. }
  175. if (!ddev->domain) {
  176. pr_err_ratelimited("%s: No domain. Have you selected a usecase?\n", __func__);
  177. return -EINVAL;
  178. }
  179. if (copy_from_user(buf, ubuf, count)) {
  180. pr_err_ratelimited("Couldn't copy from user\n");
  181. retval = -EFAULT;
  182. return retval;
  183. }
  184. comma1 = strnchr(buf, count, ',');
  185. if (!comma1)
  186. goto invalid_format;
  187. /* split up the words */
  188. *comma1 = '\0';
  189. if (kstrtoux(buf, 0, &iova))
  190. goto invalid_format;
  191. if (kstrtosize_t(comma1 + 1, 0, &size))
  192. goto invalid_format;
  193. mutex_lock(&ddev->state_lock);
  194. if (!ddev->domain) {
  195. pr_err_ratelimited("No domain. Did you already attach?\n");
  196. mutex_unlock(&ddev->state_lock);
  197. return -EINVAL;
  198. }
  199. unmapped = iommu_unmap(ddev->domain, iova, size);
  200. if (unmapped != size) {
  201. pr_err_ratelimited("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx",
  202. size, unmapped);
  203. retval = -EIO;
  204. goto out;
  205. }
  206. retval = count;
  207. pr_info("Unmapped %pa (len=0x%zx)\n", &iova, size);
  208. out:
  209. mutex_unlock(&ddev->state_lock);
  210. return retval;
  211. invalid_format:
  212. pr_err_ratelimited("Invalid format. Expected: iova,len\n");
  213. return -EINVAL;
  214. }
  215. const struct file_operations iommu_debug_unmap_fops = {
  216. .open = simple_open,
  217. .write = iommu_debug_unmap_write,
  218. };
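/*
 * Matching unmap request (hypothetical values). The length must cover the
 * whole mapping; a short iommu_unmap() result is reported as -EIO:
 *
 *   echo "0x10000,0x1000" > unmap
 */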
  219. /*
  220. * Performs DMA mapping of a given virtual address and size to an iova address.
  221. * User input format: (addr,len,dma attr) where dma attr is:
  222. * 0: normal mapping
  223. * 1: force coherent mapping
  224. * 2: force non-coherent mapping
  225. * 3: use system cache
  226. */
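/*
 * Illustrative session (addresses are hypothetical; the buffer address must
 * come from a prior read of test_virt_addr and stay within its 1MB range):
 *
 *   cat test_virt_addr                            # e.g. 0xffffffc012345000
 *   echo "0xffffffc012345000,0x1000,1" > dma_map  # 4K, DMA_ATTR_FORCE_COHERENT
 *   cat dma_map                                   # prints the IOVA just saved
 */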
  227. static ssize_t iommu_debug_dma_map_write(struct file *file,
  228. const char __user *ubuf, size_t count, loff_t *offset)
  229. {
  230. ssize_t retval = -EINVAL;
  231. int ret;
  232. char *comma1, *comma2;
  233. char buf[100] = {0};
  234. unsigned long addr;
  235. void *v_addr;
  236. dma_addr_t iova;
  237. size_t size;
  238. unsigned int attr;
  239. unsigned long dma_attrs;
  240. struct iommu_debug_device *ddev = file->private_data;
  241. struct device *dev = ddev->test_dev;
  242. if (count >= sizeof(buf)) {
  243. pr_err_ratelimited("Value too large\n");
  244. return -EINVAL;
  245. }
  246. if (copy_from_user(buf, ubuf, count)) {
  247. pr_err_ratelimited("Couldn't copy from user\n");
  248. return -EFAULT;
  249. }
  250. comma1 = strnchr(buf, count, ',');
  251. if (!comma1)
  252. goto invalid_format;
  253. comma2 = strnchr(comma1 + 1, count, ',');
  254. if (!comma2)
  255. goto invalid_format;
  256. *comma1 = *comma2 = '\0';
  257. if (kstrtoul(buf, 0, &addr))
  258. goto invalid_format;
  259. v_addr = (void *)addr;
  260. if (kstrtosize_t(comma1 + 1, 0, &size))
  261. goto invalid_format;
  262. if (kstrtouint(comma2 + 1, 0, &attr))
  263. goto invalid_format;
  264. mutex_lock(&test_virt_addr_lock);
  265. if (IS_ERR(test_virt_addr)) {
  266. mutex_unlock(&test_virt_addr_lock);
  267. goto allocation_failure;
  268. }
  269. if (!test_virt_addr) {
  270. mutex_unlock(&test_virt_addr_lock);
  271. goto missing_allocation;
  272. }
  273. mutex_unlock(&test_virt_addr_lock);
  274. if (v_addr < test_virt_addr || v_addr + size > test_virt_addr + SZ_1M)
  275. goto invalid_addr;
  276. if (attr == 0)
  277. dma_attrs = 0;
  278. else if (attr == 1)
  279. dma_attrs = DMA_ATTR_FORCE_COHERENT;
  280. else if (attr == 2)
  281. dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
  282. else if (attr == 3)
  283. dma_attrs = DMA_ATTR_SYS_CACHE_ONLY;
  284. else
  285. goto invalid_format;
  286. mutex_lock(&ddev->state_lock);
  287. if (!ddev->domain) {
  288. pr_err_ratelimited("%s: No domain. Have you selected a usecase?\n", __func__);
  289. mutex_unlock(&ddev->state_lock);
  290. return -EINVAL;
  291. }
  292. iova = dma_map_single_attrs(dev, v_addr, size, DMA_TO_DEVICE, dma_attrs);
  293. if (dma_mapping_error(dev, iova)) {
  294. pr_err_ratelimited("Failed to perform dma_map_single\n");
  295. ret = -EINVAL;
  296. goto out;
  297. }
  298. retval = count;
  299. pr_err_ratelimited("Mapped 0x%p to %pa (len=0x%zx)\n", v_addr, &iova, size);
  300. ddev->iova = iova;
  301. pr_err_ratelimited("Saved iova=%pa for future PTE commands\n", &iova);
  302. out:
  303. mutex_unlock(&ddev->state_lock);
  304. return retval;
  305. invalid_format:
  306. pr_err_ratelimited("Invalid format. Expected: addr,len,dma attr where 'dma attr' is\n0: normal mapping\n1: force coherent\n2: force non-cohernet\n3: use system cache\n");
  307. return retval;
  308. invalid_addr:
  309. pr_err_ratelimited("Invalid addr given (0x%p)! Address should be within 1MB size from start addr returned by doing 'cat test_virt_addr'.\n",
  310. v_addr);
  311. return retval;
  312. allocation_failure:
  313. pr_err_ratelimited("Allocation of test_virt_addr failed.\n");
  314. return -ENOMEM;
  315. missing_allocation:
  316. pr_err_ratelimited("Please attempt to do 'cat test_virt_addr'.\n");
  317. return retval;
  318. }
  319. static ssize_t iommu_debug_dma_map_read(struct file *file, char __user *ubuf,
  320. size_t count, loff_t *offset)
  321. {
  322. struct iommu_debug_device *ddev = file->private_data;
  323. char buf[100] = {};
  324. dma_addr_t iova;
  325. int len;
  326. if (*offset)
  327. return 0;
  328. iova = ddev->iova;
  329. len = scnprintf(buf, sizeof(buf), "%pa\n", &iova);
  330. return simple_read_from_buffer(ubuf, count, offset, buf, len);
  331. }
  332. const struct file_operations iommu_debug_dma_map_fops = {
  333. .open = simple_open,
  334. .read = iommu_debug_dma_map_read,
  335. .write = iommu_debug_dma_map_write,
  336. };
  337. static ssize_t iommu_debug_dma_unmap_write(struct file *file, const char __user *ubuf,
  338. size_t count, loff_t *offset)
  339. {
  340. ssize_t retval = 0;
  341. char *comma1, *comma2;
  342. char buf[100] = {};
  343. size_t size;
  344. unsigned int attr;
  345. dma_addr_t iova;
  346. unsigned long dma_attrs;
  347. struct iommu_debug_device *ddev = file->private_data;
  348. struct device *dev = ddev->test_dev;
  349. if (count >= sizeof(buf)) {
  350. pr_err_ratelimited("Value too large\n");
  351. return -EINVAL;
  352. }
  353. if (copy_from_user(buf, ubuf, count)) {
  354. pr_err_ratelimited("Couldn't copy from user\n");
  355. retval = -EFAULT;
  356. return retval;
  357. }
  358. comma1 = strnchr(buf, count, ',');
  359. if (!comma1)
  360. goto invalid_format;
  361. comma2 = strnchr(comma1 + 1, count, ',');
  362. if (!comma2)
  363. goto invalid_format;
  364. *comma1 = *comma2 = '\0';
  365. if (kstrtoux(buf, 0, &iova))
  366. goto invalid_format;
  367. if (kstrtosize_t(comma1 + 1, 0, &size))
  368. goto invalid_format;
  369. if (kstrtouint(comma2 + 1, 0, &attr))
  370. goto invalid_format;
  371. if (attr == 0)
  372. dma_attrs = 0;
  373. else if (attr == 1)
  374. dma_attrs = DMA_ATTR_FORCE_COHERENT;
  375. else if (attr == 2)
  376. dma_attrs = DMA_ATTR_FORCE_NON_COHERENT;
  377. else if (attr == 3)
  378. dma_attrs = DMA_ATTR_SYS_CACHE_ONLY;
  379. else
  380. goto invalid_format;
  381. mutex_lock(&ddev->state_lock);
  382. if (!ddev->domain) {
  383. pr_err_ratelimited("%s: No domain. Have you selected a usecase?\n", __func__);
  384. mutex_unlock(&ddev->state_lock);
  385. return -EINVAL;
  386. }
  387. dma_unmap_single_attrs(dev, iova, size, DMA_TO_DEVICE, dma_attrs);
  388. retval = count;
  389. pr_err_ratelimited("Unmapped %pa (len=0x%zx)\n", &iova, size);
  390. out:
  391. mutex_unlock(&ddev->state_lock);
  392. return retval;
  393. invalid_format:
  394. pr_err_ratelimited("Invalid format. Expected: iova,len, dma attr\n");
  395. return -EINVAL;
  396. }
  397. const struct file_operations iommu_debug_dma_unmap_fops = {
  398. .open = simple_open,
  399. .write = iommu_debug_dma_unmap_write,
  400. };
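/*
 * The dma_unmap node expects the same attr encoding as dma_map, so a typical
 * round trip ends with (hypothetical values):
 *
 *   echo "0xfffff000,0x1000,1" > dma_unmap   # iova,len,attr used for the map
 */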
  401. static int iommu_debug_build_phoney_sg_table(struct device *dev,
  402. struct sg_table *table,
  403. unsigned long total_size,
  404. unsigned long chunk_size)
  405. {
  406. unsigned long nents = total_size / chunk_size;
  407. struct scatterlist *sg;
  408. int i, j;
  409. struct page *page;
  410. if (!IS_ALIGNED(total_size, PAGE_SIZE))
  411. return -EINVAL;
  412. if (!IS_ALIGNED(total_size, chunk_size))
  413. return -EINVAL;
  414. if (sg_alloc_table(table, nents, GFP_KERNEL))
  415. return -EINVAL;
  416. for_each_sg(table->sgl, sg, table->nents, i) {
  417. page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
  418. if (!page)
  419. goto free_pages;
  420. sg_set_page(sg, page, chunk_size, 0);
  421. }
  422. return 0;
  423. free_pages:
  424. for_each_sg(table->sgl, sg, i--, j)
  425. __free_pages(sg_page(sg), get_order(chunk_size));
  426. sg_free_table(table);
  427. return -ENOMEM;
  428. }
  429. static void iommu_debug_destroy_phoney_sg_table(struct device *dev,
  430. struct sg_table *table,
  431. unsigned long chunk_size)
  432. {
  433. struct scatterlist *sg;
  434. int i;
  435. for_each_sg(table->sgl, sg, table->nents, i)
  436. __free_pages(sg_page(sg), get_order(chunk_size));
  437. sg_free_table(table);
  438. }
  439. #define ps_printf(name, s, fmt, ...) ({ \
  440. pr_err("%s: " fmt, name, ##__VA_ARGS__); \
  441. seq_printf(s, fmt, ##__VA_ARGS__); \
  442. })
  443. static int __functional_dma_api_alloc_test(struct device *dev,
  444. struct seq_file *s,
  445. struct iommu_domain *domain,
  446. void *ignored)
  447. {
  448. size_t size = SZ_1K * 742;
  449. int ret = 0;
  450. u8 *data;
  451. dma_addr_t iova;
  452. /* Make sure we can allocate and use a buffer */
  453. ps_printf(dev_name(dev), s, "Allocating coherent buffer");
  454. data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
  455. if (!data) {
  456. ret = -ENOMEM;
  457. } else {
  458. int i;
  459. ps_printf(dev_name(dev), s, " -> SUCCEEDED\n");
  460. ps_printf(dev_name(dev), s, "Using coherent buffer");
  461. for (i = 0; i < 742; ++i) {
  462. int ind = SZ_1K * i;
  463. u8 *p = data + ind;
  464. u8 val = i % 255;
  465. memset(data, 0xa5, size);
  466. *p = val;
  467. (*p)++;
  468. if ((*p) != val + 1) {
  469. ps_printf(dev_name(dev), s,
  470. " -> FAILED on iter %d since %d != %d\n",
  471. i, *p, val + 1);
  472. ret = -EINVAL;
  473. }
  474. }
  475. if (!ret)
  476. ps_printf(dev_name(dev), s, " -> SUCCEEDED\n");
  477. dma_free_coherent(dev, size, data, iova);
  478. }
  479. return ret;
  480. }
  481. static int __functional_dma_api_basic_test(struct device *dev,
  482. struct seq_file *s,
  483. struct iommu_domain *domain,
  484. void *ignored)
  485. {
  486. size_t size = 1518;
  487. int i, j, ret = 0;
  488. u8 *data;
  489. dma_addr_t iova;
  490. ps_printf(dev_name(dev), s, "Basic DMA API test");
  491. /* Make sure we can allocate and use a buffer */
  492. for (i = 0; i < 1000; ++i) {
  493. data = kmalloc(size, GFP_KERNEL);
  494. if (!data) {
  495. ret = -ENOMEM;
  496. goto out;
  497. }
  498. memset(data, 0xa5, size);
  499. iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
  500. ret = iommu_debug_check_mapping_fast(dev, iova, size, virt_to_phys(data));
  501. if (ret)
  502. goto out;
  503. dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
  504. for (j = 0; j < size; ++j) {
  505. if (data[j] != 0xa5) {
  506. dev_err_ratelimited(dev, "data[%d] != 0xa5\n", data[j]);
  507. ret = -EINVAL;
  508. goto out;
  509. }
  510. }
  511. kfree(data);
  512. }
  513. out:
  514. if (ret)
  515. ps_printf(dev_name(dev), s, " -> FAILED\n");
  516. else
  517. ps_printf(dev_name(dev), s, " -> SUCCEEDED\n");
  518. return ret;
  519. }
  520. static int __functional_dma_api_map_sg_test(struct device *dev, struct seq_file *s,
  521. struct iommu_domain *domain, size_t sizes[])
  522. {
  523. const size_t *sz;
  524. int ret = 0, count = 0;
  525. ps_printf(dev_name(dev), s, "Map SG DMA API test");
  526. for (sz = sizes; *sz; ++sz) {
  527. size_t size = *sz;
  528. struct sg_table table;
  529. unsigned long chunk_size = SZ_4K;
  530. /* Build us a table */
  531. ret = iommu_debug_build_phoney_sg_table(dev, &table, size, chunk_size);
  532. if (ret) {
  533. seq_puts(s, "couldn't build phoney sg table! bailing...\n");
  534. goto out;
  535. }
  536. count = dma_map_sg(dev, table.sgl, table.nents, DMA_BIDIRECTIONAL);
  537. if (!count) {
  538. ret = -EINVAL;
  539. goto destroy_table;
  540. }
  541. /* Check mappings... */
  542. ret = iommu_debug_check_mapping_sg_fast(dev, table.sgl, 0, table.nents, count);
  543. dma_unmap_sg(dev, table.sgl, table.nents, DMA_BIDIRECTIONAL);
  544. destroy_table:
  545. iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
  546. }
  547. out:
  548. if (ret)
  549. ps_printf(dev_name(dev), s, " -> FAILED\n");
  550. else
  551. ps_printf(dev_name(dev), s, " -> SUCCEEDED\n");
  552. return ret;
  553. }
  554. static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
  555. void *ignored)
  556. {
  557. struct iommu_debug_device *ddev = s->private;
  558. struct device *dev;
  559. size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
  560. int ret = -EINVAL;
  561. mutex_lock(&ddev->state_lock);
  562. if (!iommu_debug_usecase_reset(ddev))
  563. goto out;
  564. dev = ddev->test_dev;
  565. ret = __functional_dma_api_alloc_test(dev, s, ddev->domain, sizes);
  566. ret |= __functional_dma_api_basic_test(dev, s, ddev->domain, sizes);
  567. ret |= __functional_dma_api_map_sg_test(dev, s, ddev->domain, sizes);
  568. out:
  569. mutex_unlock(&ddev->state_lock);
  570. if (ret)
  571. seq_printf(s, "FAIL %d\n", ret);
  572. else
  573. seq_puts(s, "SUCCESS\n");
  574. return 0;
  575. }
  576. static int iommu_debug_functional_arm_dma_api_open(struct inode *inode,
  577. struct file *file)
  578. {
  579. return single_open(file, iommu_debug_functional_arm_dma_api_show,
  580. inode->i_private);
  581. }
  582. const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
  583. .open = iommu_debug_functional_arm_dma_api_open,
  584. .read = seq_read,
  585. .llseek = seq_lseek,
  586. .release = single_release,
  587. };
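/*
 * Reading this node (name assumed to follow the fops name) resets the current
 * usecase and runs the coherent-alloc, basic map/unmap and map_sg tests over
 * the sizes listed in the show function, printing "SUCCESS" or "FAIL <err>":
 *
 *   cat functional_arm_dma_api
 */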
  588. /* Creates a fresh fast mapping and applies @fn to it */
  589. static int __apply_to_new_mapping(struct seq_file *s,
  590. int (*fn)(struct device *dev,
  591. struct seq_file *s,
  592. struct iommu_domain *domain,
  593. void *priv),
  594. void *priv)
  595. {
  596. struct iommu_domain *domain;
  597. struct iommu_debug_device *ddev = s->private;
  598. struct device *dev;
  599. int ret = -EINVAL;
  600. mutex_lock(&ddev->state_lock);
  601. if (!iommu_debug_usecase_reset(ddev))
  602. goto out;
  603. domain = ddev->domain;
  604. dev = ddev->test_dev;
  605. ret = fn(dev, s, domain, priv);
  606. out:
  607. mutex_unlock(&ddev->state_lock);
  608. seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
  609. return ret;
  610. }
  611. static const char *_size_to_string(unsigned long size)
  612. {
  613. switch (size) {
  614. case SZ_4K:
  615. return "4K";
  616. case SZ_8K:
  617. return "8K";
  618. case SZ_16K:
  619. return "16K";
  620. case SZ_64K:
  621. return "64K";
  622. case SZ_1M:
  623. return "1M";
  624. case SZ_2M:
  625. return "2M";
  626. case SZ_1M * 12:
  627. return "12M";
  628. case SZ_1M * 20:
  629. return "20M";
  630. case SZ_1M * 24:
  631. return "24M";
  632. case SZ_1M * 32:
  633. return "32M";
  634. }
  635. pr_err("unknown size, please add to %s\n", __func__);
  636. return "unknown size";
  637. }
  638. static int __check_mapping(struct device *dev, struct iommu_domain *domain,
  639. dma_addr_t iova, phys_addr_t expected)
  640. {
  641. struct iommu_fwspec *fwspec;
  642. phys_addr_t res, res2;
  643. struct qcom_iommu_atos_txn txn;
  644. fwspec = dev_iommu_fwspec_get(dev);
  645. if (!fwspec) {
  646. dev_err_ratelimited(dev, "No fwspec.\n");
  647. return -EINVAL;
  648. }
  649. txn.addr = iova;
  650. txn.flags = IOMMU_TRANS_DEFAULT;
  651. txn.id = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[0]);
  652. res = qcom_iommu_iova_to_phys_hard(domain, &txn);
  653. res2 = iommu_iova_to_phys(domain, iova);
  654. WARN(res != res2, "hard/soft iova_to_phys fns don't agree...");
  655. if (res != expected) {
  656. dev_err_ratelimited(dev, "Bad translation for %pa! Expected: %pa Got: %pa\n",
  657. &iova, &expected, &res);
  658. return -EINVAL;
  659. }
  660. return 0;
  661. }
  662. static int __full_va_sweep(struct device *dev, struct seq_file *s,
  663. struct iommu_domain *domain, void *priv)
  664. {
  665. u64 iova;
  666. int nr_maps = 0;
  667. dma_addr_t dma_addr;
  668. void *virt;
  669. phys_addr_t phys;
  670. const u64 max = SZ_1G * 4ULL - 1;
  671. int ret = 0, i;
  672. const size_t size = (size_t)priv;
  673. unsigned long theiova;
  674. virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
  675. if (!virt) {
  676. if (size > SZ_8K) {
  677. dev_err_ratelimited(dev, "Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
  678. _size_to_string(size));
  679. return 0;
  680. }
  681. return -ENOMEM;
  682. }
  683. phys = virt_to_phys(virt);
  684. for (iova = 0, i = 0; iova < max; iova += size, ++i) {
  685. unsigned long expected = iova;
  686. if (iova == MSI_IOVA_BASE) {
  687. iova = MSI_IOVA_BASE + MSI_IOVA_LENGTH - size;
  688. continue;
  689. }
  690. dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
  691. if (dma_addr != expected) {
  692. dev_err_ratelimited(dev, "Unexpected iova on iter %d (expected: 0x%lx got: 0x%lx)\n",
  693. i, expected, (unsigned long)dma_addr);
  694. ret = -EINVAL;
  695. if (!dma_mapping_error(dev, dma_addr))
  696. dma_unmap_single(dev, dma_addr, size,
  697. DMA_TO_DEVICE);
  698. goto out;
  699. }
  700. nr_maps++;
  701. }
  702. if (domain) {
  703. /* check every mapping from 0..6M */
  704. for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
  705. phys_addr_t expected = phys;
  706. if (__check_mapping(dev, domain, iova, expected)) {
  707. dev_err_ratelimited(dev, "iter: %d\n", i);
  708. ret = -EINVAL;
  709. goto out;
  710. }
  711. }
  712. /* and from 4G..4G-6M */
  713. for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
  714. phys_addr_t expected = phys;
  715. if (iova == MSI_IOVA_BASE) {
  716. iova = MSI_IOVA_BASE + MSI_IOVA_LENGTH - size;
  717. continue;
  718. }
  719. theiova = ((SZ_1G * 4ULL) - size) - iova;
  720. if (__check_mapping(dev, domain, theiova, expected)) {
  721. dev_err_ratelimited(dev, "iter: %d\n", i);
  722. ret = -EINVAL;
  723. goto out;
  724. }
  725. }
  726. }
  727. /* at this point, our VA space should be full */
  728. dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
  729. if (dma_addr != DMA_MAPPING_ERROR) {
  730. dev_err_ratelimited(dev, "dma_map_single succeeded when it should have failed. Got iova: 0x%lx\n",
  731. (unsigned long)dma_addr);
  732. ret = -EINVAL;
  733. }
  734. out:
  735. for (iova = 0; iova < max && nr_maps--; iova += size) {
  736. if (iova == MSI_IOVA_BASE) {
  737. iova = MSI_IOVA_BASE + MSI_IOVA_LENGTH - size;
  738. continue;
  739. }
  740. dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
  741. }
  742. free_pages((unsigned long)virt, get_order(size));
  743. return ret;
  744. }
  745. static int __tlb_stress_sweep(struct device *dev, struct seq_file *s,
  746. struct iommu_domain *domain, void *unused)
  747. {
  748. int i, ret = 0;
  749. int nr_maps = 0;
  750. u64 iova;
  751. u64 first_iova = 0;
  752. const u64 max = SZ_1G * 4ULL - 1;
  753. void *virt;
  754. phys_addr_t phys;
  755. dma_addr_t dma_addr;
  756. /*
  757. * we'll be doing 4K and 8K mappings. Need to own an entire 8K
  758. * chunk that we can work with.
  759. */
  760. virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
  761. phys = virt_to_phys(virt);
  762. /* fill the whole 4GB space */
  763. for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
  764. if (iova == MSI_IOVA_BASE) {
  765. iova = MSI_IOVA_BASE + MSI_IOVA_LENGTH - SZ_8K;
  766. continue;
  767. }
  768. dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
  769. if (dma_addr == DMA_MAPPING_ERROR) {
  770. dev_err_ratelimited(dev, "Failed map on iter %d\n", i);
  771. ret = -EINVAL;
  772. goto out;
  773. } else if (dma_addr != iova) {
  774. dma_unmap_single(dev, dma_addr, SZ_8K, DMA_TO_DEVICE);
  775. dev_err_ratelimited(dev, "Failed map on iter %d\n", i);
  776. ret = -EINVAL;
  777. goto out;
  778. }
  779. nr_maps++;
  780. }
  781. if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_MAPPING_ERROR) {
  782. dev_err_ratelimited(dev, "dma_map_single unexpectedly (VA should have been exhausted)\n");
  783. ret = -EINVAL;
  784. goto out;
  785. }
  786. /*
  787. * free up 4K at the very beginning, then leave one 4K mapping,
  788. * then free up 8K. This will result in the next 8K map to skip
  789. * over the 4K hole and take the 8K one.
  790. * i.e
  791. * 0K..4K Hole
  792. * 4K..8K Map R1
  793. * 8K..12K Hole
  794. * 12K..4G Map R2
  795. */
  796. dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
  797. dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
  798. dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);
  799. /* remap 8K */
  800. dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
  801. if (dma_addr != SZ_8K) {
  802. dma_addr_t expected = SZ_8K;
  803. dev_err_ratelimited(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
  804. &dma_addr, &expected);
  805. /* To simplify error handling, unmap the 4K regions (4K..8K
  806. * and 12K..16K) here and the rest (16K..4G) in 8K increments
  807. * in the for loop.
  808. */
  809. dma_unmap_single(dev, SZ_4K, SZ_4K, DMA_TO_DEVICE);
  810. dma_unmap_single(dev, SZ_8K+SZ_4K, SZ_4K, DMA_TO_DEVICE);
  811. nr_maps -= 2;
  812. first_iova = SZ_8K + SZ_8K;
  813. ret = -EINVAL;
  814. goto out;
  815. }
  816. /*
  817. * Now we have 0..4K hole and 4K..4G mapped.
  818. * Remap 4K. We should get the first 4K chunk that was skipped
  819. * over during the previous 8K map. If we missed a TLB invalidate
  820. * at that point this should explode.
  821. */
  822. dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);
  823. if (dma_addr != 0) {
  824. dma_addr_t expected = 0;
  825. dev_err_ratelimited(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
  826. &dma_addr, &expected);
  827. /* To simplify error handling, unmap the 4K region (4K..8K)
  828. * here and rest (8K..4G) in 8K increments in the for loop.
  829. */
  830. dma_unmap_single(dev, SZ_4K, SZ_4K, DMA_TO_DEVICE);
  831. first_iova = SZ_8K;
  832. nr_maps -= 1;
  833. ret = -EINVAL;
  834. goto out;
  835. }
  836. first_iova = 0;
  837. if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_MAPPING_ERROR) {
  838. dev_err_ratelimited(dev, "dma_map_single unexpectedly after remaps (VA should have been exhausted)\n");
  839. ret = -EINVAL;
  840. goto out;
  841. }
  842. out:
  843. /* we're all full again. unmap everything. */
  844. for (iova = first_iova; iova < max && nr_maps--; iova += SZ_8K) {
  845. if (iova == MSI_IOVA_BASE) {
  846. iova = MSI_IOVA_BASE + MSI_IOVA_LENGTH - SZ_8K;
  847. continue;
  848. }
  849. dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);
  850. }
  851. free_pages((unsigned long)virt, get_order(SZ_8K));
  852. return ret;
  853. }
  854. struct fib_state {
  855. unsigned long cur;
  856. unsigned long prev;
  857. };
  858. static void fib_init(struct fib_state *f)
  859. {
  860. f->cur = f->prev = 1;
  861. }
  862. static unsigned long get_next_fib(struct fib_state *f)
  863. {
  864. unsigned long next = f->cur + f->prev;
  865. f->prev = f->cur;
  866. f->cur = next;
  867. return next;
  868. }
  869. /*
  870. * Not actually random. Just testing the fibs (and max - the fibs).
  871. */
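/*
 * Concretely, with size = SZ_4K the unmap loop below visits the Fibonacci
 * multiples 2*4K, 3*4K, 5*4K, 8*4K, 13*4K, ... together with their mirror
 * addresses measured back from the top of the 4GB space, skipping anything
 * that falls inside the MSI window.
 */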
  872. static int __rand_va_sweep(struct device *dev, struct seq_file *s,
  873. struct iommu_domain *domain, void *priv)
  874. {
  875. u64 iova;
  876. const u64 max = SZ_1G * 4ULL - 1;
  877. int i, remapped, unmapped, ret = 0;
  878. int nr_maps = 0;
  879. void *virt;
  880. dma_addr_t dma_addr, dma_addr2;
  881. struct fib_state fib;
  882. const size_t size = (size_t)priv;
  883. virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
  884. if (!virt) {
  885. if (size > SZ_8K) {
  886. dev_err_ratelimited(dev, "Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
  887. _size_to_string(size));
  888. return 0;
  889. }
  890. return -ENOMEM;
  891. }
  892. /* fill the whole 4GB space */
  893. for (iova = 0, i = 0; iova < max; iova += size, ++i) {
  894. if (iova == MSI_IOVA_BASE) {
  895. iova = MSI_IOVA_BASE + MSI_IOVA_LENGTH - size;
  896. continue;
  897. }
  898. dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
  899. if (dma_addr == DMA_MAPPING_ERROR) {
  900. dev_err_ratelimited(dev, "Failed map on iter %d\n", i);
  901. ret = -EINVAL;
  902. goto out;
  903. } else if (dma_addr != iova) {
  904. dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
  905. dev_err_ratelimited(dev, "Unexpected dma_addr. got: %lx, expected: %lx\n",
  906. (unsigned long)dma_addr, (unsigned long)iova);
  907. ret = -EINVAL;
  908. goto out;
  909. }
  910. nr_maps++;
  911. }
  912. /* now unmap "random" iovas */
  913. unmapped = 0;
  914. fib_init(&fib);
  915. for (iova = get_next_fib(&fib) * size;
  916. iova < max - size;
  917. iova = (u64)get_next_fib(&fib) * size) {
  918. dma_addr = (dma_addr_t)(iova);
  919. dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
  920. if (dma_addr == dma_addr2) {
  921. WARN(1, "%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
  922. __func__);
  923. return -EINVAL;
  924. }
  925. if (!(MSI_IOVA_BASE <= dma_addr && MSI_IOVA_BASE + MSI_IOVA_LENGTH > dma_addr))
  926. dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
  927. if (!(MSI_IOVA_BASE <= dma_addr2 && MSI_IOVA_BASE + MSI_IOVA_LENGTH > dma_addr2))
  928. dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
  929. unmapped += 2;
  930. }
  931. /* and map until everything fills back up */
  932. for (remapped = 0; ; ++remapped) {
  933. dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
  934. if (dma_addr == DMA_MAPPING_ERROR)
  935. break;
  936. }
  937. if (unmapped != remapped) {
  938. dev_err_ratelimited(dev, "Unexpected random remap count! Unmapped %d but remapped %d\n",
  939. unmapped, remapped);
  940. ret = -EINVAL;
  941. }
  942. out:
  943. for (iova = 0; iova < max && nr_maps--; iova += size) {
  944. if (iova == MSI_IOVA_BASE) {
  945. iova = MSI_IOVA_BASE + MSI_IOVA_LENGTH - size;
  946. continue;
  947. }
  948. dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
  949. }
  950. free_pages((unsigned long)virt, get_order(size));
  951. return ret;
  952. }
  953. static int __functional_dma_api_va_test(struct seq_file *s)
  954. {
  955. int ret = 0;
  956. size_t *sz;
  957. size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
  958. struct iommu_debug_device *ddev = s->private;
  959. char *usecase_name;
  960. /*
  961. * dev_name() cannot be used to get the usecase name as ddev->test_dev
  962. * will be NULL in case __apply_to_new_mapping() fails. Since
  963. * ddev->test_dev changes across calls to __apply_to_new_mapping(), we
  964. * also can't hold a reference to its name by caching the result of
  965. * dev_name() initially.
  966. */
  967. mutex_lock(&ddev->state_lock);
  968. if (!ddev->test_dev) {
  969. mutex_unlock(&ddev->state_lock);
  970. return -ENODEV;
  971. }
  972. usecase_name = kstrdup(dev_name(ddev->test_dev), GFP_KERNEL);
  973. mutex_unlock(&ddev->state_lock);
  974. if (!usecase_name)
  975. return -ENOMEM;
  976. for (sz = sizes; *sz; ++sz) {
  977. ps_printf(usecase_name, s, "Full VA sweep @%s:", _size_to_string(*sz));
  978. if (__apply_to_new_mapping(s, __full_va_sweep, (void *)*sz)) {
  979. ps_printf(usecase_name, s, " -> FAILED\n");
  980. ret = -EINVAL;
  981. } else {
  982. ps_printf(usecase_name, s, " -> SUCCEEDED\n");
  983. }
  984. }
  985. ps_printf(usecase_name, s, "bonus map:");
  986. if (__apply_to_new_mapping(s, __full_va_sweep, (void *)SZ_4K)) {
  987. ps_printf(usecase_name, s, " -> FAILED\n");
  988. ret = -EINVAL;
  989. } else {
  990. ps_printf(usecase_name, s, " -> SUCCEEDED\n");
  991. }
  992. for (sz = sizes; *sz; ++sz) {
  993. ps_printf(usecase_name, s, "Rand VA sweep @%s:", _size_to_string(*sz));
  994. if (__apply_to_new_mapping(s, __rand_va_sweep, (void *)*sz)) {
  995. ps_printf(usecase_name, s, " -> FAILED\n");
  996. ret = -EINVAL;
  997. } else {
  998. ps_printf(usecase_name, s, " -> SUCCEEDED\n");
  999. }
  1000. }
  1001. ps_printf(usecase_name, s, "TLB stress sweep:");
  1002. if (__apply_to_new_mapping(s, __tlb_stress_sweep, NULL)) {
  1003. ps_printf(usecase_name, s, " -> FAILED\n");
  1004. ret = -EINVAL;
  1005. } else {
  1006. ps_printf(usecase_name, s, " -> SUCCEEDED\n");
  1007. }
  1008. ps_printf(usecase_name, s, "second bonus map:");
  1009. if (__apply_to_new_mapping(s, __full_va_sweep, (void *)SZ_4K)) {
  1010. ps_printf(usecase_name, s, " -> FAILED\n");
  1011. ret = -EINVAL;
  1012. } else {
  1013. ps_printf(usecase_name, s, " -> SUCCEEDED\n");
  1014. }
  1015. kfree(usecase_name);
  1016. return ret;
  1017. }
  1018. static int iommu_debug_functional_fast_dma_api_show(struct seq_file *s,
  1019. void *ignored)
  1020. {
  1021. int ret = 0;
  1022. struct iommu_debug_device *ddev = s->private;
  1023. if (!ddev->test_dev) {
  1024. pr_err("%s:Have you selected a uscase?\n", __func__);
  1025. return -EINVAL;
  1026. }
  1027. if (!ddev->fastmap_usecase) {
  1028. ps_printf(dev_name(ddev->test_dev), s,
  1029. "Not a fastmap usecase\n");
  1030. return 0;
  1031. } else if (!IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_FAST)) {
  1032. ps_printf(dev_name(ddev->test_dev), s,
  1033. "CONFIG_IOMMU_IO_PGTABLE_FAST not enabled\n");
  1034. return 0;
  1035. }
  1036. ret |= __apply_to_new_mapping(s, __functional_dma_api_alloc_test, NULL);
  1037. ret |= __apply_to_new_mapping(s, __functional_dma_api_basic_test, NULL);
  1038. ret |= __functional_dma_api_va_test(s);
  1039. return ret;
  1040. }
  1041. static int iommu_debug_functional_fast_dma_api_open(struct inode *inode,
  1042. struct file *file)
  1043. {
  1044. return single_open(file, iommu_debug_functional_fast_dma_api_show,
  1045. inode->i_private);
  1046. }
  1047. const struct file_operations iommu_debug_functional_fast_dma_api_fops = {
  1048. .open = iommu_debug_functional_fast_dma_api_open,
  1049. .read = seq_read,
  1050. .llseek = seq_lseek,
  1051. .release = single_release,
  1052. };
  1053. static ssize_t iommu_debug_test_virt_addr_read(struct file *file,
  1054. char __user *ubuf,
  1055. size_t count, loff_t *offset)
  1056. {
  1057. char buf[100];
  1058. int len;
  1059. if (*offset)
  1060. return 0;
  1061. mutex_lock(&test_virt_addr_lock);
  1062. if (IS_ERR_OR_NULL(test_virt_addr))
  1063. test_virt_addr = kzalloc(SZ_1M, GFP_KERNEL);
  1064. if (!test_virt_addr) {
  1065. test_virt_addr = ERR_PTR(-ENOMEM);
  1066. len = strscpy(buf, "FAIL\n", sizeof(buf));
  1067. } else {
  1068. len = scnprintf(buf, sizeof(buf), "0x%p\n", test_virt_addr);
  1069. }
  1070. mutex_unlock(&test_virt_addr_lock);
  1071. return simple_read_from_buffer(ubuf, count, offset, buf, len);
  1072. }
  1073. const struct file_operations iommu_debug_test_virt_addr_fops = {
  1074. .open = simple_open,
  1075. .read = iommu_debug_test_virt_addr_read,
  1076. };
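/*
 * Reading test_virt_addr lazily allocates the shared 1MB buffer used by the
 * dma_map/dma_unmap nodes and prints its kernel virtual address (or "FAIL" if
 * the allocation failed), so a DMA-mapping session typically starts with:
 *
 *   cat test_virt_addr
 */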
  1077. #ifdef CONFIG_IOMMU_IOVA_ALIGNMENT
  1078. static unsigned long iommu_debug_get_align_mask(size_t size)
  1079. {
  1080. unsigned long align_mask = ~0UL;
  1081. align_mask <<= min_t(unsigned long, CONFIG_IOMMU_IOVA_ALIGNMENT + PAGE_SHIFT,
  1082. fls_long(size - 1));
  1083. return ~align_mask;
  1084. }
  1085. #else
  1086. static unsigned long iommu_debug_get_align_mask(size_t size)
  1087. {
  1088. unsigned long align_mask = ~0UL;
  1089. align_mask <<= fls_long(size - 1);
  1090. return ~align_mask;
  1091. }
  1092. #endif
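/*
 * Worked example for the variant without CONFIG_IOMMU_IOVA_ALIGNMENT: for
 * size = SZ_64K, fls_long(0xFFFF) is 16, so the returned mask is 0xFFFF and
 * __ALIGN_MASK(iova, 0xFFFF) in the profiling loops below rounds the test
 * IOVA up to a 64K boundary before mapping.
 */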
  1093. static void iommu_debug_device_profiling(struct seq_file *s, struct iommu_debug_device *ddev,
  1094. const size_t sizes[])
  1095. {
  1096. const size_t *sz;
  1097. struct iommu_domain *domain;
  1098. struct device *dev;
  1099. unsigned long iova = 0x10000;
  1100. phys_addr_t paddr = 0x80000000;
  1101. mutex_lock(&ddev->state_lock);
  1102. if (!iommu_debug_usecase_reset(ddev))
  1103. goto out;
  1104. domain = ddev->domain;
  1105. dev = ddev->test_dev;
  1106. seq_printf(s, "(average over %d iterations)\n", ddev->nr_iters);
  1107. seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map", "iommu_unmap");
  1108. for (sz = sizes; *sz; ++sz) {
  1109. size_t size = *sz;
  1110. size_t unmapped;
  1111. u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
  1112. u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
  1113. u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
  1114. ktime_t tbefore, tafter, diff;
  1115. int i;
  1116. unsigned long align_mask = iommu_debug_get_align_mask(size);
  1117. for (i = 0; i < ddev->nr_iters; ++i) {
  1118. tbefore = ktime_get();
  1119. if (iommu_map(domain, __ALIGN_MASK(iova, align_mask),
  1120. ALIGN(paddr, size), size,
  1121. IOMMU_READ | IOMMU_WRITE)) {
  1122. seq_puts(s, "Failed to map\n");
  1123. continue;
  1124. }
  1125. tafter = ktime_get();
  1126. diff = ktime_sub(tafter, tbefore);
  1127. map_elapsed_ns += ktime_to_ns(diff);
  1128. tbefore = ktime_get();
  1129. unmapped = iommu_unmap(domain,
  1130. __ALIGN_MASK(iova, align_mask),
  1131. size);
  1132. if (unmapped != size) {
  1133. seq_printf(s,
  1134. "Only unmapped %zx instead of %zx\n",
  1135. unmapped, size);
  1136. continue;
  1137. }
  1138. tafter = ktime_get();
  1139. diff = ktime_sub(tafter, tbefore);
  1140. unmap_elapsed_ns += ktime_to_ns(diff);
  1141. }
  1142. map_elapsed_ns = div_u64_rem(map_elapsed_ns, ddev->nr_iters, &map_elapsed_rem);
  1143. unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, ddev->nr_iters,
  1144. &unmap_elapsed_rem);
  1145. map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000, &map_elapsed_rem);
  1146. unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000, &unmap_elapsed_rem);
  1147. seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
  1148. _size_to_string(size), map_elapsed_us, map_elapsed_rem,
  1149. unmap_elapsed_us, unmap_elapsed_rem);
  1150. }
  1151. seq_putc(s, '\n');
  1152. seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map_sg", "iommu_unmap");
  1153. for (sz = sizes; *sz; ++sz) {
  1154. size_t size = *sz;
  1155. size_t unmapped;
  1156. u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
  1157. u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
  1158. u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
  1159. ktime_t tbefore, tafter, diff;
  1160. struct sg_table table;
  1161. unsigned long chunk_size = SZ_4K;
  1162. int i;
  1163. unsigned long align_mask = iommu_debug_get_align_mask(size);
  1164. if (iommu_debug_build_phoney_sg_table(dev, &table, size,
  1165. chunk_size)) {
  1166. seq_puts(s, "couldn't build phoney sg table! bailing...\n");
  1167. goto out;
  1168. }
  1169. for (i = 0; i < ddev->nr_iters; ++i) {
  1170. tbefore = ktime_get();
  1171. if (iommu_map_sgtable(domain, __ALIGN_MASK(iova, align_mask),
  1172. &table, IOMMU_READ | IOMMU_WRITE) != size) {
  1173. seq_puts(s, "Failed to map_sg\n");
  1174. goto next;
  1175. }
  1176. tafter = ktime_get();
  1177. diff = ktime_sub(tafter, tbefore);
  1178. map_elapsed_ns += ktime_to_ns(diff);
  1179. tbefore = ktime_get();
  1180. unmapped = iommu_unmap(domain,
  1181. __ALIGN_MASK(iova, align_mask),
  1182. size);
  1183. if (unmapped != size) {
  1184. seq_printf(s, "Only unmapped %zx instead of %zx\n",
  1185. unmapped, size);
  1186. goto next;
  1187. }
  1188. tafter = ktime_get();
  1189. diff = ktime_sub(tafter, tbefore);
  1190. unmap_elapsed_ns += ktime_to_ns(diff);
  1191. }
  1192. map_elapsed_ns = div_u64_rem(map_elapsed_ns, ddev->nr_iters, &map_elapsed_rem);
  1193. unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, ddev->nr_iters,
  1194. &unmap_elapsed_rem);
  1195. map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000, &map_elapsed_rem);
  1196. unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000, &unmap_elapsed_rem);
  1197. seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n", _size_to_string(size),
  1198. map_elapsed_us, map_elapsed_rem, unmap_elapsed_us, unmap_elapsed_rem);
  1199. next:
  1200. iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
  1201. }
  1202. out:
  1203. mutex_unlock(&ddev->state_lock);
  1204. }
  1205. static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
  1206. {
  1207. struct iommu_debug_device *ddev = s->private;
  1208. const size_t sizes[] = { SZ_4K, SZ_64K, SZ_1M, SZ_2M, SZ_1M * 12,
  1209. SZ_1M * 24, SZ_1M * 32, 0 };
  1210. iommu_debug_device_profiling(s, ddev, sizes);
  1211. return 0;
  1212. }
  1213. static int iommu_debug_profiling_open(struct inode *inode, struct file *file)
  1214. {
  1215. return single_open(file, iommu_debug_profiling_show, inode->i_private);
  1216. }
  1217. const struct file_operations iommu_debug_profiling_fops = {
  1218. .open = iommu_debug_profiling_open,
  1219. .read = seq_read,
  1220. .llseek = seq_lseek,
  1221. .release = single_release,
  1222. };
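/*
 * Reading the profiling node (name assumed from the fops above) resets the
 * selected usecase and prints, for each size from 4K up to 32M, the
 * iommu_map/iommu_map_sg and iommu_unmap latencies averaged over
 * ddev->nr_iters iterations:
 *
 *   cat profiling
 */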