pci_endpoint_test.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <[email protected]>
 */
  8. #include <linux/crc32.h>
  9. #include <linux/delay.h>
  10. #include <linux/fs.h>
  11. #include <linux/io.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/irq.h>
  14. #include <linux/miscdevice.h>
  15. #include <linux/module.h>
  16. #include <linux/mutex.h>
  17. #include <linux/random.h>
  18. #include <linux/slab.h>
  19. #include <linux/uaccess.h>
  20. #include <linux/pci.h>
  21. #include <linux/pci_ids.h>
  22. #include <linux/pci_regs.h>
  23. #include <uapi/linux/pcitest.h>
#define DRV_MODULE_NAME "pci-endpoint-test"

/* Local IRQ mode selector values (not the kernel's irq trigger types) */
#define IRQ_TYPE_UNDEFINED -1
#define IRQ_TYPE_LEGACY 0
#define IRQ_TYPE_MSI 1
#define IRQ_TYPE_MSIX 2

/* Register offsets within the endpoint's test register BAR */
#define PCI_ENDPOINT_TEST_MAGIC 0x0

#define PCI_ENDPOINT_TEST_COMMAND 0x4
/* COMMAND register bits: operation requested from the endpoint */
#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)

#define PCI_ENDPOINT_TEST_STATUS 0x8
/* STATUS register bits: outcome of the last command, set by the endpoint */
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
#define STATUS_WRITE_SUCCESS BIT(2)
#define STATUS_WRITE_FAIL BIT(3)
#define STATUS_COPY_SUCCESS BIT(4)
#define STATUS_COPY_FAIL BIT(5)
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)

/* Bus (DMA) addresses of the host-side source/destination buffers */
#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18

#define PCI_ENDPOINT_TEST_SIZE 0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28

#define PCI_ENDPOINT_TEST_FLAGS 0x2c
#define FLAG_USE_DMA BIT(0)

/* Device IDs not available from pci_ids.h */
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
#define PCI_DEVICE_ID_TI_J721S2 0xb013
#define PCI_DEVICE_ID_LS1088A 0x80c0

/* AM654's BAR0 is excluded from the PCITEST_BAR test (see the ioctl) */
#define is_am654_pci_dev(pdev) \
	((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1 0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031

/* Allocates the ".%d" suffix used in each device's misc name */
static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

/* Module parameters selecting the IRQ mode used at probe time */
static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
/* Standard PCI BAR indices accepted by the PCITEST_BAR ioctl */
enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};
/* Per-device state; recovered from the miscdevice via to_endpoint_test() */
struct pci_endpoint_test {
	struct pci_dev *pdev;
	void __iomem *base;			/* mapping of the test register BAR */
	void __iomem *bar[PCI_STD_NUM_BARS];	/* all mapped memory BARs */
	struct completion irq_raised;		/* signalled by the IRQ handler */
	int last_irq;				/* Linux IRQ that last fired */
	int num_irqs;				/* vectors currently allocated */
	int irq_type;				/* active IRQ_TYPE_* mode */
	/* mutex to protect the ioctls */
	struct mutex mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;		/* BAR holding the test registers */
	size_t alignment;			/* required DMA buffer alignment */
	const char *name;			/* "pci-endpoint-test.<id>" */
};
/* Per-match configuration carried in pci_device_id.driver_data */
struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};
/* Read a 32-bit register from the test register BAR. */
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}
/* Write a 32-bit register in the test register BAR. */
static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}
/* Read a 32-bit word from an arbitrary mapped BAR. */
static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}
/* Write a 32-bit word into an arbitrary mapped BAR. */
static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}
/*
 * Shared handler for every requested vector: if the endpoint set
 * STATUS_IRQ_RAISED, record which Linux IRQ fired and wake the waiting
 * ioctl, then write the status back with the bit cleared (ack).
 */
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
		reg &= ~STATUS_IRQ_RAISED;
	}
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
				 reg);

	return IRQ_HANDLED;
}
/* Free all IRQ vectors and mark the device as having no active mode. */
static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}
/*
 * Allocate IRQ vectors for the requested local mode (legacy/MSI/MSI-X).
 * On success test->num_irqs holds the number of vectors obtained.
 * Note: test->irq_type is set to 'type' even when allocation fails;
 * callers are expected to clean up via pci_endpoint_test_free_irq_vectors().
 */
static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq = -1;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	bool res = true;

	switch (type) {
	case IRQ_TYPE_LEGACY:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
		if (irq < 0)
			dev_err(dev, "Failed to get Legacy interrupt\n");
		break;
	case IRQ_TYPE_MSI:
		/* MSI allows at most 32 vectors per function */
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI interrupts\n");
		break;
	case IRQ_TYPE_MSIX:
		/* MSI-X allows up to 2048 vectors */
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI-X interrupts\n");
		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
	}

	if (irq < 0) {
		irq = 0;
		res = false;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return res;
}
/* Release every requested handler (devm-managed) and reset the count. */
static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}
  189. static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
  190. {
  191. int i;
  192. int err;
  193. struct pci_dev *pdev = test->pdev;
  194. struct device *dev = &pdev->dev;
  195. for (i = 0; i < test->num_irqs; i++) {
  196. err = devm_request_irq(dev, pci_irq_vector(pdev, i),
  197. pci_endpoint_test_irqhandler,
  198. IRQF_SHARED, test->name, test);
  199. if (err)
  200. goto fail;
  201. }
  202. return true;
  203. fail:
  204. switch (irq_type) {
  205. case IRQ_TYPE_LEGACY:
  206. dev_err(dev, "Failed to request IRQ %d for Legacy\n",
  207. pci_irq_vector(pdev, i));
  208. break;
  209. case IRQ_TYPE_MSI:
  210. dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
  211. pci_irq_vector(pdev, i),
  212. i + 1);
  213. break;
  214. case IRQ_TYPE_MSIX:
  215. dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
  216. pci_irq_vector(pdev, i),
  217. i + 1);
  218. break;
  219. }
  220. return false;
  221. }
/*
 * PCITEST_BAR: write a test pattern across the whole BAR and read it
 * back. Returns true when every word matches.
 */
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	/* Only scribble on the first register of the live test-reg BAR */
	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != 0xA0A0A0A0)
			return false;
	}

	return true;
}
  243. static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
  244. {
  245. u32 val;
  246. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
  247. IRQ_TYPE_LEGACY);
  248. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
  249. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
  250. COMMAND_RAISE_LEGACY_IRQ);
  251. val = wait_for_completion_timeout(&test->irq_raised,
  252. msecs_to_jiffies(1000));
  253. if (!val)
  254. return false;
  255. return true;
  256. }
  257. static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
  258. u16 msi_num, bool msix)
  259. {
  260. u32 val;
  261. struct pci_dev *pdev = test->pdev;
  262. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
  263. msix == false ? IRQ_TYPE_MSI :
  264. IRQ_TYPE_MSIX);
  265. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
  266. pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
  267. msix == false ? COMMAND_RAISE_MSI_IRQ :
  268. COMMAND_RAISE_MSIX_IRQ);
  269. val = wait_for_completion_timeout(&test->irq_raised,
  270. msecs_to_jiffies(1000));
  271. if (!val)
  272. return false;
  273. if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
  274. return true;
  275. return false;
  276. }
/*
 * Sanity-check a user transfer request: size must be non-zero and must
 * not overflow once callers add the alignment slack to it.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * PCITEST_COPY: have the endpoint transfer 'size' random bytes from a
 * host source buffer to a host destination buffer, then compare the
 * CRC32 of both on the host. Returns true when the checksums match.
 */
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;	/* per-device mode (shadows module param) */
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate so an 'alignment'-aligned sub-buffer always fits */
	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	/*
	 * Round the bus address up to 'alignment' and advance the CPU
	 * pointer by the same offset so both views stay in sync.
	 */
	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	/* The endpoint raises an IRQ when the copy is complete */
	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU inspects the DMA'd data */
	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}
/*
 * PCITEST_WRITE: fill a host buffer with random data, program its CRC32
 * and bus address into the endpoint, then issue COMMAND_READ — i.e. the
 * endpoint *reads* the buffer and verifies the checksum on its side.
 * Returns true when the endpoint reports STATUS_READ_SUCCESS.
 */
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;	/* per-device mode (shadows module param) */
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate so an 'alignment'-aligned sub-buffer always fits */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	/* Align the bus address; shift the CPU pointer by the same offset */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}
/*
 * PCITEST_READ: program a host destination buffer's bus address into
 * the endpoint, then issue COMMAND_WRITE — i.e. the endpoint *writes*
 * 'size' bytes into host memory. Returns true when the CRC32 of the
 * received data matches the checksum the endpoint reports.
 */
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;	/* per-device mode (shadows module param) */
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate so an 'alignment'-aligned sub-buffer always fits */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	/* Align the bus address; shift the CPU pointer by the same offset */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU reads the DMA'd data */
	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}
/* PCITEST_CLEAR_IRQ: drop all requested handlers and free the vectors. */
static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	return true;
}
/*
 * PCITEST_SET_IRQTYPE: switch this device to a new IRQ delivery mode.
 * A no-op when the requested mode is already active; otherwise the
 * current handlers/vectors are torn down and re-allocated.
 */
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}
  592. static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
  593. unsigned long arg)
  594. {
  595. int ret = -EINVAL;
  596. enum pci_barno bar;
  597. struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
  598. struct pci_dev *pdev = test->pdev;
  599. mutex_lock(&test->mutex);
  600. reinit_completion(&test->irq_raised);
  601. test->last_irq = -ENODATA;
  602. switch (cmd) {
  603. case PCITEST_BAR:
  604. bar = arg;
  605. if (bar > BAR_5)
  606. goto ret;
  607. if (is_am654_pci_dev(pdev) && bar == BAR_0)
  608. goto ret;
  609. ret = pci_endpoint_test_bar(test, bar);
  610. break;
  611. case PCITEST_LEGACY_IRQ:
  612. ret = pci_endpoint_test_legacy_irq(test);
  613. break;
  614. case PCITEST_MSI:
  615. case PCITEST_MSIX:
  616. ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
  617. break;
  618. case PCITEST_WRITE:
  619. ret = pci_endpoint_test_write(test, arg);
  620. break;
  621. case PCITEST_READ:
  622. ret = pci_endpoint_test_read(test, arg);
  623. break;
  624. case PCITEST_COPY:
  625. ret = pci_endpoint_test_copy(test, arg);
  626. break;
  627. case PCITEST_SET_IRQTYPE:
  628. ret = pci_endpoint_test_set_irq(test, arg);
  629. break;
  630. case PCITEST_GET_IRQTYPE:
  631. ret = irq_type;
  632. break;
  633. case PCITEST_CLEAR_IRQ:
  634. ret = pci_endpoint_test_clear_irq(test);
  635. break;
  636. }
  637. ret:
  638. mutex_unlock(&test->mutex);
  639. return ret;
  640. }
/* Character-device ops: the entire interface is ioctl-based. */
static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
/*
 * Probe: enable and map the device, allocate IRQ vectors for the
 * configured mode, request handlers, and register the misc device
 * "pci-endpoint-test.<id>" that exposes the test ioctls.
 */
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	/* Module parameter can force legacy interrupts ... */
	if (no_msi)
		irq_type = IRQ_TYPE_LEGACY;

	/* ... but per-device match data takes precedence */
	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	/* Prefer a 48-bit DMA mask, fall back to 32-bit */
	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
		err = -EINVAL;
		goto err_disable_irq;
	}

	/* Map every memory BAR; only the test register BAR is mandatory */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test)) {
		err = -EINVAL;
		goto err_kfree_test_name;
	}

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

	/* Unwind in strict reverse order of acquisition */
err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}
/*
 * Teardown: mirror of probe. The instance id is recovered by parsing
 * the misc device name ("pci-endpoint-test.<id>").
 */
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	/*
	 * NOTE(review): an unparsable or negative id returns early and
	 * skips all cleanup below — should be unreachable since probe
	 * formats the name itself, but worth confirming.
	 */
	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	/* Quiesce interrupts before tearing down the device node */
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/* Default configuration: test registers in BAR0, 4K alignment, MSI */
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

/* TI AM654: test registers live in BAR2 (BAR0 is excluded, see ioctl) */
static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

/* TI J721E family: BAR0 (zero-initialized default), 256-byte alignment */
static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};
/*
 * Supported devices. Entries without driver_data get no alignment
 * constraint and rely on the driver's zero-initialized defaults.
 */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
/* PCI driver glue and module boilerplate */
static struct pci_driver pci_endpoint_test_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = pci_endpoint_test_tbl,
	.probe = pci_endpoint_test_probe,
	.remove = pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <[email protected]>");
MODULE_LICENSE("GPL v2");