pci-epf-test.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <[email protected]>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>

#define IRQ_TYPE_LEGACY			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

static struct workqueue_struct *kpcitest_workqueue;

struct pci_epf_test {
        void                    *reg[PCI_STD_NUM_BARS];
        struct pci_epf          *epf;
        enum pci_barno          test_reg_bar;
        size_t                  msix_table_offset;
        struct delayed_work     cmd_handler;
        struct dma_chan         *dma_chan_tx;
        struct dma_chan         *dma_chan_rx;
        struct dma_chan         *transfer_chan;
        dma_cookie_t            transfer_cookie;
        enum dma_status         transfer_status;
        struct completion       transfer_complete;
        bool                    dma_supported;
        bool                    dma_private;
        const struct pci_epc_features *epc_features;
};

struct pci_epf_test_reg {
        u32     magic;
        u32     command;
        u32     status;
        u64     src_addr;
        u64     dst_addr;
        u32     size;
        u32     checksum;
        u32     irq_type;
        u32     irq_number;
        u32     flags;
} __packed;
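
/*
 * This register block is what the host side maps through the test BAR:
 * the host writes src_addr/dst_addr/size/irq_* and one COMMAND_* bit,
 * then waits for the matching STATUS_* result bits set by the command
 * handler below.
 */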

static struct pci_epf_header test_header = {
        .vendorid       = PCI_ANY_ID,
        .deviceid       = PCI_ANY_ID,
        .baseclass_code = PCI_CLASS_OTHERS,
        .interrupt_pin  = PCI_INTERRUPT_INTA,
};

static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };

static void pci_epf_test_dma_callback(void *param)
{
        struct pci_epf_test *epf_test = param;
        struct dma_tx_state state;

        epf_test->transfer_status =
                dmaengine_tx_status(epf_test->transfer_chan,
                                    epf_test->transfer_cookie, &state);
        if (epf_test->transfer_status == DMA_COMPLETE ||
            epf_test->transfer_status == DMA_ERROR)
                complete(&epf_test->transfer_complete);
}

/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *                                data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *           address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *           address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
 *
 * The function returns '0' on success and negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
                                      dma_addr_t dma_dst, dma_addr_t dma_src,
                                      size_t len, dma_addr_t dma_remote,
                                      enum dma_transfer_direction dir)
{
        struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
                                 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
        dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
        enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
        struct pci_epf *epf = epf_test->epf;
        struct dma_async_tx_descriptor *tx;
        struct dma_slave_config sconf = {};
        struct device *dev = &epf->dev;
        int ret;

        if (IS_ERR_OR_NULL(chan)) {
                dev_err(dev, "Invalid DMA memcpy channel\n");
                return -EINVAL;
        }

        if (epf_test->dma_private) {
                sconf.direction = dir;
                if (dir == DMA_MEM_TO_DEV)
                        sconf.dst_addr = dma_remote;
                else
                        sconf.src_addr = dma_remote;

                if (dmaengine_slave_config(chan, &sconf)) {
                        dev_err(dev, "DMA slave config fail\n");
                        return -EIO;
                }
                tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
                                                 flags);
        } else {
                tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
                                               flags);
        }

        if (!tx) {
                dev_err(dev, "Failed to prepare DMA memcpy\n");
                return -EIO;
        }

        reinit_completion(&epf_test->transfer_complete);
        epf_test->transfer_chan = chan;
        tx->callback = pci_epf_test_dma_callback;
        tx->callback_param = epf_test;
        epf_test->transfer_cookie = tx->tx_submit(tx);

        ret = dma_submit_error(epf_test->transfer_cookie);
        if (ret) {
                dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
                goto terminate;
        }

        dma_async_issue_pending(chan);
        ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
        if (ret < 0) {
                dev_err(dev, "DMA wait_for_completion interrupted\n");
                goto terminate;
        }

        if (epf_test->transfer_status == DMA_ERROR) {
                dev_err(dev, "DMA transfer failed\n");
                ret = -EIO;
        }

terminate:
        dmaengine_terminate_sync(chan);

        return ret;
}

struct epf_dma_filter {
        struct device *dev;
        u32 dma_mask;
};

static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
{
        struct epf_dma_filter *filter = node;
        struct dma_slave_caps caps;

        memset(&caps, 0, sizeof(caps));
        dma_get_slave_caps(chan, &caps);

        return chan->device->dev == filter->dev
                && (filter->dma_mask & caps.directions);
}

/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
        struct pci_epf *epf = epf_test->epf;
        struct device *dev = &epf->dev;
        struct epf_dma_filter filter;
        struct dma_chan *dma_chan;
        dma_cap_mask_t mask;
        int ret;

        filter.dev = epf->epc->dev.parent;
        filter.dma_mask = BIT(DMA_DEV_TO_MEM);

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
        if (!dma_chan) {
                dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
                goto fail_back_tx;
        }

        epf_test->dma_chan_rx = dma_chan;

        filter.dma_mask = BIT(DMA_MEM_TO_DEV);
        dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
        if (!dma_chan) {
                dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
                goto fail_back_rx;
        }

        epf_test->dma_chan_tx = dma_chan;
        epf_test->dma_private = true;

        init_completion(&epf_test->transfer_complete);

        return 0;

fail_back_rx:
        dma_release_channel(epf_test->dma_chan_rx);
        epf_test->dma_chan_rx = NULL;

fail_back_tx:
        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        dma_chan = dma_request_chan_by_mask(&mask);
        if (IS_ERR(dma_chan)) {
                ret = PTR_ERR(dma_chan);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "Failed to get DMA channel\n");
                return ret;
        }
        init_completion(&epf_test->transfer_complete);

        epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

        return 0;
}
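
/*
 * Channel selection above prefers dedicated DMA_SLAVE channels exposed by
 * the EPC's own DMA controller, one per direction, driven through
 * dmaengine_prep_slave_single(). If no matching channel exists, it falls
 * back to a single generic DMA_MEMCPY channel shared by both directions.
 */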

/**
 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Helper to cleanup EPF test DMA channel.
 */
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
        if (!epf_test->dma_supported)
                return;

        dma_release_channel(epf_test->dma_chan_tx);
        if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
                epf_test->dma_chan_tx = NULL;
                epf_test->dma_chan_rx = NULL;
                return;
        }

        dma_release_channel(epf_test->dma_chan_rx);
        epf_test->dma_chan_rx = NULL;
}

static void pci_epf_test_print_rate(const char *ops, u64 size,
                                    struct timespec64 *start,
                                    struct timespec64 *end, bool dma)
{
        struct timespec64 ts;
        u64 rate, ns;

        ts = timespec64_sub(*end, *start);

        /* convert both size (stored in 'rate') and time in terms of 'ns' */
        ns = timespec64_to_ns(&ts);
        rate = size * NSEC_PER_SEC;

        /* Divide both size (stored in 'rate') and ns by a common factor */
        while (ns > UINT_MAX) {
                rate >>= 1;
                ns >>= 1;
        }

        if (!ns)
                return;

        /* calculate the rate */
        do_div(rate, (uint32_t)ns);

        pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t"
                "Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO",
                (u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024);
}
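
/*
 * Worked example of the rate math above: a 1048576-byte transfer that takes
 * 2 ms gives rate = (1048576 * NSEC_PER_SEC) / 2000000 = 524288000 bytes/s,
 * printed as 512000 KB/s. The halving loop only kicks in once the elapsed
 * time exceeds UINT_MAX nanoseconds (about 4.3 s).
 */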

static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
        int ret;
        bool use_dma;
        void __iomem *src_addr;
        void __iomem *dst_addr;
        phys_addr_t src_phys_addr;
        phys_addr_t dst_phys_addr;
        struct timespec64 start, end;
        struct pci_epf *epf = epf_test->epf;
        struct device *dev = &epf->dev;
        struct pci_epc *epc = epf->epc;
        enum pci_barno test_reg_bar = epf_test->test_reg_bar;
        struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

        src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
        if (!src_addr) {
                dev_err(dev, "Failed to allocate source address\n");
                reg->status = STATUS_SRC_ADDR_INVALID;
                ret = -ENOMEM;
                goto err;
        }

        ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr,
                               reg->src_addr, reg->size);
        if (ret) {
                dev_err(dev, "Failed to map source address\n");
                reg->status = STATUS_SRC_ADDR_INVALID;
                goto err_src_addr;
        }

        dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
        if (!dst_addr) {
                dev_err(dev, "Failed to allocate destination address\n");
                reg->status = STATUS_DST_ADDR_INVALID;
                ret = -ENOMEM;
                goto err_src_map_addr;
        }

        ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr,
                               reg->dst_addr, reg->size);
        if (ret) {
                dev_err(dev, "Failed to map destination address\n");
                reg->status = STATUS_DST_ADDR_INVALID;
                goto err_dst_addr;
        }

        ktime_get_ts64(&start);
        use_dma = !!(reg->flags & FLAG_USE_DMA);
        if (use_dma) {
                if (!epf_test->dma_supported) {
                        dev_err(dev, "Cannot transfer data using DMA\n");
                        ret = -EINVAL;
                        goto err_map_addr;
                }

                if (epf_test->dma_private) {
                        dev_err(dev, "Cannot transfer data using DMA\n");
                        ret = -EINVAL;
                        goto err_map_addr;
                }

                ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
                                                 src_phys_addr, reg->size, 0,
                                                 DMA_MEM_TO_MEM);
                if (ret)
                        dev_err(dev, "Data transfer failed\n");
        } else {
                void *buf;

                buf = kzalloc(reg->size, GFP_KERNEL);
                if (!buf) {
                        ret = -ENOMEM;
                        goto err_map_addr;
                }

                memcpy_fromio(buf, src_addr, reg->size);
                memcpy_toio(dst_addr, buf, reg->size);
                kfree(buf);
        }
        ktime_get_ts64(&end);
        pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);

err_map_addr:
        pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr);

err_dst_addr:
        pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
        pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr);

err_src_addr:
        pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);

err:
        return ret;
}

static int pci_epf_test_read(struct pci_epf_test *epf_test)
{
        int ret;
        void __iomem *src_addr;
        void *buf;
        u32 crc32;
        bool use_dma;
        phys_addr_t phys_addr;
        phys_addr_t dst_phys_addr;
        struct timespec64 start, end;
        struct pci_epf *epf = epf_test->epf;
        struct device *dev = &epf->dev;
        struct pci_epc *epc = epf->epc;
        struct device *dma_dev = epf->epc->dev.parent;
        enum pci_barno test_reg_bar = epf_test->test_reg_bar;
        struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

        src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
        if (!src_addr) {
                dev_err(dev, "Failed to allocate address\n");
                reg->status = STATUS_SRC_ADDR_INVALID;
                ret = -ENOMEM;
                goto err;
        }

        ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
                               reg->src_addr, reg->size);
        if (ret) {
                dev_err(dev, "Failed to map address\n");
                reg->status = STATUS_SRC_ADDR_INVALID;
                goto err_addr;
        }

        buf = kzalloc(reg->size, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto err_map_addr;
        }

        use_dma = !!(reg->flags & FLAG_USE_DMA);
        if (use_dma) {
                if (!epf_test->dma_supported) {
                        dev_err(dev, "Cannot transfer data using DMA\n");
                        ret = -EINVAL;
                        goto err_dma_map;
                }

                dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
                                               DMA_FROM_DEVICE);
                if (dma_mapping_error(dma_dev, dst_phys_addr)) {
                        dev_err(dev, "Failed to map destination buffer addr\n");
                        ret = -ENOMEM;
                        goto err_dma_map;
                }

                ktime_get_ts64(&start);
                ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
                                                 phys_addr, reg->size,
                                                 reg->src_addr, DMA_DEV_TO_MEM);
                if (ret)
                        dev_err(dev, "Data transfer failed\n");
                ktime_get_ts64(&end);

                dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
                                 DMA_FROM_DEVICE);
        } else {
                ktime_get_ts64(&start);
                memcpy_fromio(buf, src_addr, reg->size);
                ktime_get_ts64(&end);
        }

        pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma);

        crc32 = crc32_le(~0, buf, reg->size);
        if (crc32 != reg->checksum)
                ret = -EIO;

err_dma_map:
        kfree(buf);

err_map_addr:
        pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
        pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
        return ret;
}

static int pci_epf_test_write(struct pci_epf_test *epf_test)
{
        int ret;
        void __iomem *dst_addr;
        void *buf;
        bool use_dma;
        phys_addr_t phys_addr;
        phys_addr_t src_phys_addr;
        struct timespec64 start, end;
        struct pci_epf *epf = epf_test->epf;
        struct device *dev = &epf->dev;
        struct pci_epc *epc = epf->epc;
        struct device *dma_dev = epf->epc->dev.parent;
        enum pci_barno test_reg_bar = epf_test->test_reg_bar;
        struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

        dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
        if (!dst_addr) {
                dev_err(dev, "Failed to allocate address\n");
                reg->status = STATUS_DST_ADDR_INVALID;
                ret = -ENOMEM;
                goto err;
        }

        ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr,
                               reg->dst_addr, reg->size);
        if (ret) {
                dev_err(dev, "Failed to map address\n");
                reg->status = STATUS_DST_ADDR_INVALID;
                goto err_addr;
        }

        buf = kzalloc(reg->size, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto err_map_addr;
        }

        get_random_bytes(buf, reg->size);
        reg->checksum = crc32_le(~0, buf, reg->size);

        use_dma = !!(reg->flags & FLAG_USE_DMA);
        if (use_dma) {
                if (!epf_test->dma_supported) {
                        dev_err(dev, "Cannot transfer data using DMA\n");
                        ret = -EINVAL;
                        goto err_dma_map;
                }

                src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
                                               DMA_TO_DEVICE);
                if (dma_mapping_error(dma_dev, src_phys_addr)) {
                        dev_err(dev, "Failed to map source buffer addr\n");
                        ret = -ENOMEM;
                        goto err_dma_map;
                }

                ktime_get_ts64(&start);
                ret = pci_epf_test_data_transfer(epf_test, phys_addr,
                                                 src_phys_addr, reg->size,
                                                 reg->dst_addr,
                                                 DMA_MEM_TO_DEV);
                if (ret)
                        dev_err(dev, "Data transfer failed\n");
                ktime_get_ts64(&end);

                dma_unmap_single(dma_dev, src_phys_addr, reg->size,
                                 DMA_TO_DEVICE);
        } else {
                ktime_get_ts64(&start);
                memcpy_toio(dst_addr, buf, reg->size);
                ktime_get_ts64(&end);
        }

        pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma);

        /*
         * Wait 1ms in order for the write to complete. Without this delay,
         * an L3 error is observed in the host system.
         */
        usleep_range(1000, 2000);

err_dma_map:
        kfree(buf);

err_map_addr:
        pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);

err_addr:
        pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);

err:
        return ret;
}

static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
                                   u16 irq)
{
        struct pci_epf *epf = epf_test->epf;
        struct device *dev = &epf->dev;
        struct pci_epc *epc = epf->epc;
        enum pci_barno test_reg_bar = epf_test->test_reg_bar;
        struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

        reg->status |= STATUS_IRQ_RAISED;

        switch (irq_type) {
        case IRQ_TYPE_LEGACY:
                pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
                                  PCI_EPC_IRQ_LEGACY, 0);
                break;
        case IRQ_TYPE_MSI:
                pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
                                  PCI_EPC_IRQ_MSI, irq);
                break;
        case IRQ_TYPE_MSIX:
                pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
                                  PCI_EPC_IRQ_MSIX, irq);
                break;
        default:
                dev_err(dev, "Failed to raise IRQ, unknown type\n");
                break;
        }
}

static void pci_epf_test_cmd_handler(struct work_struct *work)
{
        int ret;
        int count;
        u32 command;
        struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
                                                     cmd_handler.work);
        struct pci_epf *epf = epf_test->epf;
        struct device *dev = &epf->dev;
        struct pci_epc *epc = epf->epc;
        enum pci_barno test_reg_bar = epf_test->test_reg_bar;
        struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

        command = reg->command;
        if (!command)
                goto reset_handler;

        reg->command = 0;
        reg->status = 0;

        if (reg->irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Failed to detect IRQ type\n");
                goto reset_handler;
        }

        if (command & COMMAND_RAISE_LEGACY_IRQ) {
                reg->status = STATUS_IRQ_RAISED;
                pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
                                  PCI_EPC_IRQ_LEGACY, 0);
                goto reset_handler;
        }

        if (command & COMMAND_WRITE) {
                ret = pci_epf_test_write(epf_test);
                if (ret)
                        reg->status |= STATUS_WRITE_FAIL;
                else
                        reg->status |= STATUS_WRITE_SUCCESS;
                pci_epf_test_raise_irq(epf_test, reg->irq_type,
                                       reg->irq_number);
                goto reset_handler;
        }

        if (command & COMMAND_READ) {
                ret = pci_epf_test_read(epf_test);
                if (!ret)
                        reg->status |= STATUS_READ_SUCCESS;
                else
                        reg->status |= STATUS_READ_FAIL;
                pci_epf_test_raise_irq(epf_test, reg->irq_type,
                                       reg->irq_number);
                goto reset_handler;
        }

        if (command & COMMAND_COPY) {
                ret = pci_epf_test_copy(epf_test);
                if (!ret)
                        reg->status |= STATUS_COPY_SUCCESS;
                else
                        reg->status |= STATUS_COPY_FAIL;
                pci_epf_test_raise_irq(epf_test, reg->irq_type,
                                       reg->irq_number);
                goto reset_handler;
        }

        if (command & COMMAND_RAISE_MSI_IRQ) {
                count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
                if (reg->irq_number > count || count <= 0)
                        goto reset_handler;
                reg->status = STATUS_IRQ_RAISED;
                pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
                                  PCI_EPC_IRQ_MSI, reg->irq_number);
                goto reset_handler;
        }

        if (command & COMMAND_RAISE_MSIX_IRQ) {
                count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
                if (reg->irq_number > count || count <= 0)
                        goto reset_handler;
                reg->status = STATUS_IRQ_RAISED;
                pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
                                  PCI_EPC_IRQ_MSIX, reg->irq_number);
                goto reset_handler;
        }

reset_handler:
        queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
                           msecs_to_jiffies(1));
}

static void pci_epf_test_unbind(struct pci_epf *epf)
{
        struct pci_epf_test *epf_test = epf_get_drvdata(epf);
        struct pci_epc *epc = epf->epc;
        struct pci_epf_bar *epf_bar;
        int bar;

        cancel_delayed_work(&epf_test->cmd_handler);
        pci_epf_test_clean_dma_chan(epf_test);
        for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                epf_bar = &epf->bar[bar];

                if (epf_test->reg[bar]) {
                        pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
                                          epf_bar);
                        pci_epf_free_space(epf, epf_test->reg[bar], bar,
                                           PRIMARY_INTERFACE);
                }
        }
}

static int pci_epf_test_set_bar(struct pci_epf *epf)
{
        int bar, add;
        int ret;
        struct pci_epf_bar *epf_bar;
        struct pci_epc *epc = epf->epc;
        struct device *dev = &epf->dev;
        struct pci_epf_test *epf_test = epf_get_drvdata(epf);
        enum pci_barno test_reg_bar = epf_test->test_reg_bar;
        const struct pci_epc_features *epc_features;

        epc_features = epf_test->epc_features;

        for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
                epf_bar = &epf->bar[bar];
                /*
                 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
                 * if the specific implementation required a 64-bit BAR,
                 * even if we only requested a 32-bit BAR.
                 */
                add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

                if (!!(epc_features->reserved_bar & (1 << bar)))
                        continue;

                ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
                                      epf_bar);
                if (ret) {
                        pci_epf_free_space(epf, epf_test->reg[bar], bar,
                                           PRIMARY_INTERFACE);
                        dev_err(dev, "Failed to set BAR%d\n", bar);
                        if (bar == test_reg_bar)
                                return ret;
                }
        }

        return 0;
}

static int pci_epf_test_core_init(struct pci_epf *epf)
{
        struct pci_epf_test *epf_test = epf_get_drvdata(epf);
        struct pci_epf_header *header = epf->header;
        const struct pci_epc_features *epc_features;
        struct pci_epc *epc = epf->epc;
        struct device *dev = &epf->dev;
        bool msix_capable = false;
        bool msi_capable = true;
        int ret;

        epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
        if (epc_features) {
                msix_capable = epc_features->msix_capable;
                msi_capable = epc_features->msi_capable;
        }

        if (epf->vfunc_no <= 1) {
                ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
                if (ret) {
                        dev_err(dev, "Configuration header write failed\n");
                        return ret;
                }
        }

        ret = pci_epf_test_set_bar(epf);
        if (ret)
                return ret;

        if (msi_capable) {
                ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
                                      epf->msi_interrupts);
                if (ret) {
                        dev_err(dev, "MSI configuration failed\n");
                        return ret;
                }
        }

        if (msix_capable) {
                ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
                                       epf->msix_interrupts,
                                       epf_test->test_reg_bar,
                                       epf_test->msix_table_offset);
                if (ret) {
                        dev_err(dev, "MSI-X configuration failed\n");
                        return ret;
                }
        }

        return 0;
}

static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
                                 void *data)
{
        struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
        struct pci_epf_test *epf_test = epf_get_drvdata(epf);
        int ret;

        switch (val) {
        case CORE_INIT:
                ret = pci_epf_test_core_init(epf);
                if (ret)
                        return NOTIFY_BAD;
                break;

        case LINK_UP:
                queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
                                   msecs_to_jiffies(1));
                break;

        default:
                dev_err(&epf->dev, "Invalid EPF test notifier event\n");
                return NOTIFY_BAD;
        }

        return NOTIFY_OK;
}

static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
        struct pci_epf_test *epf_test = epf_get_drvdata(epf);
        struct device *dev = &epf->dev;
        struct pci_epf_bar *epf_bar;
        size_t msix_table_size = 0;
        size_t test_reg_bar_size;
        size_t pba_size = 0;
        bool msix_capable;
        void *base;
        int bar, add;
        enum pci_barno test_reg_bar = epf_test->test_reg_bar;
        const struct pci_epc_features *epc_features;
        size_t test_reg_size;

        epc_features = epf_test->epc_features;

        test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

        msix_capable = epc_features->msix_capable;
        if (msix_capable) {
                msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
                epf_test->msix_table_offset = test_reg_bar_size;
                /* Align to QWORD or 8 Bytes */
                pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
        }
        test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

        if (epc_features->bar_fixed_size[test_reg_bar]) {
                if (test_reg_size > bar_size[test_reg_bar])
                        return -ENOMEM;
                test_reg_size = bar_size[test_reg_bar];
        }

        base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
                                   epc_features->align, PRIMARY_INTERFACE);
        if (!base) {
                dev_err(dev, "Failed to allocate register space\n");
                return -ENOMEM;
        }
        epf_test->reg[test_reg_bar] = base;

        for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
                epf_bar = &epf->bar[bar];
                add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

                if (bar == test_reg_bar)
                        continue;

                if (!!(epc_features->reserved_bar & (1 << bar)))
                        continue;

                base = pci_epf_alloc_space(epf, bar_size[bar], bar,
                                           epc_features->align,
                                           PRIMARY_INTERFACE);
                if (!base)
                        dev_err(dev, "Failed to allocate space for BAR%d\n",
                                bar);
                epf_test->reg[bar] = base;
        }

        return 0;
}

static void pci_epf_configure_bar(struct pci_epf *epf,
                                  const struct pci_epc_features *epc_features)
{
        struct pci_epf_bar *epf_bar;
        bool bar_fixed_64bit;
        int i;

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                epf_bar = &epf->bar[i];
                bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
                if (bar_fixed_64bit)
                        epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
                if (epc_features->bar_fixed_size[i])
                        bar_size[i] = epc_features->bar_fixed_size[i];
        }
}

static int pci_epf_test_bind(struct pci_epf *epf)
{
        int ret;
        struct pci_epf_test *epf_test = epf_get_drvdata(epf);
        const struct pci_epc_features *epc_features;
        enum pci_barno test_reg_bar = BAR_0;
        struct pci_epc *epc = epf->epc;
        bool linkup_notifier = false;
        bool core_init_notifier = false;

        if (WARN_ON_ONCE(!epc))
                return -EINVAL;

        epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
        if (!epc_features) {
                dev_err(&epf->dev, "epc_features not implemented\n");
                return -EOPNOTSUPP;
        }

        linkup_notifier = epc_features->linkup_notifier;
        core_init_notifier = epc_features->core_init_notifier;
        test_reg_bar = pci_epc_get_first_free_bar(epc_features);
        if (test_reg_bar < 0)
                return -EINVAL;

        pci_epf_configure_bar(epf, epc_features);
        epf_test->test_reg_bar = test_reg_bar;
        epf_test->epc_features = epc_features;

        ret = pci_epf_test_alloc_space(epf);
        if (ret)
                return ret;

        if (!core_init_notifier) {
                ret = pci_epf_test_core_init(epf);
                if (ret)
                        return ret;
        }

        epf_test->dma_supported = true;

        ret = pci_epf_test_init_dma_chan(epf_test);
        if (ret)
                epf_test->dma_supported = false;

        if (linkup_notifier || core_init_notifier) {
                epf->nb.notifier_call = pci_epf_test_notifier;
                pci_epc_register_notifier(epc, &epf->nb);
        } else {
                queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
        }

        return 0;
}

static const struct pci_epf_device_id pci_epf_test_ids[] = {
        {
                .name = "pci_epf_test",
        },
        {},
};

static int pci_epf_test_probe(struct pci_epf *epf)
{
        struct pci_epf_test *epf_test;
        struct device *dev = &epf->dev;

        epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
        if (!epf_test)
                return -ENOMEM;

        epf->header = &test_header;
        epf_test->epf = epf;

        INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);

        epf_set_drvdata(epf, epf_test);
        return 0;
}

static struct pci_epf_ops ops = {
        .unbind = pci_epf_test_unbind,
        .bind   = pci_epf_test_bind,
};

static struct pci_epf_driver test_driver = {
        .driver.name    = "pci_epf_test",
        .probe          = pci_epf_test_probe,
        .id_table       = pci_epf_test_ids,
        .ops            = &ops,
        .owner          = THIS_MODULE,
};

static int __init pci_epf_test_init(void)
{
        int ret;

        kpcitest_workqueue = alloc_workqueue("kpcitest",
                                             WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!kpcitest_workqueue) {
                pr_err("Failed to allocate the kpcitest work queue\n");
                return -ENOMEM;
        }

        ret = pci_epf_register_driver(&test_driver);
        if (ret) {
                destroy_workqueue(kpcitest_workqueue);
                pr_err("Failed to register pci epf test driver --> %d\n", ret);
                return ret;
        }

        return 0;
}
module_init(pci_epf_test_init);

static void __exit pci_epf_test_exit(void)
{
        if (kpcitest_workqueue)
                destroy_workqueue(kpcitest_workqueue);
        pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);

MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <[email protected]>");
MODULE_LICENSE("GPL v2");
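
/*
 * Endpoint-side setup is done through configfs, for example (EPC name and
 * device IDs are platform specific; see
 * Documentation/PCI/endpoint/pci-test-howto.rst):
 *
 *      modprobe pci-epf-test
 *      mkdir /sys/kernel/config/pci_ep/functions/pci_epf_test/func1
 *      echo 0x104c > /sys/kernel/config/pci_ep/functions/pci_epf_test/func1/vendorid
 *      echo 0xb500 > /sys/kernel/config/pci_ep/functions/pci_epf_test/func1/deviceid
 *      ln -s /sys/kernel/config/pci_ep/functions/pci_epf_test/func1 \
 *              /sys/kernel/config/pci_ep/controllers/<epc>/
 *      echo 1 > /sys/kernel/config/pci_ep/controllers/<epc>/start
 *
 * On the host, the pci_endpoint_test driver binds to the exposed function
 * and the pcitest tool issues the COMMAND_* operations handled above.
 */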