dma_test.c

// SPDX-License-Identifier: GPL-2.0
/*
 * DMA traffic test driver
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Isaac Hazan <[email protected]>
 *          Mika Westerberg <[email protected]>
 */

#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>

#define DMA_TEST_TX_RING_SIZE   64
#define DMA_TEST_RX_RING_SIZE   256
#define DMA_TEST_FRAME_SIZE     SZ_4K
#define DMA_TEST_DATA_PATTERN   0x0123456789abcdefLL
#define DMA_TEST_MAX_PACKETS    1000

enum dma_test_frame_pdf {
        DMA_TEST_PDF_FRAME_START = 1,
        DMA_TEST_PDF_FRAME_END,
};

struct dma_test_frame {
        struct dma_test *dma_test;
        void *data;
        struct ring_frame frame;
};

enum dma_test_test_error {
        DMA_TEST_NO_ERROR,
        DMA_TEST_INTERRUPTED,
        DMA_TEST_BUFFER_ERROR,
        DMA_TEST_DMA_ERROR,
        DMA_TEST_CONFIG_ERROR,
        DMA_TEST_SPEED_ERROR,
        DMA_TEST_WIDTH_ERROR,
        DMA_TEST_BONDING_ERROR,
        DMA_TEST_PACKET_ERROR,
};

static const char * const dma_test_error_names[] = {
        [DMA_TEST_NO_ERROR] = "no errors",
        [DMA_TEST_INTERRUPTED] = "interrupted by signal",
        [DMA_TEST_BUFFER_ERROR] = "no memory for packet buffers",
        [DMA_TEST_DMA_ERROR] = "DMA ring setup failed",
        [DMA_TEST_CONFIG_ERROR] = "configuration is not valid",
        [DMA_TEST_SPEED_ERROR] = "unexpected link speed",
        [DMA_TEST_WIDTH_ERROR] = "unexpected link width",
        [DMA_TEST_BONDING_ERROR] = "lane bonding configuration error",
        [DMA_TEST_PACKET_ERROR] = "packet check failed",
};

enum dma_test_result {
        DMA_TEST_NOT_RUN,
        DMA_TEST_SUCCESS,
        DMA_TEST_FAIL,
};

static const char * const dma_test_result_names[] = {
        [DMA_TEST_NOT_RUN] = "not run",
        [DMA_TEST_SUCCESS] = "success",
        [DMA_TEST_FAIL] = "failed",
};

/**
 * struct dma_test - DMA test device driver private data
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @rx_ring: Software ring holding RX frames
 * @rx_hopid: HopID used for receiving frames
 * @tx_ring: Software ring holding TX frames
 * @tx_hopid: HopID used for sending frames
 * @packets_to_send: Number of packets to send
 * @packets_to_receive: Number of packets to receive
 * @packets_sent: Actual number of packets sent
 * @packets_received: Actual number of packets received
 * @link_speed: Expected link speed (Gb/s), %0 to use whatever is negotiated
 * @link_width: Expected link width (lanes), %0 to use whatever is negotiated
 * @crc_errors: Number of CRC errors during the test run
 * @buffer_overflow_errors: Number of buffer overflow errors during the test
 *                          run
 * @result: Result of the last run
 * @error_code: Error code of the last run
 * @complete: Used to wait for the Rx to complete
 * @lock: Lock serializing access to this structure
 * @debugfs_dir: dentry of this dma_test
 */
struct dma_test {
        const struct tb_service *svc;
        struct tb_xdomain *xd;
        struct tb_ring *rx_ring;
        int rx_hopid;
        struct tb_ring *tx_ring;
        int tx_hopid;
        unsigned int packets_to_send;
        unsigned int packets_to_receive;
        unsigned int packets_sent;
        unsigned int packets_received;
        unsigned int link_speed;
        unsigned int link_width;
        unsigned int crc_errors;
        unsigned int buffer_overflow_errors;
        enum dma_test_result result;
        enum dma_test_test_error error_code;
        struct completion complete;
        struct mutex lock;
        struct dentry *debugfs_dir;
};

/* DMA test property directory UUID: 3188cd10-6523-4a5a-a682-fdca07a248d8 */
static const uuid_t dma_test_dir_uuid =
        UUID_INIT(0x3188cd10, 0x6523, 0x4a5a,
                  0xa6, 0x82, 0xfd, 0xca, 0x07, 0xa2, 0x48, 0xd8);

static struct tb_property_dir *dma_test_dir;
static void *dma_test_pattern;

static void dma_test_free_rings(struct dma_test *dt)
{
        if (dt->rx_ring) {
                tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid);
                tb_ring_free(dt->rx_ring);
                dt->rx_ring = NULL;
        }
        if (dt->tx_ring) {
                tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid);
                tb_ring_free(dt->tx_ring);
                dt->tx_ring = NULL;
        }
}

static int dma_test_start_rings(struct dma_test *dt)
{
        unsigned int flags = RING_FLAG_FRAME;
        struct tb_xdomain *xd = dt->xd;
        int ret, e2e_tx_hop = 0;
        struct tb_ring *ring;

        /*
         * If we are both sender and receiver (traffic goes over a
         * special loopback dongle) enable E2E flow control. This avoids
         * losing packets.
         */
        if (dt->packets_to_send && dt->packets_to_receive)
                flags |= RING_FLAG_E2E;

        if (dt->packets_to_send) {
                ring = tb_ring_alloc_tx(xd->tb->nhi, -1, DMA_TEST_TX_RING_SIZE,
                                        flags);
                if (!ring)
                        return -ENOMEM;

                dt->tx_ring = ring;
                e2e_tx_hop = ring->hop;

                ret = tb_xdomain_alloc_out_hopid(xd, -1);
                if (ret < 0) {
                        dma_test_free_rings(dt);
                        return ret;
                }
                dt->tx_hopid = ret;
        }

        if (dt->packets_to_receive) {
                u16 sof_mask, eof_mask;

                sof_mask = BIT(DMA_TEST_PDF_FRAME_START);
                eof_mask = BIT(DMA_TEST_PDF_FRAME_END);

                ring = tb_ring_alloc_rx(xd->tb->nhi, -1, DMA_TEST_RX_RING_SIZE,
                                        flags, e2e_tx_hop, sof_mask, eof_mask,
                                        NULL, NULL);
                if (!ring) {
                        dma_test_free_rings(dt);
                        return -ENOMEM;
                }

                dt->rx_ring = ring;

                ret = tb_xdomain_alloc_in_hopid(xd, -1);
                if (ret < 0) {
                        dma_test_free_rings(dt);
                        return ret;
                }
                dt->rx_hopid = ret;
        }

        ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
                                      dt->tx_ring ? dt->tx_ring->hop : -1,
                                      dt->rx_hopid,
                                      dt->rx_ring ? dt->rx_ring->hop : -1);
        if (ret) {
                dma_test_free_rings(dt);
                return ret;
        }

        if (dt->tx_ring)
                tb_ring_start(dt->tx_ring);
        if (dt->rx_ring)
                tb_ring_start(dt->rx_ring);

        return 0;
}

static void dma_test_stop_rings(struct dma_test *dt)
{
        int ret;

        if (dt->rx_ring)
                tb_ring_stop(dt->rx_ring);
        if (dt->tx_ring)
                tb_ring_stop(dt->tx_ring);

        ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
                                       dt->tx_ring ? dt->tx_ring->hop : -1,
                                       dt->rx_hopid,
                                       dt->rx_ring ? dt->rx_ring->hop : -1);
        if (ret)
                dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");

        dma_test_free_rings(dt);
}

static void dma_test_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
                                 bool canceled)
{
        struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
        struct dma_test *dt = tf->dma_test;
        struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);

        dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
                         DMA_FROM_DEVICE);
        kfree(tf->data);

        if (canceled) {
                kfree(tf);
                return;
        }

        dt->packets_received++;
        dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received,
                dt->packets_to_receive);

        if (tf->frame.flags & RING_DESC_CRC_ERROR)
                dt->crc_errors++;
        if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN)
                dt->buffer_overflow_errors++;

        kfree(tf);

        if (dt->packets_received == dt->packets_to_receive)
                complete(&dt->complete);
}

static int dma_test_submit_rx(struct dma_test *dt, size_t npackets)
{
        struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);
        int i;

        for (i = 0; i < npackets; i++) {
                struct dma_test_frame *tf;
                dma_addr_t dma_addr;

                tf = kzalloc(sizeof(*tf), GFP_KERNEL);
                if (!tf)
                        return -ENOMEM;

                tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
                if (!tf->data) {
                        kfree(tf);
                        return -ENOMEM;
                }

                dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
                                          DMA_FROM_DEVICE);
                if (dma_mapping_error(dma_dev, dma_addr)) {
                        kfree(tf->data);
                        kfree(tf);
                        return -ENOMEM;
                }

                tf->frame.buffer_phy = dma_addr;
                tf->frame.callback = dma_test_rx_callback;
                tf->dma_test = dt;
                INIT_LIST_HEAD(&tf->frame.list);

                tb_ring_rx(dt->rx_ring, &tf->frame);
        }

        return 0;
}

static void dma_test_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
                                 bool canceled)
{
        struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
        struct dma_test *dt = tf->dma_test;
        struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);

        dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
                         DMA_TO_DEVICE);
        kfree(tf->data);
        kfree(tf);
}

static int dma_test_submit_tx(struct dma_test *dt, size_t npackets)
{
        struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);
        int i;

        for (i = 0; i < npackets; i++) {
                struct dma_test_frame *tf;
                dma_addr_t dma_addr;

                tf = kzalloc(sizeof(*tf), GFP_KERNEL);
                if (!tf)
                        return -ENOMEM;

                tf->frame.size = 0; /* means 4096 */
                tf->dma_test = dt;

                tf->data = kmemdup(dma_test_pattern, DMA_TEST_FRAME_SIZE,
                                   GFP_KERNEL);
                if (!tf->data) {
                        kfree(tf);
                        return -ENOMEM;
                }

                dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(dma_dev, dma_addr)) {
                        kfree(tf->data);
                        kfree(tf);
                        return -ENOMEM;
                }

                tf->frame.buffer_phy = dma_addr;
                tf->frame.callback = dma_test_tx_callback;
                tf->frame.sof = DMA_TEST_PDF_FRAME_START;
                tf->frame.eof = DMA_TEST_PDF_FRAME_END;
                INIT_LIST_HEAD(&tf->frame.list);

                dt->packets_sent++;
                dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent,
                        dt->packets_to_send);

                tb_ring_tx(dt->tx_ring, &tf->frame);
        }

        return 0;
}

#define DMA_TEST_DEBUGFS_ATTR(__fops, __get, __validate, __set) \
static int __fops ## _show(void *data, u64 *val) \
{ \
        struct tb_service *svc = data; \
        struct dma_test *dt = tb_service_get_drvdata(svc); \
        int ret; \
 \
        ret = mutex_lock_interruptible(&dt->lock); \
        if (ret) \
                return ret; \
        __get(dt, val); \
        mutex_unlock(&dt->lock); \
        return 0; \
} \
static int __fops ## _store(void *data, u64 val) \
{ \
        struct tb_service *svc = data; \
        struct dma_test *dt = tb_service_get_drvdata(svc); \
        int ret; \
 \
        ret = __validate(val); \
        if (ret) \
                return ret; \
        ret = mutex_lock_interruptible(&dt->lock); \
        if (ret) \
                return ret; \
        __set(dt, val); \
        mutex_unlock(&dt->lock); \
        return 0; \
} \
DEFINE_DEBUGFS_ATTRIBUTE(__fops ## _fops, __fops ## _show, \
                         __fops ## _store, "%llu\n")
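
/*
 * Each DMA_TEST_DEBUGFS_ATTR() invocation below expands to a pair of
 * <name>_show()/<name>_store() helpers that take dt->lock around the
 * getter/setter, plus a <name>_fops attribute suitable for
 * debugfs_create_file(). For example, DMA_TEST_DEBUGFS_ATTR(lanes, ...)
 * yields lanes_fops, which dma_test_debugfs_init() wires up as the
 * "lanes" file.
 */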

static void lanes_get(const struct dma_test *dt, u64 *val)
{
        *val = dt->link_width;
}
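
/*
 * Values accepted for "lanes" (see lanes_validate() below and
 * dma_test_set_bonding()): 0 keeps the negotiated width, 1 disables
 * lane bonding and 2 enables it.
 */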
static int lanes_validate(u64 val)
{
        return val > 2 ? -EINVAL : 0;
}

static void lanes_set(struct dma_test *dt, u64 val)
{
        dt->link_width = val;
}
DMA_TEST_DEBUGFS_ATTR(lanes, lanes_get, lanes_validate, lanes_set);

static void speed_get(const struct dma_test *dt, u64 *val)
{
        *val = dt->link_speed;
}
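
/*
 * Values accepted for "speed" (see speed_validate() below): 0 accepts
 * whatever speed was negotiated; 10 and 20 are the expected Gb/s rates,
 * 20 Gb/s per lane being the usual Thunderbolt 3 rate.
 */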
static int speed_validate(u64 val)
{
        switch (val) {
        case 20:
        case 10:
        case 0:
                return 0;
        default:
                return -EINVAL;
        }
}

static void speed_set(struct dma_test *dt, u64 val)
{
        dt->link_speed = val;
}
DMA_TEST_DEBUGFS_ATTR(speed, speed_get, speed_validate, speed_set);

static void packets_to_receive_get(const struct dma_test *dt, u64 *val)
{
        *val = dt->packets_to_receive;
}

static int packets_to_receive_validate(u64 val)
{
        return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}

static void packets_to_receive_set(struct dma_test *dt, u64 val)
{
        dt->packets_to_receive = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_receive, packets_to_receive_get,
                      packets_to_receive_validate, packets_to_receive_set);

static void packets_to_send_get(const struct dma_test *dt, u64 *val)
{
        *val = dt->packets_to_send;
}

static int packets_to_send_validate(u64 val)
{
        return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}

static void packets_to_send_set(struct dma_test *dt, u64 val)
{
        dt->packets_to_send = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_send, packets_to_send_get,
                      packets_to_send_validate, packets_to_send_set);

static int dma_test_set_bonding(struct dma_test *dt)
{
        switch (dt->link_width) {
        case 2:
                return tb_xdomain_lane_bonding_enable(dt->xd);
        case 1:
                tb_xdomain_lane_bonding_disable(dt->xd);
                fallthrough;
        default:
                return 0;
        }
}

static bool dma_test_validate_config(struct dma_test *dt)
{
        if (!dt->packets_to_send && !dt->packets_to_receive)
                return false;
        if (dt->packets_to_send && dt->packets_to_receive &&
            dt->packets_to_send != dt->packets_to_receive)
                return false;
        return true;
}

static void dma_test_check_errors(struct dma_test *dt, int ret)
{
        if (!dt->error_code) {
                if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
                        dt->error_code = DMA_TEST_SPEED_ERROR;
                } else if (dt->link_width &&
                           dt->xd->link_width != dt->link_width) {
                        dt->error_code = DMA_TEST_WIDTH_ERROR;
                } else if (dt->packets_to_send != dt->packets_sent ||
                           dt->packets_to_receive != dt->packets_received ||
                           dt->crc_errors || dt->buffer_overflow_errors) {
                        dt->error_code = DMA_TEST_PACKET_ERROR;
                } else {
                        return;
                }
        }

        dt->result = DMA_TEST_FAIL;
}

static int test_store(void *data, u64 val)
{
        struct tb_service *svc = data;
        struct dma_test *dt = tb_service_get_drvdata(svc);
        int ret;

        if (val != 1)
                return -EINVAL;

        ret = mutex_lock_interruptible(&dt->lock);
        if (ret)
                return ret;

        dt->packets_sent = 0;
        dt->packets_received = 0;
        dt->crc_errors = 0;
        dt->buffer_overflow_errors = 0;
        dt->result = DMA_TEST_SUCCESS;
        dt->error_code = DMA_TEST_NO_ERROR;

        dev_dbg(&svc->dev, "DMA test starting\n");
        if (dt->link_speed)
                dev_dbg(&svc->dev, "link_speed: %u Gb/s\n", dt->link_speed);
        if (dt->link_width)
                dev_dbg(&svc->dev, "link_width: %u\n", dt->link_width);
        dev_dbg(&svc->dev, "packets_to_send: %u\n", dt->packets_to_send);
        dev_dbg(&svc->dev, "packets_to_receive: %u\n", dt->packets_to_receive);

        if (!dma_test_validate_config(dt)) {
                dev_err(&svc->dev, "invalid test configuration\n");
                dt->error_code = DMA_TEST_CONFIG_ERROR;
                goto out_unlock;
        }

        ret = dma_test_set_bonding(dt);
        if (ret) {
                dev_err(&svc->dev, "failed to set lanes\n");
                dt->error_code = DMA_TEST_BONDING_ERROR;
                goto out_unlock;
        }

        ret = dma_test_start_rings(dt);
        if (ret) {
                dev_err(&svc->dev, "failed to enable DMA rings\n");
                dt->error_code = DMA_TEST_DMA_ERROR;
                goto out_unlock;
        }

        if (dt->packets_to_receive) {
                reinit_completion(&dt->complete);
                ret = dma_test_submit_rx(dt, dt->packets_to_receive);
                if (ret) {
                        dev_err(&svc->dev, "failed to submit receive buffers\n");
                        dt->error_code = DMA_TEST_BUFFER_ERROR;
                        goto out_stop;
                }
        }

        if (dt->packets_to_send) {
                ret = dma_test_submit_tx(dt, dt->packets_to_send);
                if (ret) {
                        dev_err(&svc->dev, "failed to submit transmit buffers\n");
                        dt->error_code = DMA_TEST_BUFFER_ERROR;
                        goto out_stop;
                }
        }

        if (dt->packets_to_receive) {
                ret = wait_for_completion_interruptible(&dt->complete);
                if (ret) {
                        dt->error_code = DMA_TEST_INTERRUPTED;
                        goto out_stop;
                }
        }

out_stop:
        dma_test_stop_rings(dt);
out_unlock:
        dma_test_check_errors(dt, ret);
        mutex_unlock(&dt->lock);

        dev_dbg(&svc->dev, "DMA test %s\n", dma_test_result_names[dt->result]);
        return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(test_fops, NULL, test_store, "%llu\n");

static int status_show(struct seq_file *s, void *not_used)
{
        struct tb_service *svc = s->private;
        struct dma_test *dt = tb_service_get_drvdata(svc);
        int ret;

        ret = mutex_lock_interruptible(&dt->lock);
        if (ret)
                return ret;

        seq_printf(s, "result: %s\n", dma_test_result_names[dt->result]);
        if (dt->result == DMA_TEST_NOT_RUN)
                goto out_unlock;

        seq_printf(s, "packets received: %u\n", dt->packets_received);
        seq_printf(s, "packets sent: %u\n", dt->packets_sent);
        seq_printf(s, "CRC errors: %u\n", dt->crc_errors);
        seq_printf(s, "buffer overflow errors: %u\n",
                   dt->buffer_overflow_errors);
        seq_printf(s, "error: %s\n", dma_test_error_names[dt->error_code]);

out_unlock:
        mutex_unlock(&dt->lock);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(status);

static void dma_test_debugfs_init(struct tb_service *svc)
{
        struct dma_test *dt = tb_service_get_drvdata(svc);

        dt->debugfs_dir = debugfs_create_dir("dma_test", svc->debugfs_dir);

        debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops);
        debugfs_create_file("speed", 0600, dt->debugfs_dir, svc, &speed_fops);
        debugfs_create_file("packets_to_receive", 0600, dt->debugfs_dir, svc,
                            &packets_to_receive_fops);
        debugfs_create_file("packets_to_send", 0600, dt->debugfs_dir, svc,
                            &packets_to_send_fops);
        debugfs_create_file("status", 0400, dt->debugfs_dir, svc, &status_fops);
        debugfs_create_file("test", 0200, dt->debugfs_dir, svc, &test_fops);
}
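
/*
 * A typical run drives the files created above from userspace roughly
 * like this (a sketch; the exact debugfs path depends on the domain,
 * XDomain device and service names):
 *
 *   # cd /sys/kernel/debug/thunderbolt/<xdomain>/<service>/dma_test
 *   # echo 1000 > packets_to_send
 *   # echo 1000 > packets_to_receive
 *   # echo 1 > test
 *   # cat status
 */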

static int dma_test_probe(struct tb_service *svc, const struct tb_service_id *id)
{
        struct tb_xdomain *xd = tb_service_parent(svc);
        struct dma_test *dt;

        dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL);
        if (!dt)
                return -ENOMEM;

        dt->svc = svc;
        dt->xd = xd;
        mutex_init(&dt->lock);
        init_completion(&dt->complete);

        tb_service_set_drvdata(svc, dt);
        dma_test_debugfs_init(svc);

        return 0;
}

static void dma_test_remove(struct tb_service *svc)
{
        struct dma_test *dt = tb_service_get_drvdata(svc);

        mutex_lock(&dt->lock);
        debugfs_remove_recursive(dt->debugfs_dir);
        mutex_unlock(&dt->lock);
}

static int __maybe_unused dma_test_suspend(struct device *dev)
{
        /*
         * No need to do anything special here. If userspace is writing
         * to the test attribute when suspend started, it comes out from
         * wait_for_completion_interruptible() with -ERESTARTSYS and the
         * DMA test fails, tearing down the rings. Once userspace is
         * thawed the kernel restarts the write syscall, effectively
         * re-running the test.
         */
        return 0;
}

static int __maybe_unused dma_test_resume(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops dma_test_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(dma_test_suspend, dma_test_resume)
};

static const struct tb_service_id dma_test_ids[] = {
        { TB_SERVICE("dma_test", 1) },
        { },
};
MODULE_DEVICE_TABLE(tbsvc, dma_test_ids);

static struct tb_service_driver dma_test_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = "thunderbolt_dma_test",
                .pm = &dma_test_pm_ops,
        },
        .probe = dma_test_probe,
        .remove = dma_test_remove,
        .id_table = dma_test_ids,
};

static int __init dma_test_init(void)
{
        u64 data_value = DMA_TEST_DATA_PATTERN;
        int i, ret;

        dma_test_pattern = kmalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
        if (!dma_test_pattern)
                return -ENOMEM;

        /* Fill the pattern buffer with incrementing 64-bit values */
        for (i = 0; i < DMA_TEST_FRAME_SIZE / sizeof(data_value); i++)
                ((u64 *)dma_test_pattern)[i] = data_value++;

        dma_test_dir = tb_property_create_dir(&dma_test_dir_uuid);
        if (!dma_test_dir) {
                ret = -ENOMEM;
                goto err_free_pattern;
        }

        tb_property_add_immediate(dma_test_dir, "prtcid", 1);
        tb_property_add_immediate(dma_test_dir, "prtcvers", 1);
        tb_property_add_immediate(dma_test_dir, "prtcrevs", 0);
        tb_property_add_immediate(dma_test_dir, "prtcstns", 0);

        ret = tb_register_property_dir("dma_test", dma_test_dir);
        if (ret)
                goto err_free_dir;

        ret = tb_register_service_driver(&dma_test_driver);
        if (ret)
                goto err_unregister_dir;

        return 0;

err_unregister_dir:
        tb_unregister_property_dir("dma_test", dma_test_dir);
err_free_dir:
        tb_property_free_dir(dma_test_dir);
err_free_pattern:
        kfree(dma_test_pattern);

        return ret;
}
module_init(dma_test_init);
  628. module_init(dma_test_init);
  629. static void __exit dma_test_exit(void)
  630. {
  631. tb_unregister_service_driver(&dma_test_driver);
  632. tb_unregister_property_dir("dma_test", dma_test_dir);
  633. tb_property_free_dir(dma_test_dir);
  634. kfree(dma_test_pattern);
  635. }
  636. module_exit(dma_test_exit);
  637. MODULE_AUTHOR("Isaac Hazan <[email protected]>");
  638. MODULE_AUTHOR("Mika Westerberg <[email protected]>");
  639. MODULE_DESCRIPTION("DMA traffic test driver");
  640. MODULE_LICENSE("GPL v2");