  1. /* SPDX-License-Identifier: MIT */
  2. /*
  3. * Copyright © 2019 Intel Corporation
  4. */
#include <linux/delay.h>
#include <linux/dma-fence.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "selftest.h"
/* Slab for all mock fences; created/destroyed in dma_fence() below. */
static struct kmem_cache *slab_fences;

/*
 * A mock fence is a bare dma_fence plus its own spinlock, allocated
 * from slab_fences.  The struct is declared combined with the
 * downcast helper so the type never escapes this translation unit.
 */
static struct mock_fence {
	struct dma_fence base;
	struct spinlock lock;
} *to_mock_fence(struct dma_fence *f) {
	/* Valid because every fence in this file is embedded in a mock_fence. */
	return container_of(f, struct mock_fence, base);
}
  20. static const char *mock_name(struct dma_fence *f)
  21. {
  22. return "mock";
  23. }
/* ->release: return the container to the slab once the last ref drops. */
static void mock_fence_release(struct dma_fence *f)
{
	kmem_cache_free(slab_fences, to_mock_fence(f));
}
/* Links a dma_fence callback to the task that is waiting on it. */
struct wait_cb {
	struct dma_fence_cb cb;
	struct task_struct *task;
};
/* Fence callback: wake the sleeping waiter recorded in the wait_cb. */
static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
	wake_up_process(container_of(cb, struct wait_cb, cb)->task);
}
/*
 * ->wait implementation: sleep until the fence signals, the timeout
 * expires, or (if @intr) a signal becomes pending.
 *
 * Returns the remaining jiffies if the fence signaled, -ERESTARTSYS
 * if interrupted, or -ETIME if the timeout was consumed.
 */
static long mock_wait(struct dma_fence *f, bool intr, long timeout)
{
	const int state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	struct wait_cb cb = { .task = current };

	/* Non-zero return means already signaled: nothing to wait for. */
	if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
		return timeout;

	while (timeout) {
		set_current_state(state);

		/* Re-check after setting task state to close the wakeup race. */
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (signal_pending_state(state, current))
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	/*
	 * Failed removal means mock_wakeup already ran (or is running),
	 * i.e. the fence signaled: report the remaining timeout.
	 */
	if (!dma_fence_remove_callback(f, &cb.cb))
		return timeout;

	if (signal_pending_state(state, current))
		return -ERESTARTSYS;

	return -ETIME;
}
/* Ops table shared by all mock fences. */
static const struct dma_fence_ops mock_ops = {
	.get_driver_name = mock_name,
	.get_timeline_name = mock_name,
	.wait = mock_wait,
	.release = mock_fence_release,
};
/*
 * Allocate and initialise a fresh mock fence (context 0, seqno 0).
 * Returns NULL on allocation failure; on success the caller owns the
 * initial reference.
 */
static struct dma_fence *mock_fence(void)
{
	struct mock_fence *f;

	f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
	if (!f)
		return NULL;

	spin_lock_init(&f->lock);
	dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);

	return &f->base;
}
  73. static int sanitycheck(void *arg)
  74. {
  75. struct dma_fence *f;
  76. f = mock_fence();
  77. if (!f)
  78. return -ENOMEM;
  79. dma_fence_enable_sw_signaling(f);
  80. dma_fence_signal(f);
  81. dma_fence_put(f);
  82. return 0;
  83. }
  84. static int test_signaling(void *arg)
  85. {
  86. struct dma_fence *f;
  87. int err = -EINVAL;
  88. f = mock_fence();
  89. if (!f)
  90. return -ENOMEM;
  91. dma_fence_enable_sw_signaling(f);
  92. if (dma_fence_is_signaled(f)) {
  93. pr_err("Fence unexpectedly signaled on creation\n");
  94. goto err_free;
  95. }
  96. if (dma_fence_signal(f)) {
  97. pr_err("Fence reported being already signaled\n");
  98. goto err_free;
  99. }
  100. if (!dma_fence_is_signaled(f)) {
  101. pr_err("Fence not reporting signaled\n");
  102. goto err_free;
  103. }
  104. if (!dma_fence_signal(f)) {
  105. pr_err("Fence reported not being already signaled\n");
  106. goto err_free;
  107. }
  108. err = 0;
  109. err_free:
  110. dma_fence_put(f);
  111. return err;
  112. }
/* Callback payload: records whether the callback has fired. */
struct simple_cb {
	struct dma_fence_cb cb;
	bool seen;
};

/* Set ->seen with a full memory barrier so other CPUs observe it. */
static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
	smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
}
  121. static int test_add_callback(void *arg)
  122. {
  123. struct simple_cb cb = {};
  124. struct dma_fence *f;
  125. int err = -EINVAL;
  126. f = mock_fence();
  127. if (!f)
  128. return -ENOMEM;
  129. if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
  130. pr_err("Failed to add callback, fence already signaled!\n");
  131. goto err_free;
  132. }
  133. dma_fence_signal(f);
  134. if (!cb.seen) {
  135. pr_err("Callback failed!\n");
  136. goto err_free;
  137. }
  138. err = 0;
  139. err_free:
  140. dma_fence_put(f);
  141. return err;
  142. }
  143. static int test_late_add_callback(void *arg)
  144. {
  145. struct simple_cb cb = {};
  146. struct dma_fence *f;
  147. int err = -EINVAL;
  148. f = mock_fence();
  149. if (!f)
  150. return -ENOMEM;
  151. dma_fence_enable_sw_signaling(f);
  152. dma_fence_signal(f);
  153. if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
  154. pr_err("Added callback, but fence was already signaled!\n");
  155. goto err_free;
  156. }
  157. dma_fence_signal(f);
  158. if (cb.seen) {
  159. pr_err("Callback called after failed attachment !\n");
  160. goto err_free;
  161. }
  162. err = 0;
  163. err_free:
  164. dma_fence_put(f);
  165. return err;
  166. }
  167. static int test_rm_callback(void *arg)
  168. {
  169. struct simple_cb cb = {};
  170. struct dma_fence *f;
  171. int err = -EINVAL;
  172. f = mock_fence();
  173. if (!f)
  174. return -ENOMEM;
  175. if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
  176. pr_err("Failed to add callback, fence already signaled!\n");
  177. goto err_free;
  178. }
  179. if (!dma_fence_remove_callback(f, &cb.cb)) {
  180. pr_err("Failed to remove callback!\n");
  181. goto err_free;
  182. }
  183. dma_fence_signal(f);
  184. if (cb.seen) {
  185. pr_err("Callback still signaled after removal!\n");
  186. goto err_free;
  187. }
  188. err = 0;
  189. err_free:
  190. dma_fence_put(f);
  191. return err;
  192. }
  193. static int test_late_rm_callback(void *arg)
  194. {
  195. struct simple_cb cb = {};
  196. struct dma_fence *f;
  197. int err = -EINVAL;
  198. f = mock_fence();
  199. if (!f)
  200. return -ENOMEM;
  201. if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
  202. pr_err("Failed to add callback, fence already signaled!\n");
  203. goto err_free;
  204. }
  205. dma_fence_signal(f);
  206. if (!cb.seen) {
  207. pr_err("Callback failed!\n");
  208. goto err_free;
  209. }
  210. if (dma_fence_remove_callback(f, &cb.cb)) {
  211. pr_err("Callback removal succeed after being executed!\n");
  212. goto err_free;
  213. }
  214. err = 0;
  215. err_free:
  216. dma_fence_put(f);
  217. return err;
  218. }
  219. static int test_status(void *arg)
  220. {
  221. struct dma_fence *f;
  222. int err = -EINVAL;
  223. f = mock_fence();
  224. if (!f)
  225. return -ENOMEM;
  226. dma_fence_enable_sw_signaling(f);
  227. if (dma_fence_get_status(f)) {
  228. pr_err("Fence unexpectedly has signaled status on creation\n");
  229. goto err_free;
  230. }
  231. dma_fence_signal(f);
  232. if (!dma_fence_get_status(f)) {
  233. pr_err("Fence not reporting signaled status\n");
  234. goto err_free;
  235. }
  236. err = 0;
  237. err_free:
  238. dma_fence_put(f);
  239. return err;
  240. }
  241. static int test_error(void *arg)
  242. {
  243. struct dma_fence *f;
  244. int err = -EINVAL;
  245. f = mock_fence();
  246. if (!f)
  247. return -ENOMEM;
  248. dma_fence_enable_sw_signaling(f);
  249. dma_fence_set_error(f, -EIO);
  250. if (dma_fence_get_status(f)) {
  251. pr_err("Fence unexpectedly has error status before signal\n");
  252. goto err_free;
  253. }
  254. dma_fence_signal(f);
  255. if (dma_fence_get_status(f) != -EIO) {
  256. pr_err("Fence not reporting error status, got %d\n",
  257. dma_fence_get_status(f));
  258. goto err_free;
  259. }
  260. err = 0;
  261. err_free:
  262. dma_fence_put(f);
  263. return err;
  264. }
  265. static int test_wait(void *arg)
  266. {
  267. struct dma_fence *f;
  268. int err = -EINVAL;
  269. f = mock_fence();
  270. if (!f)
  271. return -ENOMEM;
  272. dma_fence_enable_sw_signaling(f);
  273. if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
  274. pr_err("Wait reported complete before being signaled\n");
  275. goto err_free;
  276. }
  277. dma_fence_signal(f);
  278. if (dma_fence_wait_timeout(f, false, 0) != 0) {
  279. pr_err("Wait reported incomplete after being signaled\n");
  280. goto err_free;
  281. }
  282. err = 0;
  283. err_free:
  284. dma_fence_signal(f);
  285. dma_fence_put(f);
  286. return err;
  287. }
/* Pairs an on-stack timer with the fence it will signal. */
struct wait_timer {
	struct timer_list timer;
	struct dma_fence *f;
};

/* Timer expiry handler: signal the associated fence. */
static void wait_timer(struct timer_list *timer)
{
	struct wait_timer *wt = from_timer(wt, timer, timer);

	dma_fence_signal(wt->f);
}
  297. static int test_wait_timeout(void *arg)
  298. {
  299. struct wait_timer wt;
  300. int err = -EINVAL;
  301. timer_setup_on_stack(&wt.timer, wait_timer, 0);
  302. wt.f = mock_fence();
  303. if (!wt.f)
  304. return -ENOMEM;
  305. dma_fence_enable_sw_signaling(wt.f);
  306. if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) {
  307. pr_err("Wait reported complete before being signaled\n");
  308. goto err_free;
  309. }
  310. mod_timer(&wt.timer, jiffies + 1);
  311. if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) {
  312. if (timer_pending(&wt.timer)) {
  313. pr_notice("Timer did not fire within the jiffie!\n");
  314. err = 0; /* not our fault! */
  315. } else {
  316. pr_err("Wait reported incomplete after timeout\n");
  317. }
  318. goto err_free;
  319. }
  320. err = 0;
  321. err_free:
  322. del_timer_sync(&wt.timer);
  323. destroy_timer_on_stack(&wt.timer);
  324. dma_fence_signal(wt.f);
  325. dma_fence_put(wt.f);
  326. return err;
  327. }
  328. static int test_stub(void *arg)
  329. {
  330. struct dma_fence *f[64];
  331. int err = -EINVAL;
  332. int i;
  333. for (i = 0; i < ARRAY_SIZE(f); i++) {
  334. f[i] = dma_fence_get_stub();
  335. if (!dma_fence_is_signaled(f[i])) {
  336. pr_err("Obtained unsignaled stub fence!\n");
  337. goto err;
  338. }
  339. }
  340. err = 0;
  341. err:
  342. while (i--)
  343. dma_fence_put(f[i]);
  344. return err;
  345. }
  346. /* Now off to the races! */
/* Per-thread state for the signal-vs-add_callback race test. */
struct race_thread {
	struct dma_fence __rcu **fences; /* shared two-slot exchange array */
	struct task_struct *task;
	bool before;                     /* signal before adding the callback? */
	int id;                          /* our slot; partner is !id */
};
/*
 * Callbacks run under the fence's lock; taking and dropping that lock
 * acts as a barrier that waits out any callback still executing on
 * another CPU.
 */
static void __wait_for_callbacks(struct dma_fence *f)
{
	spin_lock_irq(f->lock);
	spin_unlock_irq(f->lock);
}
/*
 * Race worker: each of the two threads publishes its own fence,
 * grabs its partner's, attaches a callback and signals, checking
 * the callback is always observed exactly when it should be.
 * t->before selects signaling before vs after add_callback.
 *
 * Returns 0 on success, -ENOMEM/-EINVAL on failure.
 */
static int thread_signal_callback(void *arg)
{
	const struct race_thread *t = arg;
	unsigned long pass = 0;
	unsigned long miss = 0;
	int err = 0;

	while (!err && !kthread_should_stop()) {
		struct dma_fence *f1, *f2;
		struct simple_cb cb;

		f1 = mock_fence();
		if (!f1) {
			err = -ENOMEM;
			break;
		}

		dma_fence_enable_sw_signaling(f1);

		/* Publish our fence for the partner thread to find. */
		rcu_assign_pointer(t->fences[t->id], f1);
		smp_wmb();

		/* Spin until the partner's fence appears (or we're stopped). */
		rcu_read_lock();
		do {
			f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);
		} while (!f2 && !kthread_should_stop());
		rcu_read_unlock();

		if (t->before)
			dma_fence_signal(f1);

		smp_store_mb(cb.seen, false);
		if (!f2 ||
		    dma_fence_add_callback(f2, &cb.cb, simple_callback)) {
			/* Partner fence missing or already signaled: a miss. */
			miss++;
			cb.seen = true;
		}

		if (!t->before)
			dma_fence_signal(f1);

		if (!cb.seen) {
			/* Wait for signal, then flush the in-flight callback. */
			dma_fence_wait(f2, false);
			__wait_for_callbacks(f2);
		}

		if (!READ_ONCE(cb.seen)) {
			pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n",
			       t->id, pass, miss,
			       t->before ? "before" : "after",
			       dma_fence_is_signaled(f2) ? "yes" : "no");
			err = -EINVAL;
		}

		dma_fence_put(f2);

		/* Retract our fence before dropping the last local ref. */
		rcu_assign_pointer(t->fences[t->id], NULL);
		smp_wmb();

		dma_fence_put(f1);

		pass++;
	}

	pr_info("%s[%d] completed %lu passes, %lu misses\n",
		__func__, t->id, pass, miss);

	return err;
}
  411. static int race_signal_callback(void *arg)
  412. {
  413. struct dma_fence __rcu *f[2] = {};
  414. int ret = 0;
  415. int pass;
  416. for (pass = 0; !ret && pass <= 1; pass++) {
  417. struct race_thread t[2];
  418. int i;
  419. for (i = 0; i < ARRAY_SIZE(t); i++) {
  420. t[i].fences = f;
  421. t[i].id = i;
  422. t[i].before = pass;
  423. t[i].task = kthread_run(thread_signal_callback, &t[i],
  424. "dma-fence:%d", i);
  425. get_task_struct(t[i].task);
  426. }
  427. msleep(50);
  428. for (i = 0; i < ARRAY_SIZE(t); i++) {
  429. int err;
  430. err = kthread_stop(t[i].task);
  431. if (err && !ret)
  432. ret = err;
  433. put_task_struct(t[i].task);
  434. }
  435. }
  436. return ret;
  437. }
/*
 * Selftest entry point: create the mock-fence slab, run every subtest
 * in order, then tear the slab down.  Returns 0 on success or the
 * first error from the subtest harness.
 */
int dma_fence(void)
{
	static const struct subtest tests[] = {
		SUBTEST(sanitycheck),
		SUBTEST(test_signaling),
		SUBTEST(test_add_callback),
		SUBTEST(test_late_add_callback),
		SUBTEST(test_rm_callback),
		SUBTEST(test_late_rm_callback),
		SUBTEST(test_status),
		SUBTEST(test_error),
		SUBTEST(test_wait),
		SUBTEST(test_wait_timeout),
		SUBTEST(test_stub),
		SUBTEST(race_signal_callback),
	};
	int ret;

	pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence));

	/*
	 * SLAB_TYPESAFE_BY_RCU: freed fences may still be examined under
	 * RCU, matching the dma_fence_get_rcu_safe() use in the race test.
	 */
	slab_fences = KMEM_CACHE(mock_fence,
				 SLAB_TYPESAFE_BY_RCU |
				 SLAB_HWCACHE_ALIGN);
	if (!slab_fences)
		return -ENOMEM;

	ret = subtests(tests, NULL);

	kmem_cache_destroy(slab_fences);

	return ret;
}