locktorture.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <[email protected]>
 *          Davidlohr Bueso <[email protected]>
 *
 * Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <[email protected]>");

torture_param(int, nwriters_stress, -1,
	      "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	      "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	      "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	void (*exit)(void);
	int (*writelock)(int tid);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(int tid);
	int (*readlock)(int tid);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(int tid);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};
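
/*
 * Each lock flavor below supplies an instance of this vector.  The
 * write-side hooks and task_boost() are always populated, init()/exit()
 * are optional, and the read-side hooks stay NULL for exclusive-only
 * primitives (see lock_busted_ops below for the minimal case).
 */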

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	bool init_called;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};

static struct lock_torture_cxt cxt = { 0, 0, false, false,
				       ATOMIC_INIT(0),
				       NULL, NULL };

/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
	/* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};

static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};

#include <linux/ww_mutex.h>

/*
 * The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid lockdep problems.  The ww_mutex_init()
 * function is called for initialization to ensure that.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;

static void torture_ww_mutex_init(void)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}

static void torture_ww_mutex_exit(void)
{
	kfree(ww_acquire_ctxs);
}

static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		/* Back off: drop every mutex acquired so far, in reverse order. */
		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		/*
		 * -EDEADLK tells this context to back off.  Sleep-acquire the
		 * contended mutex, move it to the head of the list so it is
		 * taken first, and retry the remaining locks.
		 */
		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}

	return 0;
}

static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.init		= torture_ww_mutex_init,
	.exit		= torture_ww_mutex_exit,
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations. When the
		 * task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then be restored back to its original prio, and so forth.
		 *
		 * When @trsp is NULL, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost	= torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);

static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention. */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule(); /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.exit		= torture_percpu_rwsem_exit,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	int tid = lwsp - cxt.lwsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock(tid);
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = true;
		if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = false;
		WRITE_ONCE(last_lock_release, jiffies);
		cxt.cur_ops->writeunlock(tid);

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
	page += sprintf(page,
			"%s: Total: %lld Max/Min: %ld/%ld %s Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
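
/*
 * The line produced above looks like the following (hypothetical numbers):
 *
 *	Writes: Total: 93746064 Max/Min: 12203/10876  Fail: 0
 *
 * "???" is appended when max/2 > min with CPU hotplug disabled, flagging a
 * suspicious per-thread imbalance, and "!!!" flags acquisition failures.
 */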

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}
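
/*
 * For example, a default spin_lock run on an 8-CPU system might log the
 * following at test start (hypothetical values):
 *
 *	spin_lock-torture:--- Start of test: nwriters_stress=16
 *	nreaders_stress=0 stat_interval=60 verbose=1 shuffle_interval=3
 *	stutter=5 shutdown_secs=0 onoff_interval=0 onoff_holdoff=0
 */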

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However, cxt.cur_ops->init() may have been invoked, so besides
	 * performing the underlying torture-specific cleanups, cur_ops->exit()
	 * will be invoked if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default, distribute the number of readers and
			 * writers evenly.  We still run the same total number
			 * of threads as the writer-only locks do by default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers
	 * a slight advantage, by creating their kthreads first. This could be
	 * changed for very specific needs, or the policy could even be made
	 * user-selectable, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);