/* kernel/lockdep_proc.c (~19 KB) — scraped line-number index removed. */
// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <[email protected]>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 *
 */
#include <linux/export.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
#include <asm/div64.h>

#include "lockdep_internals.h"
/*
 * Since iteration of lock_classes is done without holding the lockdep lock,
 * it is not safe to iterate all_lock_classes list directly as the iteration
 * may branch off to free_lock_classes or the zapped list. Iteration is done
 * directly on the lock_classes array by checking the lock_classes_in_use
 * bitmap and max_lock_class_idx.
 *
 * Note: 'idx' and 'class' are assigned, not just read, so callers pass
 * plain lvalues (no parentheses around the macro arguments on purpose).
 * Callers must still test lock_classes_in_use for each slot themselves.
 */
#define iterate_lock_classes(idx, class)				\
	for (idx = 0, class = lock_classes; idx <= max_lock_class_idx;	\
	     idx++, class++)
  35. static void *l_next(struct seq_file *m, void *v, loff_t *pos)
  36. {
  37. struct lock_class *class = v;
  38. ++class;
  39. *pos = class - lock_classes;
  40. return (*pos > max_lock_class_idx) ? NULL : class;
  41. }
  42. static void *l_start(struct seq_file *m, loff_t *pos)
  43. {
  44. unsigned long idx = *pos;
  45. if (idx > max_lock_class_idx)
  46. return NULL;
  47. return lock_classes + idx;
  48. }
/* Nothing to tear down: l_start()/l_next() take no locks or references. */
static void l_stop(struct seq_file *m, void *v)
{
}
  52. static void print_name(struct seq_file *m, struct lock_class *class)
  53. {
  54. char str[KSYM_NAME_LEN];
  55. const char *name = class->name;
  56. if (!name) {
  57. name = __get_key_name(class->key, str);
  58. seq_printf(m, "%s", name);
  59. } else{
  60. seq_printf(m, "%s", name);
  61. if (class->name_version > 1)
  62. seq_printf(m, "#%d", class->name_version);
  63. if (class->subclass)
  64. seq_printf(m, "/%d", class->subclass);
  65. }
  66. }
/*
 * Show one lock class for /proc/lockdep: key, optional op counters and
 * dependency/usage info, the class name, and (with PROVE_LOCKING) its
 * direct forward dependencies.
 */
static int l_show(struct seq_file *m, void *v)
{
	struct lock_class *class = v;
	struct lock_list *entry;
	char usage[LOCK_USAGE_CHARS];
	int idx = class - lock_classes;

	/* Header is emitted exactly once, for array slot 0. */
	if (v == lock_classes)
		seq_printf(m, "all lock classes:\n");

	/* Skip array slots that do not currently hold a live class. */
	if (!test_bit(idx, lock_classes_in_use))
		return 0;

	seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
	seq_printf(m, " OPS:%8ld", debug_class_ops_read(class));
#endif
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
		seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));

		get_usage_chars(class, usage);
		seq_printf(m, " %s", usage);
	}

	seq_printf(m, ": ");
	print_name(m, class);
	seq_puts(m, "\n");

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		/* Only direct dependencies (distance == 1) are listed. */
		list_for_each_entry(entry, &class->locks_after, entry) {
			if (entry->distance == 1) {
				seq_printf(m, " -> [%p] ", entry->class->key);
				print_name(m, entry->class);
				seq_puts(m, "\n");
			}
		}
		seq_puts(m, "\n");
	}

	return 0;
}
/* seq_file operations backing /proc/lockdep. */
static const struct seq_operations lockdep_ops = {
	.start	= l_start,
	.next	= l_next,
	.stop	= l_stop,
	.show	= l_show,
};
  108. #ifdef CONFIG_PROVE_LOCKING
  109. static void *lc_start(struct seq_file *m, loff_t *pos)
  110. {
  111. if (*pos < 0)
  112. return NULL;
  113. if (*pos == 0)
  114. return SEQ_START_TOKEN;
  115. return lock_chains + (*pos - 1);
  116. }
static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
{
	/*
	 * The -1/+1 pair translates between the seq position (where 0 is
	 * the header token) and the chain index passed to
	 * lockdep_next_lockchain().
	 */
	*pos = lockdep_next_lockchain(*pos - 1) + 1;
	return lc_start(m, pos);
}
/* Nothing to tear down: lc_start()/lc_next() take no locks or references. */
static void lc_stop(struct seq_file *m, void *v)
{
}
/*
 * Show one lock chain for /proc/lockdep_chains: its irq context followed
 * by the classes held along the chain.
 */
static int lc_show(struct seq_file *m, void *v)
{
	struct lock_chain *chain = v;
	struct lock_class *class;
	int i;
	static const char * const irq_strs[] = {
		[0]			     = "0",
		[LOCK_CHAIN_HARDIRQ_CONTEXT] = "hardirq",
		[LOCK_CHAIN_SOFTIRQ_CONTEXT] = "softirq",
		[LOCK_CHAIN_SOFTIRQ_CONTEXT|
		 LOCK_CHAIN_HARDIRQ_CONTEXT] = "hardirq|softirq",
	};

	if (v == SEQ_START_TOKEN) {
		/* "(buggered)" flags that the chain hlocks pool ran dry. */
		if (!nr_free_chain_hlocks)
			seq_printf(m, "(buggered) ");
		seq_printf(m, "all lock chains:\n");
		return 0;
	}

	seq_printf(m, "irq_context: %s\n", irq_strs[chain->irq_context]);

	for (i = 0; i < chain->depth; i++) {
		class = lock_chain_get_class(chain, i);
		/* Skip entries whose class has been zapped meanwhile. */
		if (!class->key)
			continue;

		seq_printf(m, "[%p] ", class->key);
		print_name(m, class);
		seq_puts(m, "\n");
	}
	seq_puts(m, "\n");

	return 0;
}
/* seq_file operations backing /proc/lockdep_chains. */
static const struct seq_operations lockdep_chains_ops = {
	.start	= lc_start,
	.next	= lc_next,
	.stop	= lc_stop,
	.show	= lc_show,
};
#endif /* CONFIG_PROVE_LOCKING */
/*
 * Dump the CONFIG_DEBUG_LOCKDEP-only internal counters into the
 * lockdep_stats output; compiles to a no-op otherwise.
 */
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	unsigned long long hi1 = debug_atomic_read(hardirqs_on_events),
			   hi2 = debug_atomic_read(hardirqs_off_events),
			   hr1 = debug_atomic_read(redundant_hardirqs_on),
			   hr2 = debug_atomic_read(redundant_hardirqs_off),
			   si1 = debug_atomic_read(softirqs_on_events),
			   si2 = debug_atomic_read(softirqs_off_events),
			   sr1 = debug_atomic_read(redundant_softirqs_on),
			   sr2 = debug_atomic_read(redundant_softirqs_off);

	seq_printf(m, " chain lookup misses: %11llu\n",
		debug_atomic_read(chain_lookup_misses));
	seq_printf(m, " chain lookup hits: %11llu\n",
		debug_atomic_read(chain_lookup_hits));
	seq_printf(m, " cyclic checks: %11llu\n",
		debug_atomic_read(nr_cyclic_checks));
	seq_printf(m, " redundant checks: %11llu\n",
		debug_atomic_read(nr_redundant_checks));
	seq_printf(m, " redundant links: %11llu\n",
		debug_atomic_read(nr_redundant));
	seq_printf(m, " find-mask forwards checks: %11llu\n",
		debug_atomic_read(nr_find_usage_forwards_checks));
	seq_printf(m, " find-mask backwards checks: %11llu\n",
		debug_atomic_read(nr_find_usage_backwards_checks));

	seq_printf(m, " hardirq on events: %11llu\n", hi1);
	seq_printf(m, " hardirq off events: %11llu\n", hi2);
	seq_printf(m, " redundant hardirq ons: %11llu\n", hr1);
	seq_printf(m, " redundant hardirq offs: %11llu\n", hr2);
	seq_printf(m, " softirq on events: %11llu\n", si1);
	seq_printf(m, " softirq off events: %11llu\n", si2);
	seq_printf(m, " redundant softirq ons: %11llu\n", sr1);
	seq_printf(m, " redundant softirq offs: %11llu\n", sr2);
#endif
}
/*
 * /proc/lockdep_stats: classify every in-use lock class by its usage mask
 * and print global lockdep counters and capacity limits.
 */
static int lockdep_stats_show(struct seq_file *m, void *v)
{
	unsigned long nr_unused = 0, nr_uncategorized = 0,
		      nr_irq_safe = 0, nr_irq_unsafe = 0,
		      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
		      nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
		      nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
		      nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
		      nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
		      sum_forward_deps = 0;

#ifdef CONFIG_PROVE_LOCKING
	struct lock_class *class;
	unsigned long idx;

	iterate_lock_classes(idx, class) {
		if (!test_bit(idx, lock_classes_in_use))
			continue;

		/* usage_mask == 0: registered but never acquired. */
		if (class->usage_mask == 0)
			nr_unused++;
		/* Exactly LOCKF_USED: acquired, but in no special context. */
		if (class->usage_mask == LOCKF_USED)
			nr_uncategorized++;
		if (class->usage_mask & LOCKF_USED_IN_IRQ)
			nr_irq_safe++;
		if (class->usage_mask & LOCKF_ENABLED_IRQ)
			nr_irq_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
			nr_softirq_safe++;
		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
			nr_softirq_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
			nr_hardirq_safe++;
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
			nr_hardirq_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
			nr_irq_read_safe++;
		if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
			nr_irq_read_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
			nr_softirq_read_safe++;
		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
			nr_softirq_read_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
			nr_hardirq_read_safe++;
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
			nr_hardirq_read_unsafe++;

		sum_forward_deps += lockdep_count_forward_deps(class);
	}

#ifdef CONFIG_DEBUG_LOCKDEP
	/* Cross-check our count against lockdep's own unused-locks counter. */
	DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
#endif

#endif
	seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
			nr_lock_classes, MAX_LOCKDEP_KEYS);
	seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
			nr_list_entries, MAX_LOCKDEP_ENTRIES);
	seq_printf(m, " indirect dependencies: %11lu\n",
			sum_forward_deps);

	/*
	 * Total number of dependencies:
	 *
	 * All irq-safe locks may nest inside irq-unsafe locks,
	 * plus all the other known dependencies:
	 */
	seq_printf(m, " all direct dependencies: %11lu\n",
			nr_irq_unsafe * nr_irq_safe +
			nr_hardirq_unsafe * nr_hardirq_safe +
			nr_list_entries);

#ifdef CONFIG_PROVE_LOCKING
	seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
			lock_chain_count(), MAX_LOCKDEP_CHAINS);
	seq_printf(m, " dependency chain hlocks used: %11lu [max: %lu]\n",
			MAX_LOCKDEP_CHAIN_HLOCKS -
			(nr_free_chain_hlocks + nr_lost_chain_hlocks),
			MAX_LOCKDEP_CHAIN_HLOCKS);
	seq_printf(m, " dependency chain hlocks lost: %11u\n",
			nr_lost_chain_hlocks);
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	seq_printf(m, " in-hardirq chains: %11u\n",
			nr_hardirq_chains);
	seq_printf(m, " in-softirq chains: %11u\n",
			nr_softirq_chains);
#endif
	seq_printf(m, " in-process chains: %11u\n",
			nr_process_chains);
	seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
			nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
	seq_printf(m, " number of stack traces: %11llu\n",
		   lockdep_stack_trace_count());
	seq_printf(m, " number of stack hash chains: %11llu\n",
		   lockdep_stack_hash_count());
#endif
	seq_printf(m, " combined max dependencies: %11u\n",
			(nr_hardirq_chains + 1) *
			(nr_softirq_chains + 1) *
			(nr_process_chains + 1)
	);
	seq_printf(m, " hardirq-safe locks: %11lu\n",
			nr_hardirq_safe);
	seq_printf(m, " hardirq-unsafe locks: %11lu\n",
			nr_hardirq_unsafe);
	seq_printf(m, " softirq-safe locks: %11lu\n",
			nr_softirq_safe);
	seq_printf(m, " softirq-unsafe locks: %11lu\n",
			nr_softirq_unsafe);
	seq_printf(m, " irq-safe locks: %11lu\n",
			nr_irq_safe);
	seq_printf(m, " irq-unsafe locks: %11lu\n",
			nr_irq_unsafe);
	seq_printf(m, " hardirq-read-safe locks: %11lu\n",
			nr_hardirq_read_safe);
	seq_printf(m, " hardirq-read-unsafe locks: %11lu\n",
			nr_hardirq_read_unsafe);
	seq_printf(m, " softirq-read-safe locks: %11lu\n",
			nr_softirq_read_safe);
	seq_printf(m, " softirq-read-unsafe locks: %11lu\n",
			nr_softirq_read_unsafe);
	seq_printf(m, " irq-read-safe locks: %11lu\n",
			nr_irq_read_safe);
	seq_printf(m, " irq-read-unsafe locks: %11lu\n",
			nr_irq_read_unsafe);
	seq_printf(m, " uncategorized locks: %11lu\n",
			nr_uncategorized);
	seq_printf(m, " unused locks: %11lu\n",
			nr_unused);
	seq_printf(m, " max locking depth: %11u\n",
			max_lockdep_depth);
#ifdef CONFIG_PROVE_LOCKING
	seq_printf(m, " max bfs queue depth: %11u\n",
			max_bfs_queue_depth);
#endif
	seq_printf(m, " max lock class index: %11lu\n",
			max_lock_class_idx);
	lockdep_stats_debug_show(m);
	seq_printf(m, " debug_locks: %11u\n",
			debug_locks);

	/*
	 * Zapped classes and lockdep data buffers reuse statistics.
	 */
	seq_puts(m, "\n");
	seq_printf(m, " zapped classes: %11lu\n",
			nr_zapped_classes);
#ifdef CONFIG_PROVE_LOCKING
	seq_printf(m, " zapped lock chains: %11lu\n",
			nr_zapped_lock_chains);
	seq_printf(m, " large chain blocks: %11u\n",
			nr_large_chain_blocks);
#endif
	return 0;
}
#ifdef CONFIG_LOCK_STAT

/* One snapshot row of /proc/lock_stat: a class and its sampled stats. */
struct lock_stat_data {
	struct lock_class *class;
	struct lock_class_stats stats;
};

/*
 * Per-open snapshot of all lock statistics; stats[] is filled up to
 * iter_end at open time and sorted before display.
 */
struct lock_stat_seq {
	struct lock_stat_data *iter_end;
	struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
};
  356. /*
  357. * sort on absolute number of contentions
  358. */
  359. static int lock_stat_cmp(const void *l, const void *r)
  360. {
  361. const struct lock_stat_data *dl = l, *dr = r;
  362. unsigned long nl, nr;
  363. nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
  364. nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;
  365. return nr - nl;
  366. }
  367. static void seq_line(struct seq_file *m, char c, int offset, int length)
  368. {
  369. int i;
  370. for (i = 0; i < offset; i++)
  371. seq_puts(m, " ");
  372. for (i = 0; i < length; i++)
  373. seq_printf(m, "%c", c);
  374. seq_puts(m, "\n");
  375. }
/*
 * Format a time counter as "<nr/1000>.<2 decimals>" into buf. The +5
 * rounds the last displayed digit (rem is later divided by 10).
 */
static void snprint_time(char *buf, size_t bufsiz, s64 nr)
{
	s64 div;
	s32 rem;

	nr += 5; /* for display rounding */
	div = div_s64_rem(nr, 1000, &rem);
	snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
}
/* Print one formatted time value right-aligned in a 14-char column. */
static void seq_time(struct seq_file *m, s64 time)
{
	char num[15];

	snprint_time(num, sizeof(num), time);
	seq_printf(m, " %14s", num);
}
/* Print one lock_time record: count, min, max, total and average. */
static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
{
	seq_printf(m, "%14lu", lt->nr);
	seq_time(m, lt->min);
	seq_time(m, lt->max);
	seq_time(m, lt->total);
	/* Average = total/nr, guarding against division by zero. */
	seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
}
/*
 * Print one class's statistics rows for /proc/lock_stat: an optional -W
 * (write) line, an optional -R (read) line, and the recorded contention
 * and contending call sites.
 */
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
{
	const struct lockdep_subclass_key *ckey;
	struct lock_class_stats *stats;
	struct lock_class *class;
	const char *cname;
	int i, namelen;
	char name[39];

	class = data->class;
	stats = &data->stats;

	/* Reserve room in the 38-char name field for "#N" / "/N" suffixes. */
	namelen = 38;
	if (class->name_version > 1)
		namelen -= 2; /* XXX truncates versions > 9 */
	if (class->subclass)
		namelen -= 2;

	rcu_read_lock_sched();
	cname = rcu_dereference_sched(class->name);
	ckey = rcu_dereference_sched(class->key);

	if (!cname && !ckey) {
		/* Neither name nor key: class gone, nothing to print. */
		rcu_read_unlock_sched();
		return;
	} else if (!cname) {
		char str[KSYM_NAME_LEN];
		const char *key_name;

		key_name = __get_key_name(ckey, str);
		snprintf(name, namelen, "%s", key_name);
	} else {
		snprintf(name, namelen, "%s", cname);
	}
	rcu_read_unlock_sched();

	/* Append the version/subclass suffixes reserved for above. */
	namelen = strlen(name);
	if (class->name_version > 1) {
		snprintf(name+namelen, 3, "#%d", class->name_version);
		namelen += 2;
	}
	if (class->subclass) {
		snprintf(name+namelen, 3, "/%d", class->subclass);
		namelen += 2;
	}

	if (stats->write_holdtime.nr) {
		/* "-W" suffix only when a separate "-R" line follows. */
		if (stats->read_holdtime.nr)
			seq_printf(m, "%38s-W:", name);
		else
			seq_printf(m, "%40s:", name);

		seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
		seq_lock_time(m, &stats->write_waittime);
		seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
		seq_lock_time(m, &stats->write_holdtime);
		seq_puts(m, "\n");
	}

	if (stats->read_holdtime.nr) {
		seq_printf(m, "%38s-R:", name);
		seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
		seq_lock_time(m, &stats->read_waittime);
		seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
		seq_lock_time(m, &stats->read_holdtime);
		seq_puts(m, "\n");
	}

	/* No contention recorded: skip the call-site breakdown. */
	if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
		return;

	/* Account for the "-W"/"-R" suffix in the separator width. */
	if (stats->read_holdtime.nr)
		namelen += 2;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		char ip[32];

		if (class->contention_point[i] == 0)
			break;

		/* Underline the name before the first call-site row. */
		if (!i)
			seq_line(m, '-', 40-namelen, namelen);

		snprintf(ip, sizeof(ip), "[<%p>]",
				(void *)class->contention_point[i]);
		seq_printf(m, "%40s %14lu %29s %pS\n",
			   name, stats->contention_point[i],
			   ip, (void *)class->contention_point[i]);
	}
	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		char ip[32];

		if (class->contending_point[i] == 0)
			break;

		if (!i)
			seq_line(m, '-', 40-namelen, namelen);

		snprintf(ip, sizeof(ip), "[<%p>]",
				(void *)class->contending_point[i]);
		seq_printf(m, "%40s %14lu %29s %pS\n",
			   name, stats->contending_point[i],
			   ip, (void *)class->contending_point[i]);
	}
	/* Note: only reflects the second loop's final value of i. */
	if (i) {
		seq_puts(m, "\n");
		seq_line(m, '.', 0, 40 + 1 + 12 * (14 + 1));
		seq_puts(m, "\n");
	}
}
/*
 * Print the /proc/lock_stat banner and column headers: a 40-char class
 * name column plus 12 columns of width 14 (+1 separator each), matching
 * the seq_line() rule widths below.
 */
static void seq_header(struct seq_file *m)
{
	seq_puts(m, "lock_stat version 0.4\n");

	if (unlikely(!debug_locks))
		seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");

	seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
	seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s %14s %14s "
			"%14s %14s\n",
			"class name",
			"con-bounces",
			"contentions",
			"waittime-min",
			"waittime-max",
			"waittime-total",
			"waittime-avg",
			"acq-bounces",
			"acquisitions",
			"holdtime-min",
			"holdtime-max",
			"holdtime-total",
			"holdtime-avg");
	seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
	seq_printf(m, "\n");
}
  514. static void *ls_start(struct seq_file *m, loff_t *pos)
  515. {
  516. struct lock_stat_seq *data = m->private;
  517. struct lock_stat_data *iter;
  518. if (*pos == 0)
  519. return SEQ_START_TOKEN;
  520. iter = data->stats + (*pos - 1);
  521. if (iter >= data->iter_end)
  522. iter = NULL;
  523. return iter;
  524. }
  525. static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
  526. {
  527. (*pos)++;
  528. return ls_start(m, pos);
  529. }
/* Nothing to tear down: the snapshot lives until lock_stat_release(). */
static void ls_stop(struct seq_file *m, void *v)
{
}
  533. static int ls_show(struct seq_file *m, void *v)
  534. {
  535. if (v == SEQ_START_TOKEN)
  536. seq_header(m);
  537. else
  538. seq_stats(m, v);
  539. return 0;
  540. }
/* seq_file operations backing /proc/lock_stat. */
static const struct seq_operations lockstat_ops = {
	.start	= ls_start,
	.next	= ls_next,
	.stop	= ls_stop,
	.show	= ls_show,
};
/*
 * Open /proc/lock_stat: snapshot the statistics of every in-use lock
 * class into a vmalloc'ed lock_stat_seq, sort it by contention count,
 * and hand ownership of the buffer to the seq_file (freed in
 * lock_stat_release()).
 */
static int lock_stat_open(struct inode *inode, struct file *file)
{
	int res;
	struct lock_class *class;
	struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));

	if (!data)
		return -ENOMEM;

	res = seq_open(file, &lockstat_ops);
	if (!res) {
		struct lock_stat_data *iter = data->stats;
		struct seq_file *m = file->private_data;
		unsigned long idx;

		iterate_lock_classes(idx, class) {
			if (!test_bit(idx, lock_classes_in_use))
				continue;
			iter->class = class;
			iter->stats = lock_stats(class);
			iter++;
		}

		data->iter_end = iter;

		/* Most-contended classes first (see lock_stat_cmp()). */
		sort(data->stats, data->iter_end - data->stats,
				sizeof(struct lock_stat_data),
				lock_stat_cmp, NULL);

		m->private = data;
	} else
		/* seq_open() failed: we still own the buffer, free it. */
		vfree(data);

	return res;
}
/*
 * Write handler for /proc/lock_stat: writing a string starting with '0'
 * clears the statistics of every in-use lock class; any other input is
 * accepted but ignored. Always reports the full count as consumed.
 */
static ssize_t lock_stat_write(struct file *file, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct lock_class *class;
	unsigned long idx;
	char c;

	if (count) {
		/* Only the first byte of the write is examined. */
		if (get_user(c, buf))
			return -EFAULT;

		if (c != '0')
			return count;

		iterate_lock_classes(idx, class) {
			if (!test_bit(idx, lock_classes_in_use))
				continue;
			clear_lock_stats(class);
		}
	}
	return count;
}
  594. static int lock_stat_release(struct inode *inode, struct file *file)
  595. {
  596. struct seq_file *seq = file->private_data;
  597. vfree(seq->private);
  598. return seq_release(inode, file);
  599. }
/* proc_ops for /proc/lock_stat: seq_file reads plus the reset write hook. */
static const struct proc_ops lock_stat_proc_ops = {
	.proc_open	= lock_stat_open,
	.proc_write	= lock_stat_write,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= lock_stat_release,
};
#endif /* CONFIG_LOCK_STAT */
/*
 * Register the lockdep /proc entries at boot. Entries are root-readable
 * (S_IRUSR); lock_stat is additionally root-writable for stat reset.
 * Failed proc_create*() calls are deliberately not treated as fatal.
 */
static int __init lockdep_proc_init(void)
{
	proc_create_seq("lockdep", S_IRUSR, NULL, &lockdep_ops);
#ifdef CONFIG_PROVE_LOCKING
	proc_create_seq("lockdep_chains", S_IRUSR, NULL,
			&lockdep_chains_ops);
#endif
	proc_create_single("lockdep_stats", S_IRUSR, NULL, lockdep_stats_show);
#ifdef CONFIG_LOCK_STAT
	proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL,
		    &lock_stat_proc_ops);
#endif
	return 0;
}
__initcall(lockdep_proc_init);