// ref_tracker.c — reference-count leak tracking infrastructure
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. #include <linux/export.h>
  3. #include <linux/ref_tracker.h>
  4. #include <linux/slab.h>
  5. #include <linux/stacktrace.h>
  6. #include <linux/stackdepot.h>
/* Maximum number of stack frames captured per allocation/free site. */
#define REF_TRACKER_STACK_ENTRIES 16

/* One tracked reference; lives on dir->list while held, then on
 * dir->quarantine after release (kept around to catch double frees).
 */
struct ref_tracker {
	struct list_head head; /* anchor into dir->list or dir->quarantine */
	bool dead;                               /* reference already released */
	depot_stack_handle_t alloc_stack_handle; /* where the ref was taken */
	depot_stack_handle_t free_stack_handle;  /* where the ref was freed */
};
  14. void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
  15. {
  16. struct ref_tracker *tracker, *n;
  17. unsigned long flags;
  18. bool leak = false;
  19. dir->dead = true;
  20. spin_lock_irqsave(&dir->lock, flags);
  21. list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
  22. list_del(&tracker->head);
  23. kfree(tracker);
  24. dir->quarantine_avail++;
  25. }
  26. list_for_each_entry_safe(tracker, n, &dir->list, head) {
  27. pr_err("leaked reference.\n");
  28. if (tracker->alloc_stack_handle)
  29. stack_depot_print(tracker->alloc_stack_handle);
  30. leak = true;
  31. list_del(&tracker->head);
  32. kfree(tracker);
  33. }
  34. spin_unlock_irqrestore(&dir->lock, flags);
  35. WARN_ON_ONCE(leak);
  36. WARN_ON_ONCE(refcount_read(&dir->untracked) != 1);
  37. WARN_ON_ONCE(refcount_read(&dir->no_tracker) != 1);
  38. }
  39. EXPORT_SYMBOL(ref_tracker_dir_exit);
  40. void ref_tracker_dir_print(struct ref_tracker_dir *dir,
  41. unsigned int display_limit)
  42. {
  43. struct ref_tracker *tracker;
  44. unsigned long flags;
  45. unsigned int i = 0;
  46. spin_lock_irqsave(&dir->lock, flags);
  47. list_for_each_entry(tracker, &dir->list, head) {
  48. if (i < display_limit) {
  49. pr_err("leaked reference.\n");
  50. if (tracker->alloc_stack_handle)
  51. stack_depot_print(tracker->alloc_stack_handle);
  52. i++;
  53. } else {
  54. break;
  55. }
  56. }
  57. spin_unlock_irqrestore(&dir->lock, flags);
  58. }
  59. EXPORT_SYMBOL(ref_tracker_dir_print);
  60. int ref_tracker_alloc(struct ref_tracker_dir *dir,
  61. struct ref_tracker **trackerp,
  62. gfp_t gfp)
  63. {
  64. unsigned long entries[REF_TRACKER_STACK_ENTRIES];
  65. struct ref_tracker *tracker;
  66. unsigned int nr_entries;
  67. gfp_t gfp_mask = gfp;
  68. unsigned long flags;
  69. WARN_ON_ONCE(dir->dead);
  70. if (!trackerp) {
  71. refcount_inc(&dir->no_tracker);
  72. return 0;
  73. }
  74. if (gfp & __GFP_DIRECT_RECLAIM)
  75. gfp_mask |= __GFP_NOFAIL;
  76. *trackerp = tracker = kzalloc(sizeof(*tracker), gfp_mask);
  77. if (unlikely(!tracker)) {
  78. pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
  79. refcount_inc(&dir->untracked);
  80. return -ENOMEM;
  81. }
  82. nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
  83. tracker->alloc_stack_handle = stack_depot_save(entries, nr_entries, gfp);
  84. spin_lock_irqsave(&dir->lock, flags);
  85. list_add(&tracker->head, &dir->list);
  86. spin_unlock_irqrestore(&dir->lock, flags);
  87. return 0;
  88. }
  89. EXPORT_SYMBOL_GPL(ref_tracker_alloc);
/* Track release of one reference on @dir.
 *
 * @trackerp: tracker returned by ref_tracker_alloc(); NULL means the
 *            reference was taken without tracking, *trackerp == NULL means
 *            the tracker allocation failed back then (untracked fallback).
 *
 * Returns 0 on success, -EEXIST for the untracked-allocation case,
 * -EINVAL (plus a WARN and both stack traces) on a double free.
 *
 * Released trackers are parked on dir->quarantine so a later double free
 * can still be diagnosed; once the quarantine budget is used up, the
 * oldest quarantined tracker is evicted to make room.
 */
int ref_tracker_free(struct ref_tracker_dir *dir,
struct ref_tracker **trackerp)
{
unsigned long entries[REF_TRACKER_STACK_ENTRIES];
depot_stack_handle_t stack_handle;
struct ref_tracker *tracker;
unsigned int nr_entries;
unsigned long flags;
WARN_ON_ONCE(dir->dead);
if (!trackerp) {
/* Counterpart of the no-tracking path in ref_tracker_alloc(). */
refcount_dec(&dir->no_tracker);
return 0;
}
tracker = *trackerp;
if (!tracker) {
/* Counterpart of the failed-allocation path in ref_tracker_alloc(). */
refcount_dec(&dir->untracked);
return -EEXIST;
}
/* Capture the free-site stack before taking the lock (skip this frame);
 * GFP_ATOMIC because callers may release references in atomic context.
 */
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC);
spin_lock_irqsave(&dir->lock, flags);
if (tracker->dead) {
/* Double free: show both the original alloc and the earlier free. */
pr_err("reference already released.\n");
if (tracker->alloc_stack_handle) {
pr_err("allocated in:\n");
stack_depot_print(tracker->alloc_stack_handle);
}
if (tracker->free_stack_handle) {
pr_err("freed in:\n");
stack_depot_print(tracker->free_stack_handle);
}
spin_unlock_irqrestore(&dir->lock, flags);
WARN_ON_ONCE(1);
return -EINVAL;
}
tracker->dead = true;
tracker->free_stack_handle = stack_handle;
/* Park the tracker at the quarantine tail (newest entries last). */
list_move_tail(&tracker->head, &dir->quarantine);
if (!dir->quarantine_avail) {
/* Budget exhausted: evict the oldest quarantined tracker. Note that
 * 'tracker' is reused as the eviction victim here — if the quarantine
 * held only the node just moved, that node itself is freed below.
 */
tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head);
list_del(&tracker->head);
} else {
dir->quarantine_avail--;
tracker = NULL;
}
spin_unlock_irqrestore(&dir->lock, flags);
/* Free the evicted victim (or NULL no-op) outside the lock. */
kfree(tracker);
return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_free);