// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <[email protected]>
 */

/*
 * Basic idea behind the notification queue: An fsnotify group (like inotify)
 * sends the userspace notification about events asynchronously some time after
 * the event happened. When inotify gets an event it will need to add that
 * event to the group notify queue. Since a single event might need to be on
 * multiple groups' notification queues we can't add the event directly to each
 * queue and instead add a small "event_holder" to each queue. This event_holder
 * has a pointer back to the original event. Since the majority of events are
 * going to end up on one, and only one, notification queue we embed one
 * event_holder into each event. This means we have a single allocation instead
 * of always needing two. If the embedded event_holder is already in use by
 * another group a new event_holder (from fsnotify_event_holder_cachep) will be
 * allocated and used.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);

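/*
 * Illustrative sketch only, not part of this file's API: as the header
 * comment above describes, a backend embeds the generic event in its own
 * event structure so the embedded list head can link it on a group's
 * notification list without a second allocation. All "example_*" names
 * below are assumptions for illustration.
 */
#if 0
struct example_backend_event {
	struct fsnotify_event fse;	/* embedded; fse.list links into a queue */
	u32 mask;			/* backend-specific payload */
};

static void example_event_init(struct example_backend_event *ev, u32 mask)
{
	/* backends normally use fsnotify_init_event() for this */
	INIT_LIST_HEAD(&ev->fse.list);
	ev->mask = mask;
}
#endif
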
/**
 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
 * Called from fsnotify_move, which is inlined into filesystem modules.
 */
u32 fsnotify_get_cookie(void)
{
	return atomic_inc_return(&fsnotify_sync_cookie);
}
EXPORT_SYMBOL_GPL(fsnotify_get_cookie);

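/*
 * Illustrative sketch only: both halves of a rename carry the same cookie so
 * that userspace (e.g. inotify's IN_MOVED_FROM/IN_MOVED_TO pair) can
 * correlate them. example_tag_rename() is a hypothetical helper, not a
 * kernel API.
 */
#if 0
static void example_tag_rename(u32 *from_cookie, u32 *to_cookie)
{
	u32 cookie = fsnotify_get_cookie();

	*from_cookie = cookie;	/* carried by the FS_MOVED_FROM event */
	*to_cookie = cookie;	/* carried by the FS_MOVED_TO event */
}
#endif
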
void fsnotify_destroy_event(struct fsnotify_group *group,
			    struct fsnotify_event *event)
{
	/* Overflow events are per-group and we don't want to free them */
	if (!event || event == group->overflow_event)
		return;
	/*
	 * If the event is still queued, we have a problem... Do an unreliable
	 * lockless check first to avoid locking in the common case. The
	 * locking may be necessary for permission events which got removed
	 * from the list by a different CPU than the one freeing the event.
	 */
	if (!list_empty(&event->list)) {
		spin_lock(&group->notification_lock);
		WARN_ON(!list_empty(&event->list));
		spin_unlock(&group->notification_lock);
	}
	group->ops->free_event(group, event);
}

/*
 * Try to add an event to the notification queue.
 * The group can later pull this event off the queue to deal with.
 * The group can use the @merge hook to merge the event with a queued event.
 * The group can use the @insert hook to insert the event into a hash table.
 * The function returns:
 * 0 if the event was added to a queue
 * 1 if the event was merged with some other queued event
 * 2 if the event was not queued - either the queue of events has overflowed
 *   or the group is shutting down.
 */
int fsnotify_insert_event(struct fsnotify_group *group,
			  struct fsnotify_event *event,
			  int (*merge)(struct fsnotify_group *,
				       struct fsnotify_event *),
			  void (*insert)(struct fsnotify_group *,
					 struct fsnotify_event *))
{
	int ret = 0;
	struct list_head *list = &group->notification_list;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	spin_lock(&group->notification_lock);

	if (group->shutdown) {
		spin_unlock(&group->notification_lock);
		return 2;
	}

	if (event == group->overflow_event ||
	    group->q_len >= group->max_events) {
		ret = 2;
		/* Queue overflow event only if it isn't already queued */
		if (!list_empty(&group->overflow_event->list)) {
			spin_unlock(&group->notification_lock);
			return ret;
		}
		event = group->overflow_event;
		goto queue;
	}

	if (!list_empty(list) && merge) {
		ret = merge(group, event);
		if (ret) {
			spin_unlock(&group->notification_lock);
			return ret;
		}
	}

queue:
	group->q_len++;
	list_add_tail(&event->list, list);
	if (insert)
		insert(group, event);
	spin_unlock(&group->notification_lock);

	wake_up(&group->notification_waitq);
	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
	return ret;
}

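/*
 * Illustrative usage sketch only, building on the example_backend_event
 * sketch near the top of the file: a hypothetical backend queues an event
 * and coalesces it with the tail of the queue via the @merge hook. The merge
 * hook runs under notification_lock with a non-empty list, so looking at
 * list.prev is safe. All "example_*" names are assumptions.
 */
#if 0
static int example_merge(struct fsnotify_group *group,
			 struct fsnotify_event *event)
{
	struct example_backend_event *incoming, *tail;

	incoming = container_of(event, struct example_backend_event, fse);
	tail = container_of(group->notification_list.prev,
			    struct example_backend_event, fse.list);

	/* report "merged" (1) when the tail event already carries this mask */
	return tail->mask == incoming->mask;
}

static int example_queue_event(struct fsnotify_group *group,
			       struct example_backend_event *ev)
{
	int ret;

	ret = fsnotify_insert_event(group, &ev->fse, example_merge, NULL);
	if (ret)	/* 1: merged with the tail, 2: dropped (overflow/shutdown) */
		fsnotify_destroy_event(group, &ev->fse);
	return ret;
}
#endif
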
void fsnotify_remove_queued_event(struct fsnotify_group *group,
				  struct fsnotify_event *event)
{
	assert_spin_locked(&group->notification_lock);
	/*
	 * We need to init the list head for the case of an overflow event
	 * so that the check in fsnotify_add_event() works.
	 */
	list_del_init(&event->list);
	group->q_len--;
}

/*
 * Return the first event on the notification list without removing it.
 * Returns NULL if the list is empty.
 */
struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
{
	assert_spin_locked(&group->notification_lock);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	return list_first_entry(&group->notification_list,
				struct fsnotify_event, list);
}

/*
 * Remove and return the first event from the notification list. It is the
 * responsibility of the caller to destroy the obtained event.
 */
struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
{
	struct fsnotify_event *event = fsnotify_peek_first_event(group);

	if (!event)
		return NULL;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	fsnotify_remove_queued_event(group, event);

	return event;
}

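/*
 * Illustrative sketch only of the consumer pattern these helpers imply:
 * dequeue under notification_lock, then process and destroy the event after
 * dropping the lock (compare fsnotify_flush_notify() below).
 * example_dequeue() is a hypothetical helper, not a kernel API.
 */
#if 0
static struct fsnotify_event *example_dequeue(struct fsnotify_group *group)
{
	struct fsnotify_event *event;

	spin_lock(&group->notification_lock);
	event = fsnotify_remove_first_event(group);	/* NULL if queue is empty */
	spin_unlock(&group->notification_lock);

	/* caller must call fsnotify_destroy_event() when done with it */
	return event;
}
#endif
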
/*
 * Called when a group is being torn down to clean up any outstanding
 * event notifications.
 */
void fsnotify_flush_notify(struct fsnotify_group *group)
{
	struct fsnotify_event *event;

	spin_lock(&group->notification_lock);
	while (!fsnotify_notify_queue_is_empty(group)) {
		event = fsnotify_remove_first_event(group);
		spin_unlock(&group->notification_lock);
		fsnotify_destroy_event(group, event);
		spin_lock(&group->notification_lock);
	}
	spin_unlock(&group->notification_lock);
}