/* fs/notify/notification.c - fsnotify group notification queue */
/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Basic idea behind the notification queue: An fsnotify group (like inotify)
 * sends the userspace notification about events asynchronously some time after
 * the event happened.  When inotify gets an event it will need to add that
 * event to the group notify queue.  Since a single event might need to be on
 * multiple group's notification queues we can't add the event directly to each
 * queue and instead add a small "event_holder" to each queue.  This event_holder
 * has a pointer back to the original event.  Since the majority of events are
 * going to end up on one, and only one, notification queue we embed one
 * event_holder into each event.  This means we have a single allocation instead
 * of always needing two.  If the embedded event_holder is already in use by
 * another group a new event_holder (from fsnotify_event_holder_cachep) will be
 * allocated and used.
 */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
  46. static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
  47. /**
  48. * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
  49. * Called from fsnotify_move, which is inlined into filesystem modules.
  50. */
  51. u32 fsnotify_get_cookie(void)
  52. {
  53. return atomic_inc_return(&fsnotify_sync_cookie);
  54. }
  55. EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
  56. /* return true if the notify queue is empty, false otherwise */
  57. bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
  58. {
  59. BUG_ON(!mutex_is_locked(&group->notification_mutex));
  60. return list_empty(&group->notification_list) ? true : false;
  61. }
  62. void fsnotify_destroy_event(struct fsnotify_group *group,
  63. struct fsnotify_event *event)
  64. {
  65. /* Overflow events are per-group and we don't want to free them */
  66. if (!event || event->mask == FS_Q_OVERFLOW)
  67. return;
  68. group->ops->free_event(event);
  69. }
  70. /*
  71. * Add an event to the group notification queue. The group can later pull this
  72. * event off the queue to deal with. The function returns 0 if the event was
  73. * added to the queue, 1 if the event was merged with some other queued event,
  74. * 2 if the queue of events has overflown.
  75. */
  76. int fsnotify_add_notify_event(struct fsnotify_group *group,
  77. struct fsnotify_event *event,
  78. int (*merge)(struct list_head *,
  79. struct fsnotify_event *))
  80. {
  81. int ret = 0;
  82. struct list_head *list = &group->notification_list;
  83. pr_debug("%s: group=%p event=%p\n", __func__, group, event);
  84. mutex_lock(&group->notification_mutex);
  85. if (group->q_len >= group->max_events) {
  86. ret = 2;
  87. /* Queue overflow event only if it isn't already queued */
  88. if (!list_empty(&group->overflow_event->list)) {
  89. mutex_unlock(&group->notification_mutex);
  90. return ret;
  91. }
  92. event = group->overflow_event;
  93. goto queue;
  94. }
  95. if (!list_empty(list) && merge) {
  96. ret = merge(list, event);
  97. if (ret) {
  98. mutex_unlock(&group->notification_mutex);
  99. return ret;
  100. }
  101. }
  102. queue:
  103. group->q_len++;
  104. list_add_tail(&event->list, list);
  105. mutex_unlock(&group->notification_mutex);
  106. wake_up(&group->notification_waitq);
  107. kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
  108. return ret;
  109. }
  110. /*
  111. * Remove and return the first event from the notification list. It is the
  112. * responsibility of the caller to destroy the obtained event
  113. */
  114. struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group)
  115. {
  116. struct fsnotify_event *event;
  117. BUG_ON(!mutex_is_locked(&group->notification_mutex));
  118. pr_debug("%s: group=%p\n", __func__, group);
  119. event = list_first_entry(&group->notification_list,
  120. struct fsnotify_event, list);
  121. /*
  122. * We need to init list head for the case of overflow event so that
  123. * check in fsnotify_add_notify_events() works
  124. */
  125. list_del_init(&event->list);
  126. group->q_len--;
  127. return event;
  128. }
  129. /*
  130. * This will not remove the event, that must be done with fsnotify_remove_notify_event()
  131. */
  132. struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group)
  133. {
  134. BUG_ON(!mutex_is_locked(&group->notification_mutex));
  135. return list_first_entry(&group->notification_list,
  136. struct fsnotify_event, list);
  137. }
  138. /*
  139. * Called when a group is being torn down to clean up any outstanding
  140. * event notifications.
  141. */
  142. void fsnotify_flush_notify(struct fsnotify_group *group)
  143. {
  144. struct fsnotify_event *event;
  145. mutex_lock(&group->notification_mutex);
  146. while (!fsnotify_notify_queue_is_empty(group)) {
  147. event = fsnotify_remove_notify_event(group);
  148. fsnotify_destroy_event(group, event);
  149. }
  150. mutex_unlock(&group->notification_mutex);
  151. }
  152. /*
  153. * fsnotify_create_event - Allocate a new event which will be sent to each
  154. * group's handle_event function if the group was interested in this
  155. * particular event.
  156. *
  157. * @inode the inode which is supposed to receive the event (sometimes a
  158. * parent of the inode to which the event happened.
  159. * @mask what actually happened.
  160. * @data pointer to the object which was actually affected
  161. * @data_type flag indication if the data is a file, path, inode, nothing...
  162. * @name the filename, if available
  163. */
  164. void fsnotify_init_event(struct fsnotify_event *event, struct inode *inode,
  165. u32 mask)
  166. {
  167. INIT_LIST_HEAD(&event->list);
  168. event->inode = inode;
  169. event->mask = mask;
  170. }