/* fanotify.c */

#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "fanotify.h"

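/*
 * Decide whether two queued events can be coalesced: they must refer to the
 * same inode, come from the same thread group, and carry the same path.
 * The caller (fanotify_merge) ORs the masks of merged events together.
 */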
static bool should_merge(struct fsnotify_event *old_fsn,
                         struct fsnotify_event *new_fsn)
{
        struct fanotify_event_info *old, *new;

        pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
        old = FANOTIFY_E(old_fsn);
        new = FANOTIFY_E(new_fsn);

        if (old_fsn->inode == new_fsn->inode && old->tgid == new->tgid &&
            old->path.mnt == new->path.mnt &&
            old->path.dentry == new->path.dentry)
                return true;
        return false;
}

/*
 * Try to fold @event into an event already queued on @list; the list had
 * better be locked by the caller.  Returns 1 if the event was merged (its
 * mask OR-ed into an existing entry), 0 if it must be queued on its own.
 */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
        struct fsnotify_event *test_event;
        bool do_merge = false;

        pr_debug("%s: list=%p event=%p\n", __func__, list, event);

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        /*
         * Don't merge a permission event with any other event so that we know
         * the event structure we have created in fanotify_handle_event() is
         * the one we should check for permission response.
         */
        if (event->mask & FAN_ALL_PERM_EVENTS)
                return 0;
#endif

        list_for_each_entry_reverse(test_event, list, list) {
                if (should_merge(test_event, event)) {
                        do_merge = true;
                        break;
                }
        }

        if (!do_merge)
                return 0;

        test_event->mask |= event->mask;
        return 1;
}

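/*
 * Wait for userspace to answer a permission event (FAN_OPEN_PERM /
 * FAN_ACCESS_PERM).  The caller sleeps until a response arrives or the
 * group is being torn down (bypass_perm).  FAN_ALLOW maps to 0, anything
 * else to -EPERM.
 */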
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static int fanotify_get_response(struct fsnotify_group *group,
                                 struct fanotify_perm_event_info *event)
{
        int ret;

        pr_debug("%s: group=%p event=%p\n", __func__, group, event);

        wait_event(group->fanotify_data.access_waitq, event->response ||
                   atomic_read(&group->fanotify_data.bypass_perm));

        if (!event->response) { /* bypass_perm set */
                /*
                 * Event was canceled because group is being destroyed. Remove
                 * it from group's event list because we are responsible for
                 * freeing the permission event.
                 */
                fsnotify_remove_event(group, &event->fae.fse);
                return 0;
        }

        /* userspace responded, convert to something usable */
        switch (event->response) {
        case FAN_ALLOW:
                ret = 0;
                break;
        case FAN_DENY:
        default:
                ret = -EPERM;
        }
        event->response = 0;

        pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
                 group, event, ret);

        return ret;
}
#endif

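/*
 * Filter an event against the inode mark and/or mount mark before it is
 * queued.  fanotify only reports events that carry struct path data and only
 * for regular files and directories; ignored masks on either mark can
 * suppress the event.
 */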
static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
                                       struct fsnotify_mark *vfsmnt_mark,
                                       u32 event_mask,
                                       void *data, int data_type)
{
        __u32 marks_mask, marks_ignored_mask;
        struct path *path = data;

        pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
                 " data_type=%d\n", __func__, inode_mark, vfsmnt_mark,
                 event_mask, data, data_type);

        /* if we don't have enough info to send an event to userspace say no */
        if (data_type != FSNOTIFY_EVENT_PATH)
                return false;

        /* sorry, fanotify only gives a damn about files and dirs */
        if (!d_is_reg(path->dentry) &&
            !d_can_lookup(path->dentry))
                return false;

        if (inode_mark && vfsmnt_mark) {
                marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
                marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
        } else if (inode_mark) {
                /*
                 * if the event is for a child and this inode doesn't care about
                 * events on the child, don't send it!
                 */
                if ((event_mask & FS_EVENT_ON_CHILD) &&
                    !(inode_mark->mask & FS_EVENT_ON_CHILD))
                        return false;
                marks_mask = inode_mark->mask;
                marks_ignored_mask = inode_mark->ignored_mask;
        } else if (vfsmnt_mark) {
                marks_mask = vfsmnt_mark->mask;
                marks_ignored_mask = vfsmnt_mark->ignored_mask;
        } else {
                BUG();
        }

        if (d_is_dir(path->dentry) &&
            !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
                return false;

        if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask &
            ~marks_ignored_mask)
                return true;

        return false;
}

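/*
 * Allocate and initialize an event.  Permission events come from a separate
 * slab cache because they carry the extra response field.  The event takes
 * references on the reporting task's tgid and on the path, dropped again in
 * fanotify_free_event().
 */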
struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask,
                                                 struct path *path)
{
        struct fanotify_event_info *event;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (mask & FAN_ALL_PERM_EVENTS) {
                struct fanotify_perm_event_info *pevent;

                pevent = kmem_cache_alloc(fanotify_perm_event_cachep,
                                          GFP_KERNEL);
                if (!pevent)
                        return NULL;
                event = &pevent->fae;
                pevent->response = 0;
                goto init;
        }
#endif
        event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
        if (!event)
                return NULL;
init: __maybe_unused
        fsnotify_init_event(&event->fse, inode, mask);
        event->tgid = get_pid(task_tgid(current));
        if (path) {
                event->path = *path;
                path_get(&event->path);
        } else {
                event->path.mnt = NULL;
                event->path.dentry = NULL;
        }
        return event;
}

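/*
 * Main fsnotify callback: allocate an event, try to merge it into the
 * group's notification queue, and for permission events block until
 * userspace has responded.
 */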
static int fanotify_handle_event(struct fsnotify_group *group,
                                 struct inode *inode,
                                 struct fsnotify_mark *inode_mark,
                                 struct fsnotify_mark *fanotify_mark,
                                 u32 mask, void *data, int data_type,
                                 const unsigned char *file_name, u32 cookie)
{
        int ret = 0;
        struct fanotify_event_info *event;
        struct fsnotify_event *fsn_event;

        BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
        BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
        BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
        BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
        BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
        BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
        BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
        BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
        BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
        BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);

        if (!fanotify_should_send_event(inode_mark, fanotify_mark, mask, data,
                                        data_type))
                return 0;

        pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
                 mask);

        event = fanotify_alloc_event(inode, mask, data);
        if (unlikely(!event))
                return -ENOMEM;

        fsn_event = &event->fse;
        ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
        if (ret) {
                /* Permission events shouldn't be merged */
                BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS);
                /* Our event wasn't used in the end. Free it. */
                fsnotify_destroy_event(group, fsn_event);

                return 0;
        }

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (mask & FAN_ALL_PERM_EVENTS) {
                ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event));
                fsnotify_destroy_event(group, fsn_event);
        }
#endif
        return ret;
}

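/*
 * Release per-group accounting: drop the listener count and the reference on
 * the owning user.
 */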
static void fanotify_free_group_priv(struct fsnotify_group *group)
{
        struct user_struct *user;

        user = group->fanotify_data.user;
        atomic_dec(&user->fanotify_listeners);
        free_uid(user);
}

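/*
 * Tear down an event: drop the path and pid references taken in
 * fanotify_alloc_event() and return the memory to the matching slab cache.
 */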
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
        struct fanotify_event_info *event;

        event = FANOTIFY_E(fsn_event);
        path_put(&event->path);
        put_pid(event->tgid);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (fsn_event->mask & FAN_ALL_PERM_EVENTS) {
                kmem_cache_free(fanotify_perm_event_cachep,
                                FANOTIFY_PE(fsn_event));
                return;
        }
#endif
        kmem_cache_free(fanotify_event_cachep, event);
}

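/* Callbacks that plug fanotify into the fsnotify backend. */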
const struct fsnotify_ops fanotify_fsnotify_ops = {
        .handle_event = fanotify_handle_event,
        .free_group_priv = fanotify_free_group_priv,
        .free_event = fanotify_free_event,
};