fanotify.c

#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "fanotify.h"

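/*
 * Two queued events describe the same thing, and so can be collapsed into
 * one, when they refer to the same inode, were generated by the same
 * thread group and carry the same mount/dentry pair.
 */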
static bool should_merge(struct fsnotify_event *old_fsn,
                         struct fsnotify_event *new_fsn)
{
        struct fanotify_event_info *old, *new;

        pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
        old = FANOTIFY_E(old_fsn);
        new = FANOTIFY_E(new_fsn);

        if (old_fsn->inode == new_fsn->inode && old->tgid == new->tgid &&
            old->path.mnt == new->path.mnt &&
            old->path.dentry == new->path.dentry)
                return true;
        return false;
}

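/*
 * Try to fold a new event into one already sitting on the group's
 * notification list: on success the existing entry's mask is OR'ed with the
 * new mask and 1 is returned so the caller can drop its copy; 0 means the
 * event still has to be queued.
 */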
/* and the list better be locked by something too! */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
        struct fsnotify_event *test_event;

        pr_debug("%s: list=%p event=%p\n", __func__, list, event);

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        /*
         * Don't merge a permission event with any other event so that we know
         * the event structure we have created in fanotify_handle_event() is the
         * one we should check for permission response.
         */
        if (event->mask & FAN_ALL_PERM_EVENTS)
                return 0;
#endif

        list_for_each_entry_reverse(test_event, list, list) {
                if (should_merge(test_event, event)) {
                        test_event->mask |= event->mask;
                        return 1;
                }
        }

        return 0;
}

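/*
 * Permission events (CONFIG_FANOTIFY_ACCESS_PERMISSIONS only): put the
 * triggering task to sleep until the listener answers.  Userspace replies
 * by writing a struct fanotify_response (an fd plus FAN_ALLOW or FAN_DENY)
 * to the fanotify file descriptor; that write path in fanotify_user.c sets
 * event->response and wakes group->fanotify_data.access_waitq.
 */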
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static int fanotify_get_response(struct fsnotify_group *group,
                                 struct fanotify_perm_event_info *event,
                                 struct fsnotify_iter_info *iter_info)
{
        int ret;

        pr_debug("%s: group=%p event=%p\n", __func__, group, event);

        /*
         * fsnotify_prepare_user_wait() fails if we race with mark deletion.
         * Just let the operation pass in that case.
         */
        if (!fsnotify_prepare_user_wait(iter_info)) {
                event->response = FAN_ALLOW;
                goto out;
        }

        wait_event(group->fanotify_data.access_waitq, event->response);

        fsnotify_finish_user_wait(iter_info);
out:
        /* userspace responded, convert to something usable */
        switch (event->response) {
        case FAN_ALLOW:
                ret = 0;
                break;
        case FAN_DENY:
        default:
                ret = -EPERM;
        }
        event->response = 0;

        pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
                 group, event, ret);

        return ret;
}
#endif

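/*
 * Filter an event against the marks that matched it: only path events on
 * regular files or directories are reported, and the event mask must
 * survive the union of the inode/mount mark masks minus the union of their
 * ignored masks.
 */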
static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
                                       struct fsnotify_mark *vfsmnt_mark,
                                       u32 event_mask,
                                       const void *data, int data_type)
{
        __u32 marks_mask, marks_ignored_mask;
        const struct path *path = data;

        pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
                 " data_type=%d\n", __func__, inode_mark, vfsmnt_mark,
                 event_mask, data, data_type);

        /* if we don't have enough info to send an event to userspace say no */
        if (data_type != FSNOTIFY_EVENT_PATH)
                return false;

        /* sorry, fanotify only gives a damn about files and dirs */
        if (!d_is_reg(path->dentry) &&
            !d_can_lookup(path->dentry))
                return false;

        if (inode_mark && vfsmnt_mark) {
                marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
                marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
        } else if (inode_mark) {
                /*
                 * if the event is for a child and this inode doesn't care about
                 * events on the child, don't send it!
                 */
                if ((event_mask & FS_EVENT_ON_CHILD) &&
                    !(inode_mark->mask & FS_EVENT_ON_CHILD))
                        return false;
                marks_mask = inode_mark->mask;
                marks_ignored_mask = inode_mark->ignored_mask;
        } else if (vfsmnt_mark) {
                marks_mask = vfsmnt_mark->mask;
                marks_ignored_mask = vfsmnt_mark->ignored_mask;
        } else {
                BUG();
        }

        if (d_is_dir(path->dentry) &&
            !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
                return false;

        if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask &
            ~marks_ignored_mask)
                return true;

        return false;
}

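/*
 * Allocate and set up an event.  Permission events come from their own slab
 * cache because they carry the extra ->response field; everything else
 * comes from fanotify_event_cachep.  The event takes a reference on the
 * reporting task's tgid and, when a path is supplied, on that path.
 */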
struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask,
                                                 const struct path *path)
{
        struct fanotify_event_info *event;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (mask & FAN_ALL_PERM_EVENTS) {
                struct fanotify_perm_event_info *pevent;

                pevent = kmem_cache_alloc(fanotify_perm_event_cachep,
                                          GFP_KERNEL);
                if (!pevent)
                        return NULL;
                event = &pevent->fae;
                pevent->response = 0;
                goto init;
        }
#endif
        event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
        if (!event)
                return NULL;
init: __maybe_unused
        fsnotify_init_event(&event->fse, inode, mask);
        event->tgid = get_pid(task_tgid(current));
        if (path) {
                event->path = *path;
                path_get(&event->path);
        } else {
                event->path.mnt = NULL;
                event->path.dentry = NULL;
        }
        return event;
}

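/*
 * Core fsnotify callback for fanotify groups: decide whether the event is
 * wanted, allocate it, queue it (or merge it into an already queued event)
 * and, for permission events, block until userspace delivers its verdict.
 */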
static int fanotify_handle_event(struct fsnotify_group *group,
                                 struct inode *inode,
                                 struct fsnotify_mark *inode_mark,
                                 struct fsnotify_mark *fanotify_mark,
                                 u32 mask, const void *data, int data_type,
                                 const unsigned char *file_name, u32 cookie,
                                 struct fsnotify_iter_info *iter_info)
{
        int ret = 0;
        struct fanotify_event_info *event;
        struct fsnotify_event *fsn_event;

        BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
        BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
        BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
        BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
        BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
        BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
        BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
        BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
        BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
        BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);

        if (!fanotify_should_send_event(inode_mark, fanotify_mark, mask, data,
                                        data_type))
                return 0;

        pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
                 mask);

        event = fanotify_alloc_event(inode, mask, data);
        if (unlikely(!event))
                return -ENOMEM;

        fsn_event = &event->fse;
        ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
        if (ret) {
                /* Permission events shouldn't be merged */
                BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS);
                /* Our event wasn't used in the end. Free it. */
                fsnotify_destroy_event(group, fsn_event);

                return 0;
        }

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (mask & FAN_ALL_PERM_EVENTS) {
                ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event),
                                            iter_info);
                fsnotify_destroy_event(group, fsn_event);
        }
#endif
        return ret;
}

static void fanotify_free_group_priv(struct fsnotify_group *group)
{
        struct user_struct *user;

        user = group->fanotify_data.user;
        atomic_dec(&user->fanotify_listeners);
        free_uid(user);
}

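/*
 * Drop the path and pid references taken in fanotify_alloc_event() and
 * return the event to the slab cache it was allocated from.
 */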
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
        struct fanotify_event_info *event;

        event = FANOTIFY_E(fsn_event);
        path_put(&event->path);
        put_pid(event->tgid);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (fsn_event->mask & FAN_ALL_PERM_EVENTS) {
                kmem_cache_free(fanotify_perm_event_cachep,
                                FANOTIFY_PE(fsn_event));
                return;
        }
#endif
        kmem_cache_free(fanotify_event_cachep, event);
}

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
        kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

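/*
 * Callback table handed to the fsnotify core when a fanotify group is
 * created (fsnotify_alloc_group() called from fanotify_init() in
 * fanotify_user.c).
 */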
const struct fsnotify_ops fanotify_fsnotify_ops = {
        .handle_event = fanotify_handle_event,
        .free_group_priv = fanotify_free_group_priv,
        .free_event = fanotify_free_event,
        .free_mark = fanotify_free_mark,
};