fanotify.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/fanotify.h>
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> /* UINT_MAX */
#include <linux/mount.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/audit.h>

#include "fanotify.h"
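
/*
 * Two queued events may be coalesced when they describe the same object:
 * same inode, same path (mount + dentry) and same originating thread
 * group. fanotify_merge() then ORs their masks together.
 */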
static bool should_merge(struct fsnotify_event *old_fsn,
			 struct fsnotify_event *new_fsn)
{
	struct fanotify_event_info *old, *new;

	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn);
	old = FANOTIFY_E(old_fsn);
	new = FANOTIFY_E(new_fsn);

	if (old_fsn->inode == new_fsn->inode && old->tgid == new->tgid &&
	    old->path.mnt == new->path.mnt &&
	    old->path.dentry == new->path.dentry)
		return true;
	return false;
}
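
/*
 * Walk the notification queue back to front looking for an event this
 * one can be merged into. Returns 1 if the new event was absorbed by an
 * existing entry, 0 if it still has to be queued.
 */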
/* and the list better be locked by something too! */
static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
{
	struct fsnotify_event *test_event;

	pr_debug("%s: list=%p event=%p\n", __func__, list, event);

	/*
	 * Don't merge a permission event with any other event so that we know
	 * the event structure we have created in fanotify_handle_event() is the
	 * one we should check for permission response.
	 */
	if (fanotify_is_perm_event(event->mask))
		return 0;

	list_for_each_entry_reverse(test_event, list, list) {
		if (should_merge(test_event, event)) {
			test_event->mask |= event->mask;
			return 1;
		}
	}

	return 0;
}
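
/*
 * Sleep until userspace writes a response (FAN_ALLOW or FAN_DENY,
 * optionally ORed with FAN_AUDIT) for this permission event, then
 * translate it into 0 or -EPERM for the blocked operation.
 */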
static int fanotify_get_response(struct fsnotify_group *group,
				 struct fanotify_perm_event_info *event,
				 struct fsnotify_iter_info *iter_info)
{
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	wait_event(group->fanotify_data.access_waitq, event->response);

	/* userspace responded, convert to something usable */
	switch (event->response & ~FAN_AUDIT) {
	case FAN_ALLOW:
		ret = 0;
		break;
	case FAN_DENY:
	default:
		ret = -EPERM;
	}

	/* Check if the response should be audited */
	if (event->response & FAN_AUDIT)
		audit_fanotify(event->response & ~FAN_AUDIT);

	event->response = 0;

	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
		 group, event, ret);

	return ret;
}
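
/*
 * Filter events before queueing: fanotify only reports path events on
 * regular files and directories, and the event must still be set in the
 * combined mask of all marks found by fsnotify after their ignored
 * masks are applied.
 */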
static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
				       u32 event_mask, const void *data,
				       int data_type)
{
	__u32 marks_mask = 0, marks_ignored_mask = 0;
	const struct path *path = data;
	struct fsnotify_mark *mark;
	int type;

	pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
		 __func__, iter_info->report_mask, event_mask, data, data_type);

	/* if we don't have enough info to send an event to userspace say no */
	if (data_type != FSNOTIFY_EVENT_PATH)
		return false;

	/* sorry, fanotify only gives a damn about files and dirs */
	if (!d_is_reg(path->dentry) &&
	    !d_can_lookup(path->dentry))
		return false;

	fsnotify_foreach_obj_type(type) {
		if (!fsnotify_iter_should_report_type(iter_info, type))
			continue;
		mark = iter_info->marks[type];
		/*
		 * if the event is for a child and this inode doesn't care about
		 * events on the child, don't send it!
		 */
		if (type == FSNOTIFY_OBJ_TYPE_INODE &&
		    (event_mask & FS_EVENT_ON_CHILD) &&
		    !(mark->mask & FS_EVENT_ON_CHILD))
			continue;

		marks_mask |= mark->mask;
		marks_ignored_mask |= mark->ignored_mask;
	}

	if (d_is_dir(path->dentry) &&
	    !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
		return false;

	if (event_mask & FAN_ALL_OUTGOING_EVENTS & marks_mask &
	    ~marks_ignored_mask)
		return true;

	return false;
}
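
/*
 * Allocate an event for the notification queue. Permission events use
 * the larger fanotify_perm_event_info so the userspace response can be
 * stored; references on the path and the thread group pid are held
 * until fanotify_free_event().
 */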
struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
						 struct inode *inode, u32 mask,
						 const struct path *path)
{
	struct fanotify_event_info *event;
	gfp_t gfp = GFP_KERNEL;

	/*
	 * For queues with unlimited length lost events are not expected and
	 * can possibly have security implications. Avoid losing events when
	 * memory is short.
	 */
	if (group->max_events == UINT_MAX)
		gfp |= __GFP_NOFAIL;

	if (fanotify_is_perm_event(mask)) {
		struct fanotify_perm_event_info *pevent;

		pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
		if (!pevent)
			return NULL;
		event = &pevent->fae;
		pevent->response = 0;
		goto init;
	}
	event = kmem_cache_alloc(fanotify_event_cachep, gfp);
	if (!event)
		return NULL;
init: __maybe_unused
	fsnotify_init_event(&event->fse, inode, mask);
	event->tgid = get_pid(task_tgid(current));
	if (path) {
		event->path = *path;
		path_get(&event->path);
	} else {
		event->path.mnt = NULL;
		event->path.dentry = NULL;
	}
	return event;
}
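
/*
 * Main callback invoked by the fsnotify core: filter the event, queue it
 * (or merge it into an already queued one) and, for permission events,
 * wait for the userspace verdict before the triggering operation may
 * proceed.
 */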
static int fanotify_handle_event(struct fsnotify_group *group,
				 struct inode *inode,
				 u32 mask, const void *data, int data_type,
				 const unsigned char *file_name, u32 cookie,
				 struct fsnotify_iter_info *iter_info)
{
	int ret = 0;
	struct fanotify_event_info *event;
	struct fsnotify_event *fsn_event;

	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);

	if (!fanotify_should_send_event(iter_info, mask, data, data_type))
		return 0;

	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
		 mask);

	if (fanotify_is_perm_event(mask)) {
		/*
		 * fsnotify_prepare_user_wait() fails if we race with mark
		 * deletion. Just let the operation pass in that case.
		 */
		if (!fsnotify_prepare_user_wait(iter_info))
			return 0;
	}

	event = fanotify_alloc_event(group, inode, mask, data);
	ret = -ENOMEM;
	if (unlikely(!event)) {
		/*
		 * We don't queue overflow events for permission events as
		 * there the access is denied and so no event is in fact lost.
		 */
		if (!fanotify_is_perm_event(mask))
			fsnotify_queue_overflow(group);
		goto finish;
	}

	fsn_event = &event->fse;
	ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
	if (ret) {
		/* Permission events shouldn't be merged */
		BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS);
		/* Our event wasn't used in the end. Free it. */
		fsnotify_destroy_event(group, fsn_event);

		ret = 0;
	} else if (fanotify_is_perm_event(mask)) {
		ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event),
					    iter_info);
		fsnotify_destroy_event(group, fsn_event);
	}
finish:
	if (fanotify_is_perm_event(mask))
		fsnotify_finish_user_wait(iter_info);

	return ret;
}
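
/*
 * Group teardown: drop this listener from the owning user's count and
 * release the user_struct reference taken at group creation.
 */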
static void fanotify_free_group_priv(struct fsnotify_group *group)
{
	struct user_struct *user;

	user = group->fanotify_data.user;
	atomic_dec(&user->fanotify_listeners);
	free_uid(user);
}
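
/*
 * Drop the path and pid references taken in fanotify_alloc_event() and
 * return the event to the cache it was allocated from.
 */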
static void fanotify_free_event(struct fsnotify_event *fsn_event)
{
	struct fanotify_event_info *event;

	event = FANOTIFY_E(fsn_event);
	path_put(&event->path);
	put_pid(event->tgid);
	if (fanotify_is_perm_event(fsn_event->mask)) {
		kmem_cache_free(fanotify_perm_event_cachep,
				FANOTIFY_PE(fsn_event));
		return;
	}
	kmem_cache_free(fanotify_event_cachep, event);
}
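
/* Return a mark to the fanotify mark cache. */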
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}
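
/* Callbacks the fsnotify core uses for fanotify groups. */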
const struct fsnotify_ops fanotify_fsnotify_ops = {
	.handle_event = fanotify_handle_event,
	.free_group_priv = fanotify_free_group_priv,
	.free_event = fanotify_free_event,
	.free_mark = fanotify_free_mark,
};