inotify_user.c

/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* fs_initcall */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched/signal.h>
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>
/* configurable via /proc/sys/fs/inotify/ */
static int inotify_max_queued_events __read_mostly;

struct kmem_cache *inotify_inode_mark_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

struct ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_user_watches",
		.data		= &init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES],
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
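
/*
 * The three entries above surface as /proc/sys/fs/inotify/max_user_instances,
 * /proc/sys/fs/inotify/max_user_watches and
 * /proc/sys/fs/inotify/max_queued_events when CONFIG_SYSCTL is enabled; the
 * defaults (128, 8192 and 16384 respectively) are assigned in
 * inotify_user_setup() at the bottom of this file.
 */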
static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * Every mark accepts its own IN_IGNORED, cares about children,
	 * and should receive events when the inode is unmounted.
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}
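
/*
 * Worked example (illustrative): a watch added with
 * arg = IN_MODIFY | IN_ONESHOT is stored with
 * mask = FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT | FS_MODIFY |
 * FS_IN_ONESHOT, while inotify_mask_to_arg() later strips the
 * kernel-internal bits so that userspace only ever sees IN_MODIFY (plus
 * IN_ISDIR/IN_UNMOUNT/IN_IGNORED/IN_Q_OVERFLOW where applicable).
 */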
/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	spin_lock(&group->notification_lock);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	spin_unlock(&group->notification_lock);

	return ret;
}
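
/*
 * Usage sketch (illustrative userspace code): because the inotify fd
 * supports poll(), it can be multiplexed with other file descriptors:
 *
 *	struct pollfd pfd = { .fd = inotify_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		;	// at least one event is queued, read() will not block
 */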
static int round_event_name_len(struct fsnotify_event *fsn_event)
{
	struct inotify_event_info *event;

	event = INOTIFY_E(fsn_event);
	if (!event->name_len)
		return 0;
	return roundup(event->name_len + 1, sizeof(struct inotify_event));
}
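
/*
 * Worked example (assuming the usual 16-byte struct inotify_event): a name
 * of 5 characters needs 5 + 1 = 6 bytes including the terminating '\0',
 * which roundup() pads to 16; a 20-character name needs 21 bytes and is
 * padded to 32. An event with no name contributes 0 extra bytes.
 */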
/*
 * Get an inotify_kernel_event if one exists and is small
 * enough to fit in "count". Return an error pointer if the
 * event is too large for the buffer.
 *
 * Called with the group->notification_lock held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_first_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	event_size += round_event_name_len(event);
	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/*
	 * We held the notification_lock the whole time, so this is the
	 * same event we peeked above.
	 */
	fsnotify_remove_first_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *fsn_event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct inotify_event_info *event;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len;
	size_t pad_name_len;

	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event);

	event = INOTIFY_E(fsn_event);
	name_len = event->name_len;
	/*
	 * Round up the name length, plus an extra byte for the terminating
	 * '\0', so it is a multiple of event_size.
	 */
	pad_name_len = round_event_name_len(fsn_event);
	inotify_event.len = pad_name_len;
	inotify_event.mask = inotify_mask_to_arg(fsn_event->mask);
	inotify_event.wd = event->wd;
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the
	 * pathname and then pad that pathname out to a multiple of
	 * sizeof(inotify_event) with zeros.
	 */
	if (pad_name_len) {
		/* copy the path name */
		if (copy_to_user(buf, event->name, name_len))
			return -EFAULT;
		buf += name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, pad_name_len - name_len))
			return -EFAULT;
		event_size += pad_name_len;
	}

	return event_size;
}
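
/*
 * The layout produced above is a struct inotify_event header immediately
 * followed by the padded name (when present). A userspace reader therefore
 * walks the buffer like this (illustrative sketch):
 *
 *	char buf[4096];
 *	ssize_t len = read(inotify_fd, buf, sizeof(buf));
 *
 *	for (char *p = buf; p < buf + len;
 *	     p += sizeof(struct inotify_event) +
 *		  ((struct inotify_event *)p)->len) {
 *		struct inotify_event *ev = (struct inotify_event *)p;
 *		// ev->wd, ev->mask, ev->cookie and ev->name are now valid
 *	}
 */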
static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	start = buf;
	group = file->private_data;

	add_wait_queue(&group->notification_waitq, &wait);
	while (1) {
		spin_lock(&group->notification_lock);
		kevent = get_one_event(group, count);
		spin_unlock(&group->notification_lock);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&group->notification_waitq, &wait);

	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching the get in inotify_init1() -> inotify_new_group() */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		spin_lock(&group->notification_lock);
		list_for_each_entry(fsn_event, &group->notification_list,
				    list) {
			send_len += sizeof(struct inotify_event);
			send_len += round_event_name_len(fsn_event);
		}
		spin_unlock(&group->notification_lock);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
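
/*
 * FIONREAD reports how many bytes a read() would need to drain the whole
 * queue, which lets userspace size its buffer exactly (illustrative sketch):
 *
 *	int pending = 0;
 *
 *	if (ioctl(inotify_fd, FIONREAD, &pending) == 0 && pending > 0)
 *		;	// allocate a 'pending'-byte buffer, then read()
 */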
static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};

/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							   int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}
/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr?  we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove.  The idr bookkeeping
	 * is seriously broken somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p\n", __func__, i_mark,
			i_mark->wd, i_mark->fsn_mark.group, found_i_mark,
			found_i_mark->wd, found_i_mark->fsn_mark.group);
		goto out;
	}

	/*
	 * One ref for being in the idr,
	 * one ref grabbed by inotify_idr_find.
	 */
	if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 2)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group);
		/* we can't really recover with bad ref counting... */
		BUG();
	}

	idr_remove(idr, wd);
	/* Removed from the idr, drop that ref. */
	fsnotify_put_mark(&i_mark->fsn_mark);
out:
	i_mark->wd = -1;
	spin_unlock(idr_lock);
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
}
/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;

	/* Queue ignore event for the watch */
	inotify_handle_event(group, NULL, fsn_mark, NULL, FS_IN_IGNORED,
			     NULL, FSNOTIFY_EVENT_NONE, NULL, 0, NULL);

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	dec_inotify_watches(group->inotify_data.ucounts);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group);
	if (!fsn_mark)
		return -ENOENT;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);
	old_mask = fsn_mark->mask;
	if (add)
		fsn_mark->mask |= mask;
	else
		fsn_mark->mask = mask;
	new_mask = fsn_mark->mask;
	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_mask(inode->i_fsnotify_marks);
	}

	/* return the wd */
	ret = i_mark->wd;

	/* match the get from fsnotify_find_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}
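
/*
 * Worked example: if an inode already has a watch for IN_CREATE and
 * userspace calls inotify_add_watch() again with IN_DELETE | IN_MASK_ADD,
 * the mark's mask becomes IN_CREATE | IN_DELETE; without IN_MASK_ADD the
 * second call simply replaces the mask with IN_DELETE. Either way the
 * existing watch descriptor is returned unchanged.
 */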
static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, group);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* increment the number of watches the user has */
	if (!inc_inotify_watches(group->inotify_data.ucounts)) {
		inotify_remove_from_idr(group, tmp_i_mark);
		ret = -ENOSPC;
		goto out_err;
	}

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_mark_locked(&tmp_i_mark->fsn_mark, inode, NULL, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

	mutex_lock(&group->mark_mutex);
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	mutex_unlock(&group->mark_mutex);

	return ret;
}
static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;
	struct inotify_event_info *oevent;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
	if (unlikely(!oevent)) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-ENOMEM);
	}
	group->overflow_event = &oevent->fse;
	fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
	oevent->wd = -1;
	oevent->sync_cookie = 0;
	oevent->name_len = 0;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.ucounts = inc_ucount(current_user_ns(),
						 current_euid(),
						 UCOUNT_INOTIFY_INSTANCES);

	if (!group->inotify_data.ucounts) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}
/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/* inotify_new_group() took a reference on the group; we drop it when the file is released */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/*
	 * We share a lot of code with fs/dnotify.  We also share
	 * the bit layout between inotify's IN_* and the fsnotify
	 * FS_*.  This check ensures that only the inotify IN_*
	 * bits get passed in and set in watches/events.
	 */
	if (unlikely(mask & ~ALL_INOTIFY_BITS))
		return -EINVAL;
	/*
	 * Require at least one valid bit set in the mask.
	 * Without _something_ set, we would have no events to
	 * watch for.
	 */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = 0;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	ret = -EINVAL;
	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}
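
/*
 * End-to-end usage sketch (illustrative userspace code, using the glibc
 * wrappers for the three syscalls defined above; error handling omitted):
 *
 *	#include <sys/inotify.h>
 *	#include <unistd.h>
 *
 *	int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *
 *	char buf[4096];
 *	ssize_t n = read(fd, buf, sizeof(buf));	// events, if any are queued
 *
 *	inotify_rm_watch(fd, wd);		// queues IN_IGNORED for wd
 *	close(fd);				// tears down the whole instance
 */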
/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
	init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = 8192;

	return 0;
}
fs_initcall(inotify_user_setup);