/*
 * linux/fs/file.c
 *
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
/* our max() is unusable in constant expressions ;-/ */
#define __const_max(x, y) ((x) < (y) ? (x) : (y))
int sysctl_nr_open_max = __const_max(INT_MAX, ~(size_t)0/sizeof(void *)) &
			 -BITS_PER_LONG;

static void *alloc_fdmem(size_t size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by the VM.
	 */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
		if (data != NULL)
			return data;
	}
	return vmalloc(size);
}

static void __free_fdtable(struct fdtable *fdt)
{
	kvfree(fdt->fd);
	kvfree(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_rcu(struct rcu_head *rcu)
{
	__free_fdtable(container_of(rcu, struct fdtable, rcu));
}

/*
 * Expand the fdset in the files_struct.  Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}

static struct fdtable * alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	void *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
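	/*
	 * Illustrative worked example (not part of the original file): on a
	 * 64-bit kernel sizeof(struct file *) == 8, so a request for nr = 300
	 * becomes 300 / 128 = 2, roundup_pow_of_two(3) = 4, then
	 * 4 * 128 = 512 slots - i.e. a 4096-byte (exactly one page) fd array.
	 */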
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = data;

	data = alloc_fdmem(max_t(size_t,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = data;

	return fdt;

out_arr:
	kvfree(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}
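/*
 * Illustrative worked example (not part of the original file): for
 * max_fds = 512 on a 64-bit kernel with a 64-byte L1_CACHE_BYTES, the shared
 * bitmap buffer allocated above is max(2 * 512 / 8, 64) = 128 bytes, with
 * open_fds at offset 0 and close_on_exec starting 512 / 8 = 64 bytes in.
 */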
/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	/*
	 * Check again since another task may have expanded the fd table while
	 * we dropped the lock
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt != &files->fdtab)
			call_rcu(&cur_fdt->rcu, free_fdtable_rcu);
	} else {
		/* Somebody else expanded, so undo our attempt */
		__free_fdtable(new_fdt);
	}
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}

static inline void __set_close_on_exec(int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->close_on_exec);
}

static inline void __clear_close_on_exec(int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->close_on_exec);
}

static inline void __set_open_fd(int fd, struct fdtable *fdt)
{
	__set_bit(fd, fdt->open_fds);
}

static inline void __clear_open_fd(int fd, struct fdtable *fdt)
{
	__clear_bit(fd, fdt->open_fds);
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size / BITS_PER_LONG; i > 0; ) {
		if (fdt->open_fds[--i])
			break;
	}
	i = (i + 1) * BITS_PER_LONG;
	return i;
}
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = newf->close_on_exec_init;
	new_fdt->open_fds = newf->open_fds_init;
	new_fdt->fd = &newf->fd_array[0];

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * it may have grown a new, bigger fd table meanwhile, so
		 * we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds, old_fdt->open_fds, open_files / 8);
	memcpy(new_fdt->close_on_exec, old_fdt->close_on_exec, open_files / 8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			__clear_open_fd(open_files - i, new_fdt);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds - open_files) / 8;
		int start = open_files / BITS_PER_LONG;

		memset(&new_fdt->open_fds[start], 0, left);
		memset(&new_fdt->close_on_exec[start], 0, left);
	}

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}
static struct fdtable *close_files(struct files_struct * files)
{
	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
	int i, j = 0;

	for (;;) {
		unsigned long set;
		i = j * BITS_PER_LONG;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched_rcu_qs();
				}
			}
			i++;
			set >>= 1;
		}
	}

	return fdt;
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		struct fdtable *fdt = close_files(files);

		/* free the arrays if they are not embedded */
		if (fdt != &files->fdtab)
			__free_fdtable(fdt);
		kmem_cache_free(files_cachep, files);
	}
}
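/*
 * Illustrative sketch (not part of the original file): how another task's
 * descriptor table is typically borrowed, procfs-style, using the
 * get_files_struct()/put_files_struct() pair above.  The function name is
 * made up for the example.
 */
static int example_count_fds(struct task_struct *task)
{
	struct files_struct *files = get_files_struct(task);
	struct fdtable *fdt;
	int i, n = 0;

	if (!files)
		return -ESRCH;
	rcu_read_lock();
	fdt = files_fdtable(files);
	for (i = 0; i < fdt->max_fds; i++)
		if (fcheck_files(files, i))	/* valid under rcu_read_lock() */
			n++;
	rcu_read_unlock();
	put_files_struct(files);
	return n;
}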
void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= init_files.close_on_exec_init,
		.open_fds	= init_files.open_fds_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_files.file_lock),
};

/*
 * allocate a file descriptor, mark it busy.
 */
int __alloc_fd(struct files_struct *files,
	       unsigned start, unsigned end, unsigned flags)
{
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	error = -EMFILE;
	if (fd >= end)
		goto out;

	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_access_pointer(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}
static int alloc_fd(unsigned start, unsigned flags)
{
	return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
}

int get_unused_fd_flags(unsigned flags)
{
	return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
}
EXPORT_SYMBOL(get_unused_fd_flags);

static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__clear_open_fd(fd, fdt);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

void put_unused_fd(unsigned int fd)
{
	struct files_struct *files = current->files;
	spin_lock(&files->file_lock);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
}
EXPORT_SYMBOL(put_unused_fd);

/*
 * Install a file pointer in the fd array.
 *
 * The VFS is full of places where we drop the files lock between
 * setting the open_fds bitmap and installing the file in the file
 * array.  At any such point, we are vulnerable to a dup2() race
 * installing a file in the array before us.  We need to detect this and
 * fput() the struct file we are about to overwrite in this case.
 *
 * It should never happen - if we allow dup2() to do it, _really_ bad things
 * will follow.
 *
 * NOTE: __fd_install() variant is really, really low-level; don't
 * use it unless you are forced to by truly lousy API shoved down
 * your throat.  'files' *MUST* be either current->files or obtained
 * by get_files_struct(current) done by whoever had given it to you,
 * or really bad things will happen.  Normally you want to use
 * fd_install() instead.
 */
void __fd_install(struct files_struct *files, unsigned int fd,
		  struct file *file)
{
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
}

void fd_install(unsigned int fd, struct file *file)
{
	__fd_install(current->files, fd, file);
}

EXPORT_SYMBOL(fd_install);
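/*
 * Illustrative sketch (not part of the original file): the reserve-then-
 * publish pattern described above.  The descriptor is allocated first and the
 * struct file only becomes visible via fd_install() once it is fully set up;
 * on failure the reserved slot is returned with put_unused_fd().
 * 'example_get_file' is a stand-in for whatever produces the struct file.
 */
static int example_install(struct file *(*example_get_file)(void))
{
	struct file *file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;
	file = example_get_file();
	if (!file) {
		put_unused_fd(fd);
		return -ENFILE;
	}
	fd_install(fd, file);
	return fd;
}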
/*
 * The same warnings as for __alloc_fd()/__fd_install() apply here...
 */
int __close_fd(struct files_struct *files, unsigned fd)
{
	struct file *file;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	file = fdt->fd[fd];
	if (!file)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	__clear_close_on_exec(fd, fdt);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	return filp_close(file, files);

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
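/*
 * Illustrative sketch (not part of the original file): roughly how the
 * close(2) syscall in fs/open.c drives __close_fd().  The restart error
 * codes have to be squashed because the file table entry is already gone,
 * so the call cannot simply be retried.
 */
static long example_close(unsigned int fd)
{
	int retval = __close_fd(current->files, fd);

	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
	return retval;
}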
void do_close_on_exec(struct files_struct *files)
{
	unsigned i;
	struct fdtable *fdt;

	/* exec unshares first */
	spin_lock(&files->file_lock);
	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;
		fdt = files_fdtable(files);
		if (fd >= fdt->max_fds)
			break;
		set = fdt->close_on_exec[i];
		if (!set)
			continue;
		fdt->close_on_exec[i] = 0;
		for ( ; set ; fd++, set >>= 1) {
			struct file *file;
			if (!(set & 1))
				continue;
			file = fdt->fd[fd];
			if (!file)
				continue;
			rcu_assign_pointer(fdt->fd[fd], NULL);
			__put_unused_fd(files, fd);
			spin_unlock(&files->file_lock);
			filp_close(file, files);
			cond_resched();
			spin_lock(&files->file_lock);
		}
	}
	spin_unlock(&files->file_lock);
}
static struct file *__fget(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken */
		if ((file->f_mode & mask) ||
		    !atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}

struct file *fget(unsigned int fd)
{
	return __fget(fd, FMODE_PATH);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
	return __fget(fd, 0);
}
EXPORT_SYMBOL(fget_raw);
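/*
 * Illustrative sketch (not part of the original file): the long-lived
 * reference pattern.  fget() takes a real reference on the struct file, so
 * the pointer stays valid across sleeps; it must be balanced with fput().
 * The function name is made up for the example.
 */
static loff_t example_file_size(unsigned int fd)
{
	struct file *file = fget(fd);
	loff_t size;

	if (!file)
		return -EBADF;
	size = i_size_read(file_inode(file));
	fput(file);
	return size;
}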
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
	struct files_struct *files = current->files;
	struct file *file;

	if (atomic_read(&files->count) == 1) {
		file = __fcheck_files(files, fd);
		if (!file || unlikely(file->f_mode & mask))
			return 0;
		return (unsigned long)file;
	} else {
		file = __fget(fd, mask);
		if (!file)
			return 0;
		return FDPUT_FPUT | (unsigned long)file;
	}
}

unsigned long __fdget(unsigned int fd)
{
	return __fget_light(fd, FMODE_PATH);
}
EXPORT_SYMBOL(__fdget);

unsigned long __fdget_raw(unsigned int fd)
{
	return __fget_light(fd, 0);
}
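/*
 * Illustrative sketch (not part of the original file): the lightweight borrow
 * that __fdget() enables.  The fdget()/fdput() helpers from <linux/file.h>
 * unpack the value returned above into a struct fd and only pay for the
 * atomic refcount when the descriptor table is shared.  The function name is
 * made up for the example.
 */
static int example_peek_flags(unsigned int fd)
{
	struct fd f = fdget(fd);
	int ret;

	if (!f.file)
		return -EBADF;
	ret = f.file->f_flags;
	fdput(f);
	return ret;
}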
unsigned long __fdget_pos(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = (struct file *)(v & ~3);

	if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
		if (file_count(file) > 1) {
			v |= FDPUT_POS_UNLOCK;
			mutex_lock(&file->f_pos_lock);
		}
	}
	return v;
}

/*
 * We only lock f_pos if we have threads or if the file might be
 * shared with another process. In both cases we'll have an elevated
 * file count (done either by fdget() or by fork()).
 */
void set_close_on_exec(unsigned int fd, int flag)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (flag)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);
}

bool get_close_on_exec(unsigned int fd)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;
	bool res;
	rcu_read_lock();
	fdt = files_fdtable(files);
	res = close_on_exec(fd, fdt);
	rcu_read_unlock();
	return res;
}

static int do_dup2(struct files_struct *files,
	struct file *file, unsigned fd, unsigned flags)
__releases(&files->file_lock)
{
	struct file *tofree;
	struct fdtable *fdt;

	/*
	 * We need to detect attempts to do dup2() over an allocated but still
	 * not finished descriptor.  NB: OpenBSD avoids that at the price of
	 * extra work in their equivalent of fget() - they insert struct
	 * file immediately after grabbing descriptor, mark it larval if
	 * more work (e.g. actual opening) is needed and make sure that
	 * fget() treats larval files as absent.  Potentially interesting,
	 * but while extra work in fget() is trivial, locking implications
	 * and amount of surgery on open()-related paths in VFS are not.
	 * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
	 * deadlocks in rather amusing ways, AFAICS.  All of that is out of
	 * scope of POSIX or SUS, since neither considers shared descriptor
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
	get_file(file);
	rcu_assign_pointer(fdt->fd[fd], file);
	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	spin_unlock(&files->file_lock);

	if (tofree)
		filp_close(tofree, files);

	return fd;

Ebusy:
	spin_unlock(&files->file_lock);
	return -EBUSY;
}

int replace_fd(unsigned fd, struct file *file, unsigned flags)
{
	int err;
	struct files_struct *files = current->files;

	if (!file)
		return __close_fd(files, fd);

	if (fd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, fd);
	if (unlikely(err < 0))
		goto out_unlock;
	return do_dup2(files, file, fd, flags);

out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}
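/*
 * Illustrative sketch (not part of the original file): replace_fd() lets an
 * in-kernel caller (the usermode-helper code does something similar) wire a
 * prepared struct file over a well-known descriptor of the current task,
 * here stdin.  The wrapper name is made up for the example.
 */
static int example_redirect_stdin(struct file *file)
{
	return replace_fd(0, file, 0);
}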
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
	int err = -EBADF;
	struct file *file;
	struct files_struct *files = current->files;

	if ((flags & ~O_CLOEXEC) != 0)
		return -EINVAL;

	if (unlikely(oldfd == newfd))
		return -EINVAL;

	if (newfd >= rlimit(RLIMIT_NOFILE))
		return -EBADF;

	spin_lock(&files->file_lock);
	err = expand_files(files, newfd);
	file = fcheck(oldfd);
	if (unlikely(!file))
		goto Ebadf;
	if (unlikely(err < 0)) {
		if (err == -EMFILE)
			goto Ebadf;
		goto out_unlock;
	}
	return do_dup2(files, file, newfd, flags);

Ebadf:
	err = -EBADF;
out_unlock:
	spin_unlock(&files->file_lock);
	return err;
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
	if (unlikely(newfd == oldfd)) { /* corner case */
		struct files_struct *files = current->files;
		int retval = oldfd;

		rcu_read_lock();
		if (!fcheck_files(files, oldfd))
			retval = -EBADF;
		rcu_read_unlock();
		return retval;
	}
	return sys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
	int ret = -EBADF;
	struct file *file = fget_raw(fildes);

	if (file) {
		ret = get_unused_fd_flags(0);
		if (ret >= 0)
			fd_install(ret, file);
		else
			fput(file);
	}
	return ret;
}

int f_dupfd(unsigned int from, struct file *file, unsigned flags)
{
	int err;
	if (from >= rlimit(RLIMIT_NOFILE))
		return -EINVAL;
	err = alloc_fd(from, flags);
	if (err >= 0) {
		get_file(file);
		fd_install(err, file);
	}
	return err;
}

int iterate_fd(struct files_struct *files, unsigned n,
		int (*f)(const void *, struct file *, unsigned),
		const void *p)
{
	struct fdtable *fdt;
	int res = 0;
	if (!files)
		return 0;
	spin_lock(&files->file_lock);
	for (fdt = files_fdtable(files); n < fdt->max_fds; n++) {
		struct file *file;
		file = rcu_dereference_check_fdtable(files, fdt->fd[n]);
		if (!file)
			continue;
		res = f(p, file, n);
		if (res)
			break;
	}
	spin_unlock(&files->file_lock);
	return res;
}
EXPORT_SYMBOL(iterate_fd);
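/*
 * Illustrative sketch (not part of the original file): iterate_fd() walks the
 * table under ->file_lock and stops at the first non-zero value returned by
 * the callback, so a search callback can return fd + 1 to distinguish a hit
 * on fd 0 from "not found".  Both function names are made up for the example.
 */
static int example_match(const void *p, struct file *file, unsigned fd)
{
	return file == p ? fd + 1 : 0;
}

static int example_find_fd(struct files_struct *files, struct file *file)
{
	return iterate_fd(files, 0, example_match, file) - 1;
}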