  1. /*
  2. * linux/fs/stat.c
  3. *
  4. * Copyright (C) 1991, 1992 Linus Torvalds
  5. */
  6. #include <linux/export.h>
  7. #include <linux/mm.h>
  8. #include <linux/errno.h>
  9. #include <linux/file.h>
  10. #include <linux/highuid.h>
  11. #include <linux/fs.h>
  12. #include <linux/namei.h>
  13. #include <linux/security.h>
  14. #include <linux/cred.h>
  15. #include <linux/syscalls.h>
  16. #include <linux/pagemap.h>
  17. #include <linux/compat.h>
  18. #include <linux/uaccess.h>
  19. #include <asm/unistd.h>
/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @inode: Inode to use as the source
 * @stat: Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure.  This is the default if no getattr inode
 * operation is supplied.
 */
void generic_fillattr(struct inode *inode, struct kstat *stat)
{
        stat->dev = inode->i_sb->s_dev;
        stat->ino = inode->i_ino;
        stat->mode = inode->i_mode;
        stat->nlink = inode->i_nlink;
        stat->uid = inode->i_uid;
        stat->gid = inode->i_gid;
        stat->rdev = inode->i_rdev;
        stat->size = i_size_read(inode);
        stat->atime = inode->i_atime;
        stat->mtime = inode->i_mtime;
        stat->ctime = inode->i_ctime;
        stat->blksize = i_blocksize(inode);
        stat->blocks = inode->i_blocks;

        /* atime isn't maintained on NOATIME inodes, so don't claim it. */
        if (IS_NOATIME(inode))
                stat->result_mask &= ~STATX_ATIME;
        if (IS_AUTOMOUNT(inode))
                stat->attributes |= STATX_ATTR_AUTOMOUNT;
}
EXPORT_SYMBOL(generic_fillattr);
/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
                      u32 request_mask, unsigned int query_flags)
{
        struct inode *inode = d_backing_inode(path->dentry);

        memset(stat, 0, sizeof(*stat));
        /* The basic stats are always provided, whatever was requested. */
        stat->result_mask |= STATX_BASIC_STATS;
        /* Sanitize caller-supplied bits before they reach a filesystem. */
        request_mask &= STATX_ALL;
        query_flags &= KSTAT_QUERY_FLAGS;
        if (inode->i_op->getattr)
                return inode->i_op->getattr(path, stat, request_mask,
                                            query_flags);

        /* No filesystem-specific getattr; use the inode's own fields. */
        generic_fillattr(inode, stat);
        return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);
  78. /*
  79. * vfs_getattr - Get the enhanced basic attributes of a file
  80. * @path: The file of interest
  81. * @stat: Where to return the statistics
  82. * @request_mask: STATX_xxx flags indicating what the caller wants
  83. * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
  84. *
  85. * Ask the filesystem for a file's attributes. The caller must indicate in
  86. * request_mask and query_flags to indicate what they want.
  87. *
  88. * If the file is remote, the filesystem can be forced to update the attributes
  89. * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
  90. * suppress the update by passing AT_STATX_DONT_SYNC.
  91. *
  92. * Bits must have been set in request_mask to indicate which attributes the
  93. * caller wants retrieving. Any such attribute not requested may be returned
  94. * anyway, but the value may be approximate, and, if remote, may not have been
  95. * synchronised with the server.
  96. *
  97. * 0 will be returned on success, and a -ve error code if unsuccessful.
  98. */
  99. int vfs_getattr(const struct path *path, struct kstat *stat,
  100. u32 request_mask, unsigned int query_flags)
  101. {
  102. int retval;
  103. retval = security_inode_getattr(path);
  104. if (retval)
  105. return retval;
  106. return vfs_getattr_nosec(path, stat, request_mask, query_flags);
  107. }
  108. EXPORT_SYMBOL(vfs_getattr);
/**
 * vfs_statx_fd - Get the enhanced basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_statx_fd(unsigned int fd, struct kstat *stat,
                 u32 request_mask, unsigned int query_flags)
{
        struct fd f;
        int error = -EBADF;

        /* Only the KSTAT query bits are meaningful here. */
        if (query_flags & ~KSTAT_QUERY_FLAGS)
                return -EINVAL;

        /* fdget_raw: NOTE(review) presumably so O_PATH fds can be stat'd. */
        f = fdget_raw(fd);
        if (f.file) {
                error = vfs_getattr(&f.file->f_path, stat,
                                    request_mask, query_flags);
                fdput(f);
        }
        return error;
}
EXPORT_SYMBOL(vfs_statx_fd);
/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_statx(int dfd, const char __user *filename, int flags,
              struct kstat *stat, u32 request_mask)
{
        struct path path;
        int error = -EINVAL;
        unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT;

        /* Reject any flag outside the AT_* and KSTAT query sets. */
        if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
                       AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0)
                return -EINVAL;

        /* Translate the AT_* flags into pathwalk behaviour. */
        if (flags & AT_SYMLINK_NOFOLLOW)
                lookup_flags &= ~LOOKUP_FOLLOW;
        if (flags & AT_NO_AUTOMOUNT)
                lookup_flags &= ~LOOKUP_AUTOMOUNT;
        if (flags & AT_EMPTY_PATH)
                lookup_flags |= LOOKUP_EMPTY;

retry:
        error = user_path_at(dfd, filename, lookup_flags, &path);
        if (error)
                goto out;

        /* Non-query bits in @flags are masked off in vfs_getattr_nosec(). */
        error = vfs_getattr(&path, stat, request_mask, flags);
        path_put(&path);
        if (retry_estale(error, lookup_flags)) {
                /* Stale filehandle: redo the walk forcing revalidation. */
                lookup_flags |= LOOKUP_REVAL;
                goto retry;
        }
out:
        return error;
}
EXPORT_SYMBOL(vfs_statx);
#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
        static int warncount = 5;
        struct __old_kernel_stat tmp;

        /* Nag the first few users of the ancient ABI, then go quiet. */
        if (warncount > 0) {
                warncount--;
                printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
                        current->comm);
        } else if (warncount < 0) {
                /* it's laughable, but... */
                warncount = 0;
        }

        memset(&tmp, 0, sizeof(struct __old_kernel_stat));
        tmp.st_dev = old_encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        /* Inode numbers too wide for the old ABI field can't be reported. */
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
        tmp.st_mode = stat->mode;
        tmp.st_nlink = stat->nlink;
        /* Likewise for link counts that don't round-trip. */
        if (tmp.st_nlink != stat->nlink)
                return -EOVERFLOW;
        /* Translate kernel uids/gids into the caller's user namespace. */
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
        tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
        /* Files beyond the non-LFS limit can't be represented here. */
        if (stat->size > MAX_NON_LFS)
                return -EOVERFLOW;
#endif
        tmp.st_size = stat->size;
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_mtime = stat->mtime.tv_sec;
        tmp.st_ctime = stat->ctime.tv_sec;
        return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
  220. SYSCALL_DEFINE2(stat, const char __user *, filename,
  221. struct __old_kernel_stat __user *, statbuf)
  222. {
  223. struct kstat stat;
  224. int error;
  225. error = vfs_stat(filename, &stat);
  226. if (error)
  227. return error;
  228. return cp_old_stat(&stat, statbuf);
  229. }
  230. SYSCALL_DEFINE2(lstat, const char __user *, filename,
  231. struct __old_kernel_stat __user *, statbuf)
  232. {
  233. struct kstat stat;
  234. int error;
  235. error = vfs_lstat(filename, &stat);
  236. if (error)
  237. return error;
  238. return cp_old_stat(&stat, statbuf);
  239. }
  240. SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
  241. {
  242. struct kstat stat;
  243. int error = vfs_fstat(fd, &stat);
  244. if (!error)
  245. error = cp_old_stat(&stat, statbuf);
  246. return error;
  247. }
  248. #endif /* __ARCH_WANT_OLD_STAT */
/* Select the 32-bit or the 64-bit variant of an expression at build time. */
#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
# define choose_32_64(a,b) b
#endif

/* On 32-bit, device numbers must fit the old encoding to be valid. */
#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)

/* Arches may override to zero only the padding; default clears everything. */
#ifndef INIT_STRUCT_STAT_PADDING
# define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif
/*
 * Copy a kstat into userspace as a "new" struct stat, returning -EOVERFLOW
 * for any value that cannot be represented in the ABI's field widths.
 */
static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
        struct stat tmp;

        /* Device numbers must fit the ABI's dev_t encoding. */
        if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
                return -EOVERFLOW;
#if BITS_PER_LONG == 32
        /* Non-LFS struct stat cannot represent sizes above MAX_NON_LFS. */
        if (stat->size > MAX_NON_LFS)
                return -EOVERFLOW;
#endif
        INIT_STRUCT_STAT_PADDING(tmp);
        tmp.st_dev = encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        /* Inode numbers too wide for st_ino can't be reported faithfully. */
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
        tmp.st_mode = stat->mode;
        tmp.st_nlink = stat->nlink;
        if (tmp.st_nlink != stat->nlink)
                return -EOVERFLOW;
        /* Translate kernel uids/gids into the caller's user namespace. */
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
        tmp.st_rdev = encode_dev(stat->rdev);
        tmp.st_size = stat->size;
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_mtime = stat->mtime.tv_sec;
        tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
        tmp.st_atime_nsec = stat->atime.tv_nsec;
        tmp.st_mtime_nsec = stat->mtime.tv_nsec;
        tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
        tmp.st_blocks = stat->blocks;
        tmp.st_blksize = stat->blksize;
        return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
  293. SYSCALL_DEFINE2(newstat, const char __user *, filename,
  294. struct stat __user *, statbuf)
  295. {
  296. struct kstat stat;
  297. int error = vfs_stat(filename, &stat);
  298. if (error)
  299. return error;
  300. return cp_new_stat(&stat, statbuf);
  301. }
  302. SYSCALL_DEFINE2(newlstat, const char __user *, filename,
  303. struct stat __user *, statbuf)
  304. {
  305. struct kstat stat;
  306. int error;
  307. error = vfs_lstat(filename, &stat);
  308. if (error)
  309. return error;
  310. return cp_new_stat(&stat, statbuf);
  311. }
  312. #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
  313. SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
  314. struct stat __user *, statbuf, int, flag)
  315. {
  316. struct kstat stat;
  317. int error;
  318. error = vfs_fstatat(dfd, filename, &stat, flag);
  319. if (error)
  320. return error;
  321. return cp_new_stat(&stat, statbuf);
  322. }
  323. #endif
  324. SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
  325. {
  326. struct kstat stat;
  327. int error = vfs_fstat(fd, &stat);
  328. if (!error)
  329. error = cp_new_stat(&stat, statbuf);
  330. return error;
  331. }
/*
 * readlinkat(2): read the target of a symlink into a user buffer.
 * Does not NUL-terminate; returns the number of bytes placed in the buffer
 * or a -ve error code.
 */
SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
                char __user *, buf, int, bufsiz)
{
        struct path path;
        int error;
        int empty = 0;
        unsigned int lookup_flags = LOOKUP_EMPTY;

        if (bufsiz <= 0)
                return -EINVAL;

retry:
        error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
        if (!error) {
                struct inode *inode = d_backing_inode(path.dentry);

                /* Empty name -> ENOENT; not a link (see below) -> EINVAL. */
                error = empty ? -ENOENT : -EINVAL;
                /*
                 * AFS mountpoints allow readlink(2) but are not symlinks
                 */
                if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
                        error = security_inode_readlink(path.dentry);
                        if (!error) {
                                touch_atime(&path);
                                error = vfs_readlink(path.dentry, buf, bufsiz);
                        }
                }
                path_put(&path);
                if (retry_estale(error, lookup_flags)) {
                        /* Stale filehandle: retry, forcing revalidation. */
                        lookup_flags |= LOOKUP_REVAL;
                        goto retry;
                }
        }
        return error;
}
/* readlink(2) is just readlinkat(2) relative to the current directory. */
SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
                int, bufsiz)
{
        return sys_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

/* Arches may override to zero only the padding; default clears everything. */
#ifndef INIT_STRUCT_STAT64_PADDING
# define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

/*
 * Copy a kstat into userspace as a struct stat64.  Large-file sizes fit
 * here, but inode numbers may still overflow the ABI field.
 */
static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
        struct stat64 tmp;

        INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
        /* mips has weird padding, so we don't get 64 bits there */
        tmp.st_dev = new_encode_dev(stat->dev);
        tmp.st_rdev = new_encode_dev(stat->rdev);
#else
        tmp.st_dev = huge_encode_dev(stat->dev);
        tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
        tmp.st_ino = stat->ino;
        /* Inode numbers too wide for st_ino can't be reported faithfully. */
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
        tmp.__st_ino = stat->ino;
#endif
        tmp.st_mode = stat->mode;
        tmp.st_nlink = stat->nlink;
        /* Translate kernel uids/gids into the caller's user namespace. */
        tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
        tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_atime_nsec = stat->atime.tv_nsec;
        tmp.st_mtime = stat->mtime.tv_sec;
        tmp.st_mtime_nsec = stat->mtime.tv_nsec;
        tmp.st_ctime = stat->ctime.tv_sec;
        tmp.st_ctime_nsec = stat->ctime.tv_nsec;
        tmp.st_size = stat->size;
        tmp.st_blocks = stat->blocks;
        tmp.st_blksize = stat->blksize;
        return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
  407. SYSCALL_DEFINE2(stat64, const char __user *, filename,
  408. struct stat64 __user *, statbuf)
  409. {
  410. struct kstat stat;
  411. int error = vfs_stat(filename, &stat);
  412. if (!error)
  413. error = cp_new_stat64(&stat, statbuf);
  414. return error;
  415. }
  416. SYSCALL_DEFINE2(lstat64, const char __user *, filename,
  417. struct stat64 __user *, statbuf)
  418. {
  419. struct kstat stat;
  420. int error = vfs_lstat(filename, &stat);
  421. if (!error)
  422. error = cp_new_stat64(&stat, statbuf);
  423. return error;
  424. }
  425. SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
  426. {
  427. struct kstat stat;
  428. int error = vfs_fstat(fd, &stat);
  429. if (!error)
  430. error = cp_new_stat64(&stat, statbuf);
  431. return error;
  432. }
  433. SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
  434. struct stat64 __user *, statbuf, int, flag)
  435. {
  436. struct kstat stat;
  437. int error;
  438. error = vfs_fstatat(dfd, filename, &stat, flag);
  439. if (error)
  440. return error;
  441. return cp_new_stat64(&stat, statbuf);
  442. }
  443. #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
/*
 * Copy a kstat into a userspace struct statx.  The buffer is built on the
 * stack and zeroed first, so no uninitialised padding can leak to userspace,
 * then copied out in a single copy_to_user().
 */
static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
        struct statx tmp;

        memset(&tmp, 0, sizeof(tmp));

        tmp.stx_mask = stat->result_mask;
        tmp.stx_blksize = stat->blksize;
        tmp.stx_attributes = stat->attributes;
        tmp.stx_nlink = stat->nlink;
        /* Translate kernel uids/gids into the caller's user namespace. */
        tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
        tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
        tmp.stx_mode = stat->mode;
        tmp.stx_ino = stat->ino;
        tmp.stx_size = stat->size;
        tmp.stx_blocks = stat->blocks;
        tmp.stx_attributes_mask = stat->attributes_mask;
        tmp.stx_atime.tv_sec = stat->atime.tv_sec;
        tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
        tmp.stx_btime.tv_sec = stat->btime.tv_sec;
        tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
        tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
        tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
        tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
        tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
        /* statx carries device numbers as separate major/minor fields. */
        tmp.stx_rdev_major = MAJOR(stat->rdev);
        tmp.stx_rdev_minor = MINOR(stat->rdev);
        tmp.stx_dev_major = MAJOR(stat->dev);
        tmp.stx_dev_minor = MINOR(stat->dev);

        return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
 */
SYSCALL_DEFINE5(statx,
                int, dfd, const char __user *, filename, unsigned, flags,
                unsigned int, mask,
                struct statx __user *, buffer)
{
        struct kstat stat;
        int error;

        /* Reserved mask bits must be zero so they can be assigned later. */
        if (mask & STATX__RESERVED)
                return -EINVAL;
        /* FORCE_SYNC and DONT_SYNC together make no sense. */
        if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
                return -EINVAL;

        error = vfs_statx(dfd, filename, flags, &stat, mask);
        if (error)
                return error;

        return cp_statx(&stat, buffer);
}
#ifdef CONFIG_COMPAT

/*
 * Copy a kstat into userspace as a compat (32-bit) struct stat, returning
 * -EOVERFLOW for any value the narrower ABI cannot represent.
 */
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
        struct compat_stat tmp;

        /* The compat ABI uses the old 16-bit device number encoding. */
        if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
                return -EOVERFLOW;

        memset(&tmp, 0, sizeof(tmp));
        tmp.st_dev = old_encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        /* Inode numbers too wide for st_ino can't be reported faithfully. */
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
        tmp.st_mode = stat->mode;
        tmp.st_nlink = stat->nlink;
        if (tmp.st_nlink != stat->nlink)
                return -EOVERFLOW;
        /* Translate kernel uids/gids into the caller's user namespace. */
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
        tmp.st_rdev = old_encode_dev(stat->rdev);
        /* 32-bit userspace without LFS cannot see larger files. */
        if ((u64) stat->size > MAX_NON_LFS)
                return -EOVERFLOW;
        tmp.st_size = stat->size;
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_atime_nsec = stat->atime.tv_nsec;
        tmp.st_mtime = stat->mtime.tv_sec;
        tmp.st_mtime_nsec = stat->mtime.tv_nsec;
        tmp.st_ctime = stat->ctime.tv_sec;
        tmp.st_ctime_nsec = stat->ctime.tv_nsec;
        tmp.st_blocks = stat->blocks;
        tmp.st_blksize = stat->blksize;
        return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
  532. COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
  533. struct compat_stat __user *, statbuf)
  534. {
  535. struct kstat stat;
  536. int error;
  537. error = vfs_stat(filename, &stat);
  538. if (error)
  539. return error;
  540. return cp_compat_stat(&stat, statbuf);
  541. }
  542. COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
  543. struct compat_stat __user *, statbuf)
  544. {
  545. struct kstat stat;
  546. int error;
  547. error = vfs_lstat(filename, &stat);
  548. if (error)
  549. return error;
  550. return cp_compat_stat(&stat, statbuf);
  551. }
  552. #ifndef __ARCH_WANT_STAT64
  553. COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
  554. const char __user *, filename,
  555. struct compat_stat __user *, statbuf, int, flag)
  556. {
  557. struct kstat stat;
  558. int error;
  559. error = vfs_fstatat(dfd, filename, &stat, flag);
  560. if (error)
  561. return error;
  562. return cp_compat_stat(&stat, statbuf);
  563. }
  564. #endif
  565. COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
  566. struct compat_stat __user *, statbuf)
  567. {
  568. struct kstat stat;
  569. int error = vfs_fstat(fd, &stat);
  570. if (!error)
  571. error = cp_compat_stat(&stat, statbuf);
  572. return error;
  573. }
  574. #endif
  575. /* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
  576. void __inode_add_bytes(struct inode *inode, loff_t bytes)
  577. {
  578. inode->i_blocks += bytes >> 9;
  579. bytes &= 511;
  580. inode->i_bytes += bytes;
  581. if (inode->i_bytes >= 512) {
  582. inode->i_blocks++;
  583. inode->i_bytes -= 512;
  584. }
  585. }
  586. EXPORT_SYMBOL(__inode_add_bytes);
/* Locked wrapper: add @bytes to the inode's usage under i_lock. */
void inode_add_bytes(struct inode *inode, loff_t bytes)
{
        spin_lock(&inode->i_lock);
        __inode_add_bytes(inode, bytes);
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_add_bytes);
  594. void __inode_sub_bytes(struct inode *inode, loff_t bytes)
  595. {
  596. inode->i_blocks -= bytes >> 9;
  597. bytes &= 511;
  598. if (inode->i_bytes < bytes) {
  599. inode->i_blocks--;
  600. inode->i_bytes += 512;
  601. }
  602. inode->i_bytes -= bytes;
  603. }
  604. EXPORT_SYMBOL(__inode_sub_bytes);
/* Locked wrapper: subtract @bytes from the inode's usage under i_lock. */
void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
        spin_lock(&inode->i_lock);
        __inode_sub_bytes(inode, bytes);
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(inode_sub_bytes);
/* Locked wrapper: read the inode's byte usage via __inode_get_bytes(). */
loff_t inode_get_bytes(struct inode *inode)
{
        loff_t ret;

        spin_lock(&inode->i_lock);
        ret = __inode_get_bytes(inode);
        spin_unlock(&inode->i_lock);
        return ret;
}
EXPORT_SYMBOL(inode_get_bytes);
/*
 * Set the inode's byte usage: whole 512-byte units in i_blocks, the
 * sub-block remainder in i_bytes.
 */
void inode_set_bytes(struct inode *inode, loff_t bytes)
{
        /* Caller is here responsible for sufficient locking
         * (ie. inode->i_lock) */
        inode->i_blocks = bytes >> 9;
        inode->i_bytes = bytes & 511;
}
EXPORT_SYMBOL(inode_set_bytes);