file.c 5.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232
  1. /*
  2. * linux/fs/ext2/file.c
  3. *
  4. * Copyright (C) 1992, 1993, 1994, 1995
  5. * Remy Card (card@masi.ibp.fr)
  6. * Laboratoire MASI - Institut Blaise Pascal
  7. * Universite Pierre et Marie Curie (Paris VI)
  8. *
  9. * from
  10. *
  11. * linux/fs/minix/file.c
  12. *
  13. * Copyright (C) 1991, 1992 Linus Torvalds
  14. *
  15. * ext2 fs regular file handling primitives
  16. *
  17. * 64-bit file support on 64-bit platforms by Jakub Jelinek
  18. * (jj@sunsite.ms.mff.cuni.cz)
  19. */
  20. #include <linux/time.h>
  21. #include <linux/pagemap.h>
  22. #include <linux/dax.h>
  23. #include <linux/quotaops.h>
  24. #include <linux/iomap.h>
  25. #include <linux/uio.h>
  26. #include "ext2.h"
  27. #include "xattr.h"
  28. #include "acl.h"
  29. #ifdef CONFIG_FS_DAX
  30. static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
  31. {
  32. struct inode *inode = iocb->ki_filp->f_mapping->host;
  33. ssize_t ret;
  34. if (!iov_iter_count(to))
  35. return 0; /* skip atime */
  36. inode_lock_shared(inode);
  37. ret = dax_iomap_rw(iocb, to, &ext2_iomap_ops);
  38. inode_unlock_shared(inode);
  39. file_accessed(iocb->ki_filp);
  40. return ret;
  41. }
/*
 * DAX write path.  The whole sequence runs under the exclusive inode
 * lock: validate the request, strip privileges, bump timestamps, then
 * copy the data directly via the iomap machinery.  The ordering of
 * generic_write_checks -> file_remove_privs -> file_update_time must be
 * preserved; each step may fail and abort the write.
 *
 * Returns the number of bytes written, or a negative errno.
 */
static ssize_t ext2_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	/* Clamps/validates the range; returns <= 0 for nothing to do or error. */
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out_unlock;
	/* Drop setuid/setgid/security privileges before modifying the file. */
	ret = file_remove_privs(file);
	if (ret)
		goto out_unlock;
	ret = file_update_time(file);
	if (ret)
		goto out_unlock;

	ret = dax_iomap_rw(iocb, from, &ext2_iomap_ops);
	/* An extending write must publish the new size before unlock. */
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		mark_inode_dirty(inode);
	}

out_unlock:
	inode_unlock(inode);
	/* O_SYNC/O_DSYNC handling happens outside the inode lock. */
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
  68. /*
  69. * The lock ordering for ext2 DAX fault paths is:
  70. *
  71. * mmap_sem (MM)
  72. * sb_start_pagefault (vfs, freeze)
  73. * ext2_inode_info->dax_sem
  74. * address_space->i_mmap_rwsem or page_lock (mutually exclusive in DAX)
  75. * ext2_inode_info->truncate_mutex
  76. *
  77. * The default page_lock and i_size verification done by non-DAX fault paths
  78. * is sufficient because ext2 doesn't support hole punching.
  79. */
/*
 * Handle a page fault on a DAX mapping.  Serves double duty as both
 * .fault and .page_mkwrite (see ext2_dax_vm_ops below).
 *
 * Write faults dirty the inode (timestamps) and must be fenced against
 * filesystem freezing, hence sb_start_pagefault/sb_end_pagefault around
 * the write-fault case only.  dax_sem is taken shared to exclude
 * truncate, per the lock-ordering comment above.
 */
static int ext2_dax_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct ext2_inode_info *ei = EXT2_I(inode);
	int ret;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}
	down_read(&ei->dax_sem);

	ret = dax_iomap_fault(vmf, &ext2_iomap_ops);

	up_read(&ei->dax_sem);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);
	return ret;
}
/*
 * Make a read-only DAX pfn mapping writable after a write-protect
 * fault.  Unlike ext2_dax_fault(), this is always a write, so the
 * freeze protection and timestamp update are unconditional.
 *
 * Returns a VM_FAULT_* code.
 */
static int ext2_dax_pfn_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct ext2_inode_info *ei = EXT2_I(inode);
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	down_read(&ei->dax_sem);

	/* check that the faulting page hasn't raced with truncate */
	/* size is the file length in pages, rounded up. */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else
		ret = dax_pfn_mkwrite(vmf);

	up_read(&ei->dax_sem);
	sb_end_pagefault(inode->i_sb);
	return ret;
}
/* VM operations used for DAX file mappings (see ext2_file_mmap()). */
static const struct vm_operations_struct ext2_dax_vm_ops = {
	.fault		= ext2_dax_fault,
	/*
	 * .pmd_fault is not supported for DAX because allocation in ext2
	 * cannot be reliably aligned to huge page sizes and so pmd faults
	 * will always fail and fail back to regular faults.
	 */
	/* Write faults need the same freeze/locking dance as reads. */
	.page_mkwrite	= ext2_dax_fault,
	.pfn_mkwrite	= ext2_dax_pfn_mkwrite,
};
  125. static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
  126. {
  127. if (!IS_DAX(file_inode(file)))
  128. return generic_file_mmap(file, vma);
  129. file_accessed(file);
  130. vma->vm_ops = &ext2_dax_vm_ops;
  131. vma->vm_flags |= VM_MIXEDMAP;
  132. return 0;
  133. }
  134. #else
  135. #define ext2_file_mmap generic_file_mmap
  136. #endif
  137. /*
  138. * Called when filp is released. This happens when all file descriptors
  139. * for a single struct file are closed. Note that different open() calls
  140. * for the same file yield different struct file structures.
  141. */
  142. static int ext2_release_file (struct inode * inode, struct file * filp)
  143. {
  144. if (filp->f_mode & FMODE_WRITE) {
  145. mutex_lock(&EXT2_I(inode)->truncate_mutex);
  146. ext2_discard_reservation(inode);
  147. mutex_unlock(&EXT2_I(inode)->truncate_mutex);
  148. }
  149. return 0;
  150. }
/*
 * fsync for ext2 files.  After the generic data/inode sync, also check
 * the *block device's* mapping for a sticky write error: ext2 metadata
 * (bitmaps, indirect blocks) is written through the bdev page cache, so
 * an I/O error there would otherwise go unreported to the caller.
 *
 * Returns 0 on success or -EIO if any write-back error was detected.
 */
int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int ret;
	struct super_block *sb = file->f_mapping->host->i_sb;
	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;

	ret = generic_file_fsync(file, start, end, datasync);
	if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) {
		/* We don't really know where the IO error happened... */
		ext2_error(sb, __func__,
			   "detected IO error when writing metadata buffers");
		ret = -EIO;
	}
	return ret;
}
  165. static ssize_t ext2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
  166. {
  167. #ifdef CONFIG_FS_DAX
  168. if (IS_DAX(iocb->ki_filp->f_mapping->host))
  169. return ext2_dax_read_iter(iocb, to);
  170. #endif
  171. return generic_file_read_iter(iocb, to);
  172. }
  173. static ssize_t ext2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
  174. {
  175. #ifdef CONFIG_FS_DAX
  176. if (IS_DAX(iocb->ki_filp->f_mapping->host))
  177. return ext2_dax_write_iter(iocb, from);
  178. #endif
  179. return generic_file_write_iter(iocb, from);
  180. }
/* File operations for ext2 regular files. */
const struct file_operations ext2_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ext2_file_read_iter,
	.write_iter	= ext2_file_write_iter,
	.unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext2_compat_ioctl,
#endif
	.mmap		= ext2_file_mmap,
	/* Initializes quota tracking for the opened file. */
	.open		= dquot_file_open,
	.release	= ext2_release_file,
	.fsync		= ext2_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};
/* Inode operations for ext2 regular files. */
const struct inode_operations ext2_file_inode_operations = {
#ifdef CONFIG_EXT2_FS_XATTR
	.listxattr	= ext2_listxattr,
#endif
	.setattr	= ext2_setattr,
	.get_acl	= ext2_get_acl,
	.set_acl	= ext2_set_acl,
	.fiemap		= ext2_fiemap,
};