  1. /*
  2. * linux/fs/ext2/file.c
  3. *
  4. * Copyright (C) 1992, 1993, 1994, 1995
  5. * Remy Card (card@masi.ibp.fr)
  6. * Laboratoire MASI - Institut Blaise Pascal
  7. * Universite Pierre et Marie Curie (Paris VI)
  8. *
  9. * from
  10. *
  11. * linux/fs/minix/file.c
  12. *
  13. * Copyright (C) 1991, 1992 Linus Torvalds
  14. *
  15. * ext2 fs regular file handling primitives
  16. *
  17. * 64-bit file support on 64-bit platforms by Jakub Jelinek
  18. * (jj@sunsite.ms.mff.cuni.cz)
  19. */
  20. #include <linux/time.h>
  21. #include <linux/pagemap.h>
  22. #include <linux/dax.h>
  23. #include <linux/quotaops.h>
  24. #include <linux/iomap.h>
  25. #include <linux/uio.h>
  26. #include "ext2.h"
  27. #include "xattr.h"
  28. #include "acl.h"
  29. #ifdef CONFIG_FS_DAX
  30. static ssize_t ext2_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
  31. {
  32. struct inode *inode = iocb->ki_filp->f_mapping->host;
  33. ssize_t ret;
  34. if (!iov_iter_count(to))
  35. return 0; /* skip atime */
  36. inode_lock_shared(inode);
  37. ret = dax_iomap_rw(iocb, to, &ext2_iomap_ops);
  38. inode_unlock_shared(inode);
  39. file_accessed(iocb->ki_filp);
  40. return ret;
  41. }
  42. static ssize_t ext2_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
  43. {
  44. struct file *file = iocb->ki_filp;
  45. struct inode *inode = file->f_mapping->host;
  46. ssize_t ret;
  47. inode_lock(inode);
  48. ret = generic_write_checks(iocb, from);
  49. if (ret <= 0)
  50. goto out_unlock;
  51. ret = file_remove_privs(file);
  52. if (ret)
  53. goto out_unlock;
  54. ret = file_update_time(file);
  55. if (ret)
  56. goto out_unlock;
  57. ret = dax_iomap_rw(iocb, from, &ext2_iomap_ops);
  58. if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
  59. i_size_write(inode, iocb->ki_pos);
  60. mark_inode_dirty(inode);
  61. }
  62. out_unlock:
  63. inode_unlock(inode);
  64. if (ret > 0)
  65. ret = generic_write_sync(iocb, ret);
  66. return ret;
  67. }
  68. /*
  69. * The lock ordering for ext2 DAX fault paths is:
  70. *
  71. * mmap_sem (MM)
  72. * sb_start_pagefault (vfs, freeze)
  73. * ext2_inode_info->dax_sem
  74. * address_space->i_mmap_rwsem or page_lock (mutually exclusive in DAX)
  75. * ext2_inode_info->truncate_mutex
  76. *
  77. * The default page_lock and i_size verification done by non-DAX fault paths
  78. * is sufficient because ext2 doesn't support hole punching.
  79. */
  80. static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  81. {
  82. struct inode *inode = file_inode(vma->vm_file);
  83. struct ext2_inode_info *ei = EXT2_I(inode);
  84. int ret;
  85. if (vmf->flags & FAULT_FLAG_WRITE) {
  86. sb_start_pagefault(inode->i_sb);
  87. file_update_time(vma->vm_file);
  88. }
  89. down_read(&ei->dax_sem);
  90. ret = dax_iomap_fault(vma, vmf, &ext2_iomap_ops);
  91. up_read(&ei->dax_sem);
  92. if (vmf->flags & FAULT_FLAG_WRITE)
  93. sb_end_pagefault(inode->i_sb);
  94. return ret;
  95. }
  96. static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
  97. struct vm_fault *vmf)
  98. {
  99. struct inode *inode = file_inode(vma->vm_file);
  100. struct ext2_inode_info *ei = EXT2_I(inode);
  101. loff_t size;
  102. int ret;
  103. sb_start_pagefault(inode->i_sb);
  104. file_update_time(vma->vm_file);
  105. down_read(&ei->dax_sem);
  106. /* check that the faulting page hasn't raced with truncate */
  107. size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
  108. if (vmf->pgoff >= size)
  109. ret = VM_FAULT_SIGBUS;
  110. else
  111. ret = dax_pfn_mkwrite(vma, vmf);
  112. up_read(&ei->dax_sem);
  113. sb_end_pagefault(inode->i_sb);
  114. return ret;
  115. }
/* VM operations installed by ext2_file_mmap() for DAX-backed mappings. */
static const struct vm_operations_struct ext2_dax_vm_ops = {
	.fault		= ext2_dax_fault,
	/*
	 * .pmd_fault is not supported for DAX because allocation in ext2
	 * cannot be reliably aligned to huge page sizes and so pmd faults
	 * will always fail and fall back to regular faults.
	 */
	.page_mkwrite	= ext2_dax_fault,
	.pfn_mkwrite	= ext2_dax_pfn_mkwrite,
};
  126. static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
  127. {
  128. if (!IS_DAX(file_inode(file)))
  129. return generic_file_mmap(file, vma);
  130. file_accessed(file);
  131. vma->vm_ops = &ext2_dax_vm_ops;
  132. vma->vm_flags |= VM_MIXEDMAP;
  133. return 0;
  134. }
  135. #else
  136. #define ext2_file_mmap generic_file_mmap
  137. #endif
  138. /*
  139. * Called when filp is released. This happens when all file descriptors
  140. * for a single struct file are closed. Note that different open() calls
  141. * for the same file yield different struct file structures.
  142. */
  143. static int ext2_release_file (struct inode * inode, struct file * filp)
  144. {
  145. if (filp->f_mode & FMODE_WRITE) {
  146. mutex_lock(&EXT2_I(inode)->truncate_mutex);
  147. ext2_discard_reservation(inode);
  148. mutex_unlock(&EXT2_I(inode)->truncate_mutex);
  149. }
  150. return 0;
  151. }
  152. int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
  153. {
  154. int ret;
  155. struct super_block *sb = file->f_mapping->host->i_sb;
  156. struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
  157. ret = generic_file_fsync(file, start, end, datasync);
  158. if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) {
  159. /* We don't really know where the IO error happened... */
  160. ext2_error(sb, __func__,
  161. "detected IO error when writing metadata buffers");
  162. ret = -EIO;
  163. }
  164. return ret;
  165. }
  166. static ssize_t ext2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
  167. {
  168. #ifdef CONFIG_FS_DAX
  169. if (IS_DAX(iocb->ki_filp->f_mapping->host))
  170. return ext2_dax_read_iter(iocb, to);
  171. #endif
  172. return generic_file_read_iter(iocb, to);
  173. }
  174. static ssize_t ext2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
  175. {
  176. #ifdef CONFIG_FS_DAX
  177. if (IS_DAX(iocb->ki_filp->f_mapping->host))
  178. return ext2_dax_write_iter(iocb, from);
  179. #endif
  180. return generic_file_write_iter(iocb, from);
  181. }
/* File operations for regular ext2 files. */
const struct file_operations ext2_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ext2_file_read_iter,
	.write_iter	= ext2_file_write_iter,
	.unlocked_ioctl	= ext2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext2_compat_ioctl,
#endif
	.mmap		= ext2_file_mmap,
	/* dquot_file_open enables quota accounting on open. */
	.open		= dquot_file_open,
	.release	= ext2_release_file,
	.fsync		= ext2_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};
/* Inode operations for regular ext2 files. */
const struct inode_operations ext2_file_inode_operations = {
#ifdef CONFIG_EXT2_FS_XATTR
	.listxattr	= ext2_listxattr,
#endif
	.setattr	= ext2_setattr,
	.get_acl	= ext2_get_acl,
	.set_acl	= ext2_set_acl,
	.fiemap		= ext2_fiemap,
};