@@ -886,6 +886,95 @@ int do_journal_get_write_access(handle_t *handle,
 
 static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
                    struct buffer_head *bh_result, int create);
+
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
+                                  get_block_t *get_block)
+{
+        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+        unsigned to = from + len;
+        struct inode *inode = page->mapping->host;
+        unsigned block_start, block_end;
+        sector_t block;
+        int err = 0;
+        unsigned blocksize = inode->i_sb->s_blocksize;
+        unsigned bbits;
+        struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
+        bool decrypt = false;
+
+        BUG_ON(!PageLocked(page));
+        BUG_ON(from > PAGE_CACHE_SIZE);
+        BUG_ON(to > PAGE_CACHE_SIZE);
+        BUG_ON(from > to);
+
+        if (!page_has_buffers(page))
+                create_empty_buffers(page, blocksize, 0);
+        head = page_buffers(page);
+        bbits = ilog2(blocksize);
+        block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+
+        for (bh = head, block_start = 0; bh != head || !block_start;
+            block++, block_start = block_end, bh = bh->b_this_page) {
+                block_end = block_start + blocksize;
+                if (block_end <= from || block_start >= to) {
+                        if (PageUptodate(page)) {
+                                if (!buffer_uptodate(bh))
+                                        set_buffer_uptodate(bh);
+                        }
+                        continue;
+                }
+                if (buffer_new(bh))
+                        clear_buffer_new(bh);
+                if (!buffer_mapped(bh)) {
+                        WARN_ON(bh->b_size != blocksize);
+                        err = get_block(inode, block, bh, 1);
+                        if (err)
+                                break;
+                        if (buffer_new(bh)) {
+                                unmap_underlying_metadata(bh->b_bdev,
+                                                          bh->b_blocknr);
+                                if (PageUptodate(page)) {
+                                        clear_buffer_new(bh);
+                                        set_buffer_uptodate(bh);
+                                        mark_buffer_dirty(bh);
+                                        continue;
+                                }
+                                if (block_end > to || block_start < from)
+                                        zero_user_segments(page, to, block_end,
+                                                           block_start, from);
+                                continue;
+                        }
+                }
+                if (PageUptodate(page)) {
+                        if (!buffer_uptodate(bh))
+                                set_buffer_uptodate(bh);
+                        continue;
+                }
+                if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
+                    !buffer_unwritten(bh) &&
+                    (block_start < from || block_end > to)) {
+                        ll_rw_block(READ, 1, &bh);
+                        *wait_bh++ = bh;
+                        decrypt = ext4_encrypted_inode(inode) &&
+                                S_ISREG(inode->i_mode);
+                }
+        }
+        /*
+         * If we issued read requests, let them complete.
+         */
+        while (wait_bh > wait) {
+                wait_on_buffer(*--wait_bh);
+                if (!buffer_uptodate(*wait_bh))
+                        err = -EIO;
+        }
+        if (unlikely(err))
+                page_zero_new_buffers(page, from, to);
+        else if (decrypt)
+                err = ext4_decrypt_one(inode, page);
+        return err;
+}
+#endif
+
 static int ext4_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
@@ -948,11 +1037,19 @@ retry_journal:
         /* In case writeback began while the page was unlocked */
         wait_for_stable_page(page);
 
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+        if (ext4_should_dioread_nolock(inode))
+                ret = ext4_block_write_begin(page, pos, len,
+                                             ext4_get_block_write);
+        else
+                ret = ext4_block_write_begin(page, pos, len,
+                                             ext4_get_block);
+#else
         if (ext4_should_dioread_nolock(inode))
                 ret = __block_write_begin(page, pos, len, ext4_get_block_write);
         else
                 ret = __block_write_begin(page, pos, len, ext4_get_block);
-
+#endif
         if (!ret && ext4_should_journal_data(inode)) {
                 ret = ext4_walk_page_buffers(handle, page_buffers(page),
                                              from, to, NULL,
@@ -2574,7 +2671,12 @@ retry_journal:
         /* In case writeback began while the page was unlocked */
         wait_for_stable_page(page);
 
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+        ret = ext4_block_write_begin(page, pos, len,
+                                     ext4_da_get_block_prep);
+#else
         ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
+#endif
         if (ret < 0) {
                 unlock_page(page);
                 ext4_journal_stop(handle);
@@ -3032,6 +3134,9 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                 get_block_func = ext4_get_block_write;
                 dio_flags = DIO_LOCKING;
         }
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+        BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
+#endif
         if (IS_DAX(inode))
                 ret = dax_do_io(rw, iocb, inode, iter, offset, get_block_func,
                                 ext4_end_io_dio, dio_flags);
@@ -3096,6 +3201,11 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
         size_t count = iov_iter_count(iter);
         ssize_t ret;
 
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+        if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+                return 0;
+#endif
+
         /*
          * If we are doing data journalling we don't support O_DIRECT
          */