@@ -1952,6 +1952,77 @@ out:
return err;
}
+/*
+ * It would be worthwhile to make sure that space is reserved on disk for the
+ * write, but how to do that without killing performance needs more thought.
+ */
+static int fuse_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ struct fuse_conn *fc = get_fuse_conn(file->f_dentry->d_inode);
+ struct page *page;
+ loff_t fsize;
+ int err = -ENOMEM;
+
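+	/* The cached write path is only taken in writeback cache mode */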
+ WARN_ON(!fc->writeback_cache);
+
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+ goto error;
+
+ fuse_wait_on_page_writeback(mapping->host, page->index);
+
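+	/*
+	 * No read-in is needed if the page is already uptodate or if the
+	 * copy is going to overwrite the whole page anyway.
+	 */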
+ if (PageUptodate(page) || len == PAGE_CACHE_SIZE)
+ goto success;
+ /*
+	 * Check if the start of this page comes after the end of file, in which
+	 * case the readpage can be optimized away.
+ */
+ fsize = i_size_read(mapping->host);
+ if (fsize <= (pos & PAGE_CACHE_MASK)) {
+ size_t off = pos & ~PAGE_CACHE_MASK;
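+		/* Zero only the head; fuse_write_end() zeroes past the written tail */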
+ if (off)
+ zero_user_segment(page, 0, off);
+ goto success;
+ }
+ err = fuse_do_readpage(file, page);
+ if (err)
+ goto cleanup;
+success:
+ *pagep = page;
+ return 0;
+
+cleanup:
+ unlock_page(page);
+ page_cache_release(page);
+error:
+ return err;
+}
+
+static int fuse_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
+{
+ struct inode *inode = page->mapping->host;
+
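+	/*
+	 * write_begin() zeroed everything before pos (if there was anything)
+	 * and the copy filled [pos, pos + copied); zeroing the tail therefore
+	 * leaves the whole page valid, so it can be marked uptodate.
+	 */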
+ if (!PageUptodate(page)) {
+ /* Zero any unwritten bytes at the end of the page */
+ size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK;
+ if (endoff)
+ zero_user_segment(page, endoff, PAGE_CACHE_SIZE);
+ SetPageUptodate(page);
+ }
+
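+	/* Extend the cached i_size if this write went past the end of file */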
+ fuse_write_update_size(inode, pos + copied);
+ set_page_dirty(page);
+ unlock_page(page);
+ page_cache_release(page);
+
+ return copied;
+}
+
static int fuse_launder_page(struct page *page)
{
int err = 0;
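
For context, here is a rough sketch (not part of the patch) of how the generic
write path is expected to drive the two new hooks. It is a simplification of
generic_perform_write() in mm/filemap.c; fault-in, short-copy retry, dirty
throttling and page accounting are omitted, and details vary by kernel version:

/* Simplified sketch of generic_perform_write(), for illustration only */
static ssize_t sketch_perform_write(struct file *file, struct iov_iter *i,
                                    loff_t pos)
{
    struct address_space *mapping = file->f_mapping;
    const struct address_space_operations *a_ops = mapping->a_ops;
    ssize_t written = 0;
    int status = 0;

    do {
        struct page *page;
        void *fsdata;
        unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
        unsigned bytes = min_t(unsigned, PAGE_CACHE_SIZE - offset,
                               iov_iter_count(i));
        size_t copied;

        /* e.g. fuse_write_begin(): returns a locked, prepared page */
        status = a_ops->write_begin(file, mapping, pos, bytes, 0,
                                    &page, &fsdata);
        if (status < 0)
            break;

        /* copy user data into the page cache page */
        copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
        flush_dcache_page(page);

        /* e.g. fuse_write_end(): marks the page uptodate and dirty */
        status = a_ops->write_end(file, mapping, pos, bytes, copied,
                                  page, fsdata);
        if (status < 0)
            break;
        copied = status;

        iov_iter_advance(i, copied);
        pos += copied;
        written += copied;
    } while (iov_iter_count(i));

    return written ? written : status;
}

The division of labour is: fuse_write_begin() hands back a locked page that is
either uptodate, about to be fully overwritten, or prepared so that
fuse_write_end() can make it uptodate; fuse_write_end() then marks the page
uptodate and dirty and updates i_size, leaving the actual FUSE write request
to the writeback path.
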
@@ -2979,6 +3050,8 @@ static const struct address_space_operations fuse_file_aops = {
.set_page_dirty = __set_page_dirty_nobuffers,
.bmap = fuse_bmap,
.direct_IO = fuse_direct_IO,
+ .write_begin = fuse_write_begin,
+ .write_end = fuse_write_end,
};
void fuse_init_file_inode(struct inode *inode)
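
A final note on when these hooks actually run: nothing in this patch sets
fc->writeback_cache, so the assumption here is that another patch in the
series turns it on during INIT negotiation when the userspace daemon asks for
writeback caching. A sketch of how that is assumed to look; the helper name
and the FUSE_WRITEBACK_CACHE init flag are taken to be introduced elsewhere in
the series, only fc->writeback_cache comes from this patch:

/* Sketch only: how the rest of the series is assumed to enable the cache */
static void sketch_enable_writeback(struct fuse_conn *fc,
                                    struct fuse_init_out *arg)
{
    /* honour the daemon's request from the INIT reply */
    if (arg->flags & FUSE_WRITEBACK_CACHE)
        fc->writeback_cache = 1;
}

Until that negotiation happens, writes keep going through the existing fuse
write path and the new callbacks are never reached, which is what the
WARN_ON(!fc->writeback_cache) in fuse_write_begin() asserts.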