@@ -986,6 +986,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
+	struct address_space *mapping = inode->i_mapping;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder gh;
 	int rv;
@@ -1006,6 +1007,35 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 	if (rv != 1)
 		goto out; /* dio not valid, fall back to buffered i/o */
 
+	/*
+	 * Now, since we are holding a deferred (CW) lock at this point, you
+	 * might be wondering why this is ever needed. There is a case, however,
+	 * where we've granted a deferred local lock against a cached exclusive
+	 * glock. That is ok provided all granted local locks are deferred, but
+	 * it also means that it is possible to encounter pages which are
+	 * cached and possibly also mapped. So here we check for that and sort
+	 * them out ahead of the dio. The glock state machine will take care of
+	 * everything else.
+	 *
+	 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
+	 * the first place, mapping->nrpages will always be zero.
+	 */
+	if (mapping->nrpages) {
+		loff_t lstart = offset & ~(PAGE_CACHE_SIZE - 1);
+		loff_t len = iov_length(iov, nr_segs);
+		loff_t end = PAGE_ALIGN(offset + len) - 1;
+
+		rv = 0;
+		if (len == 0)
+			goto out;
+		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+			unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
+		rv = filemap_write_and_wait_range(mapping, lstart, end);
+		if (rv)
+			return rv;
+		truncate_inode_pages_range(mapping, lstart, end);
+	}
+
 	rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
 				  offset, nr_segs, gfs2_get_block_direct,
 				  NULL, NULL, 0);
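
For reference, the rounding in the new block maps the dio's byte range onto whole
page cache pages: lstart rounds the start offset down to a page boundary, and end
rounds the end of the range up to the last byte of its page, so that
filemap_write_and_wait_range() and truncate_inode_pages_range() cover every page
the dio touches, including partially overlapped first and last pages. Below is a
minimal userspace sketch of that arithmetic only; the 4096-byte page size and the
local PAGE_CACHE_SIZE/PAGE_ALIGN definitions are stand-ins for the kernel macros,
chosen here purely for illustration.

#include <stdio.h>

/* Stand-ins for the kernel definitions; the 4096 page size is an assumption. */
#define PAGE_CACHE_SIZE	4096ULL
#define PAGE_ALIGN(x)	(((x) + PAGE_CACHE_SIZE - 1) & ~(PAGE_CACHE_SIZE - 1))

int main(void)
{
	unsigned long long offset = 5000;	/* dio starts mid-page */
	unsigned long long len = 4000;		/* dio ends mid-page */

	/* Round the start down and the end up to page boundaries. */
	unsigned long long lstart = offset & ~(PAGE_CACHE_SIZE - 1);
	unsigned long long end = PAGE_ALIGN(offset + len) - 1;

	/*
	 * Prints "write back and invalidate bytes [4096, 12287]", i.e. the
	 * whole of pages 1 and 2, which the dio range [5000, 8999] overlaps.
	 */
	printf("write back and invalidate bytes [%llu, %llu]\n", lstart, end);
	return 0;
}

Masking with (PAGE_CACHE_SIZE - 1) instead of its complement would give the offset
within the page (904 here) rather than the page-aligned start, which is why the
range computation above uses the ~ form.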