@@ -1753,11 +1753,83 @@ xfs_vm_readpages(
 	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
 }
 
+/*
+ * This is basically a copy of __set_page_dirty_buffers() with one
+ * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
+ * dirty, we'll never be able to clean them because we don't write buffers
+ * beyond EOF, and that means we can't invalidate pages that span EOF
+ * that have been marked dirty. Further, the dirty state can leak into
+ * the file interior if the file is extended, resulting in all sorts of
+ * bad things happening as the state does not match the underlying data.
+ *
+ * XXX: this really indicates that bufferheads in XFS need to die. Warts like
+ * this only exist because of bufferheads and how the generic code manages them.
+ */
+STATIC int
+xfs_vm_set_page_dirty(
+	struct page		*page)
+{
+	struct address_space	*mapping = page->mapping;
+	struct inode		*inode;
+	loff_t			end_offset;
+	loff_t			offset;
+	int			newly_dirty;
+
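+	/* No mapping, so just set the page dirty bit and return. */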
+	if (unlikely(!mapping))
+		return !TestSetPageDirty(page);
+
+	inode = mapping->host;
+	end_offset = i_size_read(inode);
+	offset = page_offset(page);
+
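+	/*
+	 * Only buffers that start inside EOF get marked dirty below;
+	 * buffers wholly beyond EOF are skipped so that pages spanning
+	 * EOF can still be invalidated.
+	 */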
+	spin_lock(&mapping->private_lock);
+	if (page_has_buffers(page)) {
+		struct buffer_head *head = page_buffers(page);
+		struct buffer_head *bh = head;
+
+		do {
+			if (offset < end_offset)
+				set_buffer_dirty(bh);
+			bh = bh->b_this_page;
+			offset += 1 << inode->i_blkbits;
+		} while (bh != head);
+	}
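+	/*
+	 * Set the page dirty bit while still holding the private_lock so
+	 * the buffer and page dirty state cannot get out of sync.
+	 */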
+	newly_dirty = !TestSetPageDirty(page);
+	spin_unlock(&mapping->private_lock);
+
+	if (newly_dirty) {
+		/* sigh - __set_page_dirty() is static, so copy it here, too */
+		unsigned long flags;
+
+		spin_lock_irqsave(&mapping->tree_lock, flags);
+		if (page->mapping) {	/* Race with truncate? */
+			WARN_ON_ONCE(!PageUptodate(page));
+			account_page_dirtied(page, mapping);
+			radix_tree_tag_set(&mapping->page_tree,
+					page_index(page), PAGECACHE_TAG_DIRTY);
+		}
+		spin_unlock_irqrestore(&mapping->tree_lock, flags);
+		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+	}
+	return newly_dirty;
+}
+
 const struct address_space_operations xfs_address_space_operations = {
 	.readpage		= xfs_vm_readpage,
 	.readpages		= xfs_vm_readpages,
 	.writepage		= xfs_vm_writepage,
 	.writepages		= xfs_vm_writepages,
+	.set_page_dirty		= xfs_vm_set_page_dirty,
 	.releasepage		= xfs_vm_releasepage,
 	.invalidatepage		= xfs_vm_invalidatepage,
 	.write_begin		= xfs_vm_write_begin,