@@ -1060,6 +1060,45 @@ int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 }
 EXPORT_SYMBOL(noop_fsync);
 
+int noop_set_page_dirty(struct page *page)
+{
+	/*
+	 * Unlike __set_page_dirty_no_writeback that handles dirty page
+	 * tracking in the page object, dax does all dirty tracking in
+	 * the inode address_space in response to mkwrite faults. In the
+	 * dax case we only need to worry about potentially dirty CPU
+	 * caches, not dirty page cache pages to write back.
+	 *
+	 * This callback is defined to prevent fallback to
+	 * __set_page_dirty_buffers() in set_page_dirty().
+	 */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(noop_set_page_dirty);
+
+void noop_invalidatepage(struct page *page, unsigned int offset,
+		unsigned int length)
+{
+	/*
+	 * There is no page cache to invalidate in the dax case, however
+	 * we need this callback defined to prevent falling back to
+	 * block_invalidatepage() in do_invalidatepage().
+	 */
+}
+EXPORT_SYMBOL_GPL(noop_invalidatepage);
+
+ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+{
+	/*
+	 * iomap based filesystems support direct I/O without need for
+	 * this callback. However, it still needs to be set in
+	 * inode->a_ops so that open/fcntl know that direct I/O is
+	 * generally supported.
+	 */
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(noop_direct_IO);
+
 /* Because kfree isn't assignment-compatible with void(void*) ;-/ */
 void kfree_link(void *p)
 {
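
For context, these helpers are intended to be wired into the address_space_operations of a DAX-capable filesystem. A minimal sketch follows; dax_fs_writepages is a hypothetical placeholder for the filesystem's own routine that flushes dirty CPU caches for the mapping (e.g. via dax_writeback_mapping_range()), not something defined by this patch:

	static const struct address_space_operations dax_fs_aops = {
		.writepages	= dax_fs_writepages,	/* hypothetical; flushes dirty CPU caches */
		.direct_IO	= noop_direct_IO,	/* lets open/fcntl accept O_DIRECT */
		.set_page_dirty	= noop_set_page_dirty,	/* dirty tracking lives in the address_space */
		.invalidatepage	= noop_invalidatepage,	/* no page cache pages to invalidate */
	};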