@@ -718,6 +718,38 @@ buffered:
 	return ret;
 }
 
+static void
+xfs_wait_dax_page(
+	struct inode		*inode,
+	bool			*did_unlock)
+{
+	struct xfs_inode	*ip = XFS_I(inode);
+
+	*did_unlock = true;
+	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
+	schedule();
+	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+}
+
+static int
+xfs_break_dax_layouts(
+	struct inode		*inode,
+	uint			iolock,
+	bool			*did_unlock)
+{
+	struct page		*page;
+
+	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));
+
+	page = dax_layout_busy_page(inode->i_mapping);
+	if (!page)
+		return 0;
+
+	return ___wait_var_event(&page->_refcount,
+			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
+			0, 0, xfs_wait_dax_page(inode, did_unlock));
+}
+
 int
 xfs_break_layouts(
 	struct inode		*inode,
@@ -725,19 +757,28 @@ xfs_break_layouts(
 	enum layout_break_reason	reason)
 {
 	bool			retry;
+	int			error;
 
 	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
 
-	switch (reason) {
-	case BREAK_UNMAP:
-		ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));
-		/* fall through */
-	case BREAK_WRITE:
-		return xfs_break_leased_layouts(inode, iolock, &retry);
-	default:
-		WARN_ON_ONCE(1);
-		return -EINVAL;
-	}
+	do {
+		retry = false;
+		switch (reason) {
+		case BREAK_UNMAP:
+			error = xfs_break_dax_layouts(inode, *iolock, &retry);
+			if (error || retry)
+				break;
+			/* fall through */
+		case BREAK_WRITE:
+			error = xfs_break_leased_layouts(inode, iolock, &retry);
+			break;
+		default:
+			WARN_ON_ONCE(1);
+			error = -EINVAL;
+		}
+	} while (error == 0 && retry);
+
+	return error;
 }
 
 #define XFS_FALLOC_FL_SUPPORTED	\