@@ -721,12 +721,10 @@ xfs_file_write_iter(
 
 static void
 xfs_wait_dax_page(
-	struct inode		*inode,
-	bool			*did_unlock)
+	struct inode		*inode)
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 
-	*did_unlock = true;
 	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
 	schedule();
 	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
@@ -735,7 +733,7 @@ xfs_wait_dax_page(
 static int
 xfs_break_dax_layouts(
 	struct inode		*inode,
-	bool			*did_unlock)
+	bool			*retry)
 {
 	struct page		*page;
 
@@ -745,9 +743,10 @@ xfs_break_dax_layouts(
 	if (!page)
 		return 0;
 
+	*retry = true;
 	return ___wait_var_event(&page->_refcount,
 			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
-			0, 0, xfs_wait_dax_page(inode, did_unlock));
+			0, 0, xfs_wait_dax_page(inode));
 }
 
 int
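
For reference, a minimal sketch of how a caller is expected to consume the renamed *retry flag: since xfs_break_dax_layouts() drops and retakes XFS_MMAPLOCK_EXCL while it sleeps, the caller must loop until the flag stays false. The example_break_layouts() wrapper below is illustrative only and is not part of this patch; the real caller lives elsewhere in fs/xfs/xfs_file.c.

	/*
	 * Illustrative sketch, not the patched kernel code: retry the
	 * layout break as long as xfs_break_dax_layouts() had to drop
	 * the MMAPLOCK to wait for a DAX page reference to go away.
	 */
	static int example_break_layouts(struct inode *inode)
	{
		bool	retry;
		int	error;

		do {
			retry = false;
			error = xfs_break_dax_layouts(inode, &retry);
		} while (error == 0 && retry);

		return error;
	}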