@@ -232,6 +232,34 @@ static void *get_unlocked_entry(struct xa_state *xas)
 	}
 }
 
+/*
+ * The only thing keeping the address space around is the i_pages lock
+ * (it's cycled in clear_inode() after removing the entries from i_pages)
+ * After we call xas_unlock_irq(), we cannot touch xas->xa.
+ */
+static void wait_entry_unlocked(struct xa_state *xas, void *entry)
+{
+	struct wait_exceptional_entry_queue ewait;
+	wait_queue_head_t *wq;
+
+	init_wait(&ewait.wait);
+	ewait.wait.func = wake_exceptional_entry_func;
+
+	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+	prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+	xas_unlock_irq(xas);
+	schedule();
+	finish_wait(wq, &ewait.wait);
+
+	/*
+	 * Entry lock waits are exclusive. Wake up the next waiter since
+	 * we aren't sure we will acquire the entry lock and thus wake
+	 * the next waiter up on unlock.
+	 */
+	if (waitqueue_active(wq))
+		__wake_up(wq, TASK_NORMAL, 1, &ewait.key);
+}
+
 static void put_unlocked_entry(struct xa_state *xas, void *entry)
 {
 	/* If we were the only waiter woken, wake the next one */
@@ -389,9 +417,7 @@ bool dax_lock_mapping_entry(struct page *page)
 		entry = xas_load(&xas);
 		if (dax_is_locked(entry)) {
 			rcu_read_unlock();
-			entry = get_unlocked_entry(&xas);
-			xas_unlock_irq(&xas);
-			put_unlocked_entry(&xas, entry);
+			wait_entry_unlocked(&xas, entry);
 			rcu_read_lock();
 			continue;
 		}