@@ -229,8 +229,8 @@ static void put_unlocked_mapping_entry(struct address_space *mapping,
  *
  * Must be called with the i_pages lock held.
  */
-static void *__get_unlocked_mapping_entry(struct address_space *mapping,
-                pgoff_t index, void ***slotp, bool (*wait_fn)(void))
+static void *get_unlocked_mapping_entry(struct address_space *mapping,
+                pgoff_t index, void ***slotp)
 {
         void *entry, **slot;
         struct wait_exceptional_entry_queue ewait;
@@ -240,8 +240,6 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping,
         ewait.wait.func = wake_exceptional_entry_func;
 
         for (;;) {
-                bool revalidate;
-
                 entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
                                             &slot);
                 if (!entry ||
@@ -256,30 +254,39 @@ static void *__get_unlocked_mapping_entry(struct address_space *mapping,
                 prepare_to_wait_exclusive(wq, &ewait.wait,
                                           TASK_UNINTERRUPTIBLE);
                 xa_unlock_irq(&mapping->i_pages);
-                revalidate = wait_fn();
+                schedule();
                 finish_wait(wq, &ewait.wait);
                 xa_lock_irq(&mapping->i_pages);
-                if (revalidate) {
-                        put_unlocked_mapping_entry(mapping, index, entry);
-                        return ERR_PTR(-EAGAIN);
-                }
         }
 }
 
-static bool entry_wait(void)
+/*
+ * The only thing keeping the address space around is the i_pages lock
+ * (it's cycled in clear_inode() after removing the entries from i_pages)
+ * After we call xa_unlock_irq(), we cannot touch mapping->i_pages.
+ */
+static void wait_entry_unlocked(struct address_space *mapping, pgoff_t index,
+                void ***slotp, void *entry)
 {
+        struct wait_exceptional_entry_queue ewait;
+        wait_queue_head_t *wq;
+
+        init_wait(&ewait.wait);
+        ewait.wait.func = wake_exceptional_entry_func;
+
+        wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
+        prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+        xa_unlock_irq(&mapping->i_pages);
         schedule();
+        finish_wait(wq, &ewait.wait);
+
         /*
-         * Never return an ERR_PTR() from
-         * __get_unlocked_mapping_entry(), just keep looping.
+         * Entry lock waits are exclusive. Wake up the next waiter since
+         * we aren't sure we will acquire the entry lock and thus wake
+         * the next waiter up on unlock.
          */
-        return false;
-}
-
-static void *get_unlocked_mapping_entry(struct address_space *mapping,
-                pgoff_t index, void ***slotp)
-{
-        return __get_unlocked_mapping_entry(mapping, index, slotp, entry_wait);
+        if (waitqueue_active(wq))
+                __wake_up(wq, TASK_NORMAL, 1, &ewait.key);
 }
 
 static void unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
@@ -398,19 +405,6 @@ static struct page *dax_busy_page(void *entry)
         return NULL;
 }
 
-static bool entry_wait_revalidate(void)
-{
-        rcu_read_unlock();
-        schedule();
-        rcu_read_lock();
-
-        /*
-         * Tell __get_unlocked_mapping_entry() to take a break, we need
-         * to revalidate page->mapping after dropping locks
-         */
-        return true;
-}
-
 bool dax_lock_mapping_entry(struct page *page)
 {
         pgoff_t index;
@@ -446,14 +440,15 @@ bool dax_lock_mapping_entry(struct page *page)
                 }
                 index = page->index;
 
-                entry = __get_unlocked_mapping_entry(mapping, index, &slot,
-                                entry_wait_revalidate);
+                entry = __radix_tree_lookup(&mapping->i_pages, index,
+                                            NULL, &slot);
                 if (!entry) {
                         xa_unlock_irq(&mapping->i_pages);
                         break;
-                } else if (IS_ERR(entry)) {
-                        xa_unlock_irq(&mapping->i_pages);
-                        WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
+                } else if (slot_locked(mapping, slot)) {
+                        rcu_read_unlock();
+                        wait_entry_unlocked(mapping, index, &slot, entry);
+                        rcu_read_lock();
                         continue;
                 }
                 lock_slot(mapping, slot);