@@ -909,13 +909,33 @@ static void wake_up_page_bit(struct page *page, int bit_nr)
 	wait_queue_head_t *q = page_waitqueue(page);
 	struct wait_page_key key;
 	unsigned long flags;
+	wait_queue_entry_t bookmark;
 
 	key.page = page;
 	key.bit_nr = bit_nr;
 	key.page_match = 0;
 
+	bookmark.flags = 0;
+	bookmark.private = NULL;
+	bookmark.func = NULL;
+	INIT_LIST_HEAD(&bookmark.entry);
+
 	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_locked_key(q, TASK_NORMAL, &key);
+	__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
+
+	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
+		/*
+		 * Take a breather from holding the lock,
+		 * allow pages that finish wake up asynchronously
+		 * to acquire the lock and remove themselves
+		 * from wait queue
+		 */
+		spin_unlock_irqrestore(&q->lock, flags);
+		cpu_relax();
+		spin_lock_irqsave(&q->lock, flags);
+		__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
+	}
+
 	/*
 	 * It is possible for other pages to have collided on the waitqueue
 	 * hash, so in that case check for a page match. That prevents a long-