@@ -134,19 +134,14 @@ EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
 /*
  * nfs_page_group_lock - lock the head of the page group
  * @req - request in group that is to be locked
- * @nonblock - if true don't block waiting for lock
  *
- * this lock must be held if modifying the page group list
+ * this lock must be held when traversing or modifying the page
+ * group list
  *
- * return 0 on success, < 0 on error: -EDELAY if nonblocking or the
- * result from wait_on_bit_lock
- *
- * NOTE: calling with nonblock=false should always have set the
- * lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
- * with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
+ * return 0 on success, < 0 on error
  */
 int
-nfs_page_group_lock(struct nfs_page *req, bool nonblock)
+nfs_page_group_lock(struct nfs_page *req)
 {
 	struct nfs_page *head = req->wb_head;
 
@@ -155,14 +150,10 @@ nfs_page_group_lock(struct nfs_page *req, bool nonblock)
 	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
 		return 0;
 
-	if (!nonblock) {
-		set_bit(PG_CONTENDED1, &head->wb_flags);
-		smp_mb__after_atomic();
-		return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
+	set_bit(PG_CONTENDED1, &head->wb_flags);
+	smp_mb__after_atomic();
+	return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
 				TASK_UNINTERRUPTIBLE);
-	}
-
-	return -EAGAIN;
 }
 
 /*
@@ -225,7 +216,7 @@ bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
 {
 	bool ret;
 
-	nfs_page_group_lock(req, false);
+	nfs_page_group_lock(req);
 	ret = nfs_page_group_sync_on_bit_locked(req, bit);
 	nfs_page_group_unlock(req);
 
@@ -1016,7 +1007,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 	unsigned int bytes_left = 0;
 	unsigned int offset, pgbase;
 
-	nfs_page_group_lock(req, false);
+	nfs_page_group_lock(req);
 
 	subreq = req;
 	bytes_left = subreq->wb_bytes;
@@ -1038,7 +1029,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 		if (mirror->pg_recoalesce)
 			return 0;
 		/* retry add_request for this subreq */
-		nfs_page_group_lock(req, false);
+		nfs_page_group_lock(req);
 		continue;
 	}
 
@@ -1135,7 +1126,7 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 
 	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
 		if (midx) {
-			nfs_page_group_lock(req, false);
+			nfs_page_group_lock(req);
 
 			/* find the last request */
 			for (lastreq = req->wb_head;
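
With the nonblock parameter gone, nfs_page_group_lock() always blocks until it owns PG_HEADLOCK, so the -EAGAIN path and the old NOTE about not needing to check the result disappear together. As a rough, non-authoritative sketch of the simplified calling convention, the helper below is hypothetical and not part of the patch; only nfs_page_group_lock(), nfs_page_group_unlock() and the struct nfs_page wb_head/wb_this_page fields are assumed from the existing NFS page code.

#include <linux/nfs_page.h>

/*
 * Illustrative sketch only (not from the patch): take the group head
 * lock, walk the subrequest ring, and drop the lock, matching the
 * "held when traversing or modifying the page group list" rule in the
 * updated comment.
 */
static void example_walk_page_group(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *subreq = head;

	/*
	 * After this patch the lock is always taken in blocking mode;
	 * wait_on_bit_lock() with TASK_UNINTERRUPTIBLE acquires the bit
	 * before returning, so the return value is ignored here.
	 */
	nfs_page_group_lock(req);

	/* subrequests form a circular list threaded through wb_this_page */
	do {
		/* ... inspect or modify subreq while the group is stable ... */
		subreq = subreq->wb_this_page;
	} while (subreq != head);

	nfs_page_group_unlock(req);
}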