@@ -933,9 +933,9 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Get a free request from @q. This function may fail under memory
  * pressure or if @q is dead.
  *
- * Must be callled with @q->queue_lock held and,
- * Returns %NULL on failure, with @q->queue_lock held.
- * Returns !%NULL on success, with @q->queue_lock *not held*.
+ * Must be called with @q->queue_lock held and,
+ * Returns ERR_PTR on failure, with @q->queue_lock held.
+ * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *__get_request(struct request_list *rl, int rw_flags,
 				     struct bio *bio, gfp_t gfp_mask)
@@ -949,7 +949,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 	int may_queue;
 
 	if (unlikely(blk_queue_dying(q)))
-		return NULL;
+		return ERR_PTR(-ENODEV);
 
 	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
@@ -974,7 +974,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 				 * process is not a "batcher", and not
 				 * exempted by the IO scheduler
 				 */
-				return NULL;
+				return ERR_PTR(-ENOMEM);
 			}
 		}
 	}
@@ -992,7 +992,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 	 * allocated with any setting of ->nr_requests
 	 */
 	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	q->nr_rqs[is_sync]++;
 	rl->count[is_sync]++;
@@ -1097,7 +1097,7 @@ fail_alloc:
 rq_starved:
 	if (unlikely(rl->count[is_sync] == 0))
 		rl->starved[is_sync] = 1;
-	return NULL;
+	return ERR_PTR(-ENOMEM);
 }
 
 /**
@@ -1110,9 +1110,9 @@ rq_starved:
  * Get a free request from @q. If %__GFP_WAIT is set in @gfp_mask, this
  * function keeps retrying under memory pressure and fails iff @q is dead.
  *
- * Must be callled with @q->queue_lock held and,
- * Returns %NULL on failure, with @q->queue_lock held.
- * Returns !%NULL on success, with @q->queue_lock *not held*.
+ * Must be called with @q->queue_lock held and,
+ * Returns ERR_PTR on failure, with @q->queue_lock held.
+ * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *get_request(struct request_queue *q, int rw_flags,
 				   struct bio *bio, gfp_t gfp_mask)
@@ -1125,12 +1125,12 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
 retry:
 	rq = __get_request(rl, rw_flags, bio, gfp_mask);
-	if (rq)
+	if (!IS_ERR(rq))
 		return rq;
 
 	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
 		blk_put_rl(rl);
-		return NULL;
+		return rq;
 	}
 
 	/* wait on @rl and retry */
@@ -1167,7 +1167,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 
 	spin_lock_irq(q->queue_lock);
 	rq = get_request(q, rw, NULL, gfp_mask);
-	if (!rq)
+	if (IS_ERR(rq))
 		spin_unlock_irq(q->queue_lock);
 	/* q->queue_lock is unlocked at this point */
 
@@ -1219,8 +1219,8 @@ struct request *blk_make_request(struct request_queue *q, struct bio *bio,
 {
 	struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
 
-	if (unlikely(!rq))
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(rq))
+		return rq;
 
 	blk_rq_set_block_pc(rq);
 
@@ -1615,8 +1615,8 @@ get_rq:
 	 * Returns with the queue unlocked.
 	 */
 	req = get_request(q, rw_flags, bio, GFP_NOIO);
-	if (unlikely(!req)) {
-		bio_endio(bio, -ENODEV);	/* @q is dead */
+	if (IS_ERR(req)) {
+		bio_endio(bio, PTR_ERR(req));	/* @q is dead */
 		goto out_unlock;
 	}
 
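
Caller-side note (illustrative, not part of the patch): after this conversion, request allocation reports failure as an ERR_PTR-encoded errno (-ENODEV when the queue is dying, -ENOMEM otherwise) instead of NULL, so callers test IS_ERR() and propagate PTR_ERR(). A minimal sketch of that pattern, assuming the three-argument blk_get_request() of this kernel; the wrapper function name is hypothetical:

/*
 * Illustrative sketch only, assuming the post-patch semantics: failure
 * comes back as an ERR_PTR, never NULL.  Needs <linux/blkdev.h>,
 * <linux/err.h> and <linux/gfp.h>; example_submit() is a made-up name.
 */
static int example_submit(struct request_queue *q, int rw)
{
	struct request *rq;

	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* propagate -ENODEV or -ENOMEM */

	/* ... set up and issue the request here ... */

	blk_put_request(rq);
	return 0;
}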