@@ -77,7 +77,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 	clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static int blk_mq_queue_enter(struct request_queue *q)
+static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
 {
 	while (true) {
 		int ret;
@@ -85,6 +85,9 @@ static int blk_mq_queue_enter(struct request_queue *q)
 		if (percpu_ref_tryget_live(&q->mq_usage_counter))
 			return 0;
 
+		if (!(gfp & __GFP_WAIT))
+			return -EBUSY;
+
 		ret = wait_event_interruptible(q->mq_freeze_wq,
 				!q->mq_freeze_depth || blk_queue_dying(q));
 		if (blk_queue_dying(q))
@@ -256,7 +259,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
 	struct blk_mq_alloc_data alloc_data;
 	int ret;
 
-	ret = blk_mq_queue_enter(q);
+	ret = blk_mq_queue_enter(q, gfp);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -1186,7 +1189,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	int rw = bio_data_dir(bio);
 	struct blk_mq_alloc_data alloc_data;
 
-	if (unlikely(blk_mq_queue_enter(q))) {
+	if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
		bio_endio(bio, -EIO);
		return NULL;
	}
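
Note (illustrative sketch, not part of the patch): blk_mq_queue_enter() now
takes the caller's gfp mask and fails fast with -EBUSY instead of sleeping on
mq_freeze_wq when __GFP_WAIT is not set; blk_mq_map_request() keeps the old
blocking behavior by passing GFP_KERNEL. A minimal caller of the new
non-blocking path could look like the following, assuming the
blk_mq_alloc_request() signature visible in the hunk above; the helper name
try_alloc_request_nowait is invented for illustration:

	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	/*
	 * Attempt a request allocation without sleeping. GFP_ATOMIC does
	 * not include __GFP_WAIT, so with this patch blk_mq_queue_enter()
	 * returns -EBUSY while the queue is frozen rather than waiting,
	 * and the caller can back off and retry later.
	 */
	static struct request *try_alloc_request_nowait(struct request_queue *q,
							int rw)
	{
		struct request *rq;

		rq = blk_mq_alloc_request(q, rw, GFP_ATOMIC, false);
		if (IS_ERR(rq))
			return NULL;	/* e.g. -EBUSY: queue frozen */

		return rq;
	}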