@@ -612,6 +612,9 @@ void blk_set_queue_dying(struct request_queue *q)
 		}
 		spin_unlock_irq(q->queue_lock);
 	}
+
+	/* Make blk_queue_enter() reexamine the DYING flag. */
+	wake_up_all(&q->mq_freeze_wq);
 }
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
 
@@ -1398,16 +1401,22 @@ static struct request *blk_old_get_request(struct request_queue *q,
 					   unsigned int op, gfp_t gfp_mask)
 {
 	struct request *rq;
+	int ret = 0;
 
 	WARN_ON_ONCE(q->mq_ops);
 
 	/* create ioc upfront */
 	create_io_context(gfp_mask, q->node);
 
+	ret = blk_queue_enter(q, !(gfp_mask & __GFP_DIRECT_RECLAIM) ||
+			      (op & REQ_NOWAIT));
+	if (ret)
+		return ERR_PTR(ret);
 	spin_lock_irq(q->queue_lock);
 	rq = get_request(q, op, NULL, gfp_mask);
 	if (IS_ERR(rq)) {
 		spin_unlock_irq(q->queue_lock);
+		blk_queue_exit(q);
 		return rq;
 	}
 
@@ -1579,6 +1588,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 		blk_free_request(rl, req);
 		freed_request(rl, sync, rq_flags);
 		blk_put_rl(rl);
+		blk_queue_exit(q);
 	}
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
@@ -1860,8 +1870,10 @@ get_rq:
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
+	blk_queue_enter_live(q);
 	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
+		blk_queue_exit(q);
 		__wbt_done(q->rq_wb, wb_acct);
 		if (PTR_ERR(req) == -ENOMEM)
 			bio->bi_status = BLK_STS_RESOURCE;
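
The pairing rule these hunks establish is: a legacy (!mq) request takes one q_usage_counter
reference when it is allocated (blk_queue_enter() in blk_old_get_request(), or
blk_queue_enter_live() on the bio submission path) and drops it exactly once, either on the
allocation error path or in __blk_put_request(). The small userspace sketch below models only
that invariant; it is not kernel code, and the fake_queue/fake_* names are invented for
illustration.

/* Toy model of the enter/exit balancing enforced by the patch above. */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	int usage;	/* stands in for q->q_usage_counter */
	bool dying;	/* stands in for the DYING queue flag */
};

/* Counterpart of blk_queue_enter(): refuse new work on a dying queue. */
static int fake_queue_enter(struct fake_queue *q)
{
	if (q->dying)
		return -ENODEV;
	q->usage++;
	return 0;
}

/* Counterpart of blk_queue_exit(): drop one usage reference. */
static void fake_queue_exit(struct fake_queue *q)
{
	assert(q->usage > 0);
	q->usage--;
}

/* Mirrors the shape of blk_old_get_request(): enter first, exit on failure. */
static int fake_get_request(struct fake_queue *q, bool alloc_fails)
{
	int ret = fake_queue_enter(q);

	if (ret)
		return ret;
	if (alloc_fails) {
		fake_queue_exit(q);	/* a failed allocation must not leak a reference */
		return -ENOMEM;
	}
	return 0;	/* the reference is dropped later by the "put" path */
}

int main(void)
{
	struct fake_queue q = { .usage = 0, .dying = false };

	assert(fake_get_request(&q, false) == 0);	/* success: one reference held */
	assert(fake_get_request(&q, true) == -ENOMEM);	/* failure: reference released */
	fake_queue_exit(&q);				/* mirrors __blk_put_request() */
	assert(q.usage == 0);				/* counter is balanced again */

	q.dying = true;
	assert(fake_get_request(&q, false) == -ENODEV);	/* dying queue rejects new work */
	printf("usage counter balanced: %d\n", q.usage);
	return 0;
}

The same balance is what allows a dying or frozen queue to wait for the counter to reach zero
before tearing the queue down, which is why blk_set_queue_dying() now wakes mq_freeze_wq.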