@@ -374,6 +374,7 @@ void blk_clear_preempt_only(struct request_queue *q)
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
+	wake_up_all(&q->mq_freeze_wq);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
@@ -795,15 +796,38 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
-int blk_queue_enter(struct request_queue *q, bool nowait)
+/**
+ * blk_queue_enter() - try to increase q->q_usage_counter
+ * @q: request queue pointer
+ * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
+ */
+int blk_queue_enter(struct request_queue *q, unsigned int flags)
 {
+	const bool preempt = flags & BLK_MQ_REQ_PREEMPT;
+
 	while (true) {
+		bool success = false;
 		int ret;
 
-		if (percpu_ref_tryget_live(&q->q_usage_counter))
+		rcu_read_lock_sched();
+		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
+			/*
+			 * The code that sets the PREEMPT_ONLY flag is
+			 * responsible for ensuring that that flag is globally
+			 * visible before the queue is unfrozen.
+			 */
+			if (preempt || !blk_queue_preempt_only(q)) {
+				success = true;
+			} else {
+				percpu_ref_put(&q->q_usage_counter);
+			}
+		}
+		rcu_read_unlock_sched();
+
+		if (success)
 			return 0;
 
-		if (nowait)
+		if (flags & BLK_MQ_REQ_NOWAIT)
 			return -EBUSY;
 
 		/*
@@ -816,7 +840,8 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
 		smp_rmb();
 
 		ret = wait_event_interruptible(q->mq_freeze_wq,
-				!atomic_read(&q->mq_freeze_depth) ||
+				(atomic_read(&q->mq_freeze_depth) == 0 &&
+				 (preempt || !blk_queue_preempt_only(q))) ||
 				blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
@@ -1445,8 +1470,7 @@ static struct request *blk_old_get_request(struct request_queue *q,
 	/* create ioc upfront */
 	create_io_context(gfp_mask, q->node);
 
-	ret = blk_queue_enter(q, !(gfp_mask & __GFP_DIRECT_RECLAIM) ||
-			      (op & REQ_NOWAIT));
+	ret = blk_queue_enter(q, flags);
 	if (ret)
 		return ERR_PTR(ret);
 	spin_lock_irq(q->queue_lock);
@@ -2267,8 +2291,10 @@ blk_qc_t generic_make_request(struct bio *bio)
 	current->bio_list = bio_list_on_stack;
 	do {
 		struct request_queue *q = bio->bi_disk->queue;
+		unsigned int flags = bio->bi_opf & REQ_NOWAIT ?
+			BLK_MQ_REQ_NOWAIT : 0;
 
-		if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
+		if (likely(blk_queue_enter(q, flags) == 0)) {
 			struct bio_list lower, same;
 
 			/* Create a fresh bio_list for all subordinate requests */
@@ -2327,7 +2353,7 @@ blk_qc_t direct_make_request(struct bio *bio)
 	if (!generic_make_request_checks(bio))
 		return BLK_QC_T_NONE;
 
-	if (unlikely(blk_queue_enter(q, nowait))) {
+	if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) {
 		if (nowait && !blk_queue_dying(q))
 			bio->bi_status = BLK_STS_AGAIN;
 		else