@@ -438,14 +438,17 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
-	bool drain;
-
 	spin_lock_irq(q->queue_lock);
-	drain = !q->bypass_depth++;
+	q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (drain) {
+	/*
+	 * Queues start drained. Skip actual draining till init is
+	 * complete. This avoids lengthy delays during queue init which
+	 * can happen many times during boot.
+	 */
+	if (blk_queue_init_done(q)) {
 		spin_lock_irq(q->queue_lock);
 		__blk_drain_queue(q, false);
 		spin_unlock_irq(q->queue_lock);
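
For readers outside the kernel tree, the logic being changed is a depth counter gated by an init-done flag: bypass still nests via bypass_depth, but the expensive drain is skipped until the queue has finished initialization. The userspace sketch below is a minimal model of that control flow only; the struct fields and helper names mirror the patch, while the locking, the drain body, and the main() scaffolding are illustrative stand-ins, not the real block-layer implementation.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct request_queue: only the fields this patch touches. */
struct request_queue {
	pthread_mutex_t queue_lock;
	unsigned int bypass_depth;
	bool bypass;     /* stands in for QUEUE_FLAG_BYPASS */
	bool init_done;  /* stands in for the init-done state tested by blk_queue_init_done() */
};

/* Placeholder for __blk_drain_queue(): the real function waits out in-flight requests. */
static void drain_queue(struct request_queue *q)
{
	printf("draining queue (bypass_depth=%u)\n", q->bypass_depth);
}

/* Mirrors the patched blk_queue_bypass_start(): always bump the depth and set the
 * bypass flag, but only pay for a drain once initialization has completed. */
static void queue_bypass_start(struct request_queue *q)
{
	pthread_mutex_lock(&q->queue_lock);
	q->bypass_depth++;
	q->bypass = true;
	pthread_mutex_unlock(&q->queue_lock);

	if (q->init_done) {
		pthread_mutex_lock(&q->queue_lock);
		drain_queue(q);
		pthread_mutex_unlock(&q->queue_lock);
	}
}

int main(void)
{
	struct request_queue q = { .queue_lock = PTHREAD_MUTEX_INITIALIZER };

	queue_bypass_start(&q);  /* during init: depth increments, no drain */
	q.init_done = true;
	queue_bypass_start(&q);  /* after init: drain actually runs */
	return 0;
}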