@@ -3629,7 +3629,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 		blk_run_queue_async(q);
 	else
 		__blk_run_queue(q);
-	spin_unlock(q->queue_lock);
+	spin_unlock_irq(q->queue_lock);
 }
 
 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@@ -3677,7 +3677,6 @@ EXPORT_SYMBOL(blk_check_plugged);
 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
-	unsigned long flags;
 	struct request *rq;
 	LIST_HEAD(list);
 	unsigned int depth;
@@ -3697,11 +3696,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	q = NULL;
 	depth = 0;
 
-	/*
-	 * Save and disable interrupts here, to avoid doing it for every
-	 * queue lock we have to take.
-	 */
-	local_irq_save(flags);
 	while (!list_empty(&list)) {
 		rq = list_entry_rq(list.next);
 		list_del_init(&rq->queuelist);
@@ -3714,7 +3708,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 			queue_unplugged(q, depth, from_schedule);
 			q = rq->q;
 			depth = 0;
-			spin_lock(q->queue_lock);
+			spin_lock_irq(q->queue_lock);
 		}
 
 		/*
@@ -3741,8 +3735,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	 */
 	if (q)
 		queue_unplugged(q, depth, from_schedule);
-
-	local_irq_restore(flags);
 }
 
 void blk_finish_plug(struct blk_plug *plug)