@@ -188,7 +188,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 */
 	q->queue_tags = tags;
 	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
-	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 }
 EXPORT_SYMBOL(blk_queue_init_tags);
@@ -374,27 +373,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
 	blk_start_request(rq);
-	list_add(&rq->queuelist, &q->tag_busy_list);
 	return 0;
 }
 EXPORT_SYMBOL(blk_queue_start_tag);
-
-/**
- * blk_queue_invalidate_tags - invalidate all pending tags
- * @q: the request queue for the device
- *
- * Description:
- *  Hardware conditions may dictate a need to stop all pending requests.
- *  In this case, we will safely clear the block side of the tag queue and
- *  readd all requests to the request queue in the right order.
- **/
-void blk_queue_invalidate_tags(struct request_queue *q)
-{
-	struct list_head *tmp, *n;
-
-	lockdep_assert_held(q->queue_lock);
-
-	list_for_each_safe(tmp, n, &q->tag_busy_list)
-		blk_requeue_request(q, list_entry_rq(tmp));
-}
-EXPORT_SYMBOL(blk_queue_invalidate_tags);
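
Not part of the patch: a minimal sketch, for context only, of how a legacy (non-blk-mq) driver would typically have invoked the removed blk_queue_invalidate_tags() before this change. The helper name example_hba_reset is hypothetical; the only facts taken from the removed code are the function's signature and its lockdep assertion that q->queue_lock must be held by the caller.

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/*
 * Hypothetical caller, illustration only: after a controller reset, push
 * every started, tagged request back onto the request queue.  The removed
 * blk_queue_invalidate_tags() walked q->tag_busy_list and requeued each
 * entry; its lockdep_assert_held() required q->queue_lock to be taken.
 */
static void example_hba_reset(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_queue_invalidate_tags(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}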