|
@@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
|
|
|
}
|
|
|
EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
|
|
|
|
|
|
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
|
|
|
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
|
|
|
void *priv)
|
|
|
{
|
|
|
- struct blk_mq_tags *tags = hctx->tags;
|
|
|
+ struct blk_mq_hw_ctx *hctx;
|
|
|
+ int i;
|
|
|
+
|
|
|
+
|
|
|
+ queue_for_each_hw_ctx(q, hctx, i) {
|
|
|
+ struct blk_mq_tags *tags = hctx->tags;
|
|
|
+
|
|
|
+ /*
|
|
|
+	 * If no software queues are currently mapped to this
|
|
|
+ * hardware queue, there's nothing to check
|
|
|
+ */
|
|
|
+ if (!blk_mq_hw_queue_mapped(hctx))
|
|
|
+ continue;
|
|
|
+
|
|
|
+ if (tags->nr_reserved_tags)
|
|
|
+ bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
|
|
|
+ bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
|
|
|
+ false);
|
|
|
+ }
|
|
|
|
|
|
- if (tags->nr_reserved_tags)
|
|
|
- bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
|
|
|
- bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
|
|
|
- false);
|
|
|
}
|
|
|
-EXPORT_SYMBOL(blk_mq_tag_busy_iter);
|
|
|
|
|
|
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
|
|
|
{
|