@@ -2461,6 +2461,60 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 }
 EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
 
+static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
+{
+	struct request_queue *q = hctx->queue;
+	long state;
+
+	hctx->poll_considered++;
+
+	state = current->state;
+	while (!need_resched()) {
+		int ret;
+
+		hctx->poll_invoked++;
+
+		ret = q->mq_ops->poll(hctx, rq->tag);
+		if (ret > 0) {
+			hctx->poll_success++;
+			set_current_state(TASK_RUNNING);
+			return true;
+		}
+
+		if (signal_pending_state(state, current))
+			set_current_state(TASK_RUNNING);
+
+		if (current->state == TASK_RUNNING)
+			return true;
+		if (ret < 0)
+			break;
+		cpu_relax();
+	}
+
+	return false;
+}
+
+bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+{
+	struct blk_mq_hw_ctx *hctx;
+	struct blk_plug *plug;
+	struct request *rq;
+
+	if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
+	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+		return false;
+
+	plug = current->plug;
+	if (plug)
+		blk_flush_plug_list(plug, false);
+
+	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
+	rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
+
+	return __blk_mq_poll(hctx, rq);
+}
+EXPORT_SYMBOL_GPL(blk_mq_poll);
+
 void blk_mq_disable_hotplug(void)
 {
 	mutex_lock(&all_q_mutex);
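
For context, a hedged sketch of how a synchronous submitter might drive the
new blk_mq_poll() entry point. This is illustration only, not part of the
patch: my_submit_and_poll() and the *done flag (assumed to be set by the
bio's bi_end_io handler) are hypothetical; submit_bio(), bdev_get_queue(),
blk_mq_poll() and the scheduler helpers are the real APIs of this era.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched.h>

/*
 * Hypothetical caller (illustration only): submit a bio, keep the
 * blk_qc_t cookie returned at submission time, then alternate between
 * polling the hardware queue and sleeping until the completion handler
 * sets *done.
 */
static void my_submit_and_poll(struct block_device *bdev, struct bio *bio,
			       bool *done)
{
	blk_qc_t cookie = submit_bio(bio);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*done))
			break;
		/* Poll the driver; if the queue cannot be polled or no
		 * completion was found, sleep until the IRQ path wakes us. */
		if (!blk_mq_poll(bdev_get_queue(bdev), cookie))
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);
}

This pattern lines up with __blk_mq_poll() above: the caller arms its sleep
state before polling, __blk_mq_poll() captures that state once, and it
forces TASK_RUNNING when a completion is reaped or when a signal arrives
for an interruptible sleeper, so the caller never sleeps past a finished
request.

On the driver side, q->mq_ops->poll is expected to reap completions for the
hardware context and report what it found. A minimal sketch of that
contract follows; the mydrv_* names are hypothetical, only the hook
signature matches what this code calls:

#include <linux/blk-mq.h>

struct mydrv_queue {
	int dummy;	/* hypothetical per-hctx driver state */
};

/* Stand-in for driver logic that scans the hardware completion ring and
 * completes finished requests, returning how many it reaped. */
static int mydrv_reap_completions(struct mydrv_queue *mq)
{
	return 0;	/* placeholder */
}

/*
 * Hypothetical ->poll() hook (illustration only). "tag" identifies the
 * request the caller is spinning on; a real driver may use it to stop
 * early. __blk_mq_poll() above interprets the return value as: > 0
 * completions reaped (success), 0 nothing yet (keep spinning), < 0
 * polling cannot make progress (stop spinning).
 */
static int mydrv_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct mydrv_queue *mq = hctx->driver_data;

	return mydrv_reap_completions(mq);
}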