@@ -31,6 +31,7 @@
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 #include "blk-stat.h"
+#include "blk-wbt.h"
 
 static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
@@ -326,6 +327,8 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
+
+	wbt_done(q->rq_wb, &rq->issue_stat);
 	rq->rq_flags = 0;
 
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
@@ -354,6 +357,7 @@ inline void __blk_mq_end_request(struct request *rq, int error)
 	blk_account_io_done(rq);
 
 	if (rq->end_io) {
+		wbt_done(rq->q->rq_wb, &rq->issue_stat);
 		rq->end_io(rq, error);
 	} else {
 		if (unlikely(blk_bidi_rq(rq)))
@@ -471,6 +475,7 @@ void blk_mq_start_request(struct request *rq)
 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
 		blk_stat_set_issue_time(&rq->issue_stat);
 		rq->rq_flags |= RQF_STATS;
+		wbt_issue(q->rq_wb, &rq->issue_stat);
 	}
 
 	blk_add_timer(rq);
@@ -508,6 +513,7 @@ static void __blk_mq_requeue_request(struct request *rq)
 	struct request_queue *q = rq->q;
 
 	trace_block_rq_requeue(q, rq);
+	wbt_requeue(q->rq_wb, &rq->issue_stat);
 
 	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
 		if (q->dma_drain_size && blk_rq_bytes(rq))
@@ -1339,6 +1345,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	struct blk_plug *plug;
 	struct request *same_queue_rq = NULL;
 	blk_qc_t cookie;
+	unsigned int wb_acct;
 
 	blk_queue_bounce(q, &bio);
 
@@ -1353,9 +1360,15 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
 		return BLK_QC_T_NONE;
 
+	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
+
 	rq = blk_mq_map_request(q, bio, &data);
-	if (unlikely(!rq))
+	if (unlikely(!rq)) {
+		__wbt_done(q->rq_wb, wb_acct);
 		return BLK_QC_T_NONE;
+	}
+
+	wbt_track(&rq->issue_stat, wb_acct);
 
 	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
 
@@ -1439,6 +1452,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	struct blk_mq_alloc_data data;
 	struct request *rq;
 	blk_qc_t cookie;
+	unsigned int wb_acct;
 
 	blk_queue_bounce(q, &bio);
 
@@ -1455,9 +1469,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	} else
 		request_count = blk_plug_queued_count(q);
 
+	wb_acct = wbt_wait(q->rq_wb, bio, NULL);
+
 	rq = blk_mq_map_request(q, bio, &data);
-	if (unlikely(!rq))
+	if (unlikely(!rq)) {
+		__wbt_done(q->rq_wb, wb_acct);
 		return BLK_QC_T_NONE;
+	}
+
+	wbt_track(&rq->issue_stat, wb_acct);
 
 	cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
 
@@ -2139,6 +2159,8 @@ void blk_mq_free_queue(struct request_queue *q)
	list_del_init(&q->all_q_node);
 	mutex_unlock(&all_q_mutex);
 
+	wbt_exit(q);
+
 	blk_mq_del_queue_tag_set(q);
 
 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
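
The hunks above all follow one accounting discipline: wbt_wait() charges the
bio before request allocation and returns a cookie; if allocation fails,
__wbt_done() releases that cookie immediately, otherwise wbt_track() stores
it in rq->issue_stat so that wbt_issue(), wbt_requeue() and wbt_done() can
balance it later. Below is a minimal user-space sketch of that pairing rule
only; the wbt_* functions here are simplified stand-ins written for
illustration, not the blk-wbt implementation.

	#include <stdio.h>

	/* Stand-in for the kernel's issue_stat; only carries the cookie. */
	struct issue_stat { unsigned int wb_acct; };

	/* Stand-in: throttle the submitter, return an accounting cookie. */
	static unsigned int wbt_wait(void) { return 1; }

	/* Stand-in: drop a cookie that never made it onto a request. */
	static void __wbt_done(unsigned int wb_acct)
	{
		printf("dropped cookie %u (no request allocated)\n", wb_acct);
	}

	/* Stand-in: stash the cookie so completion can find it. */
	static void wbt_track(struct issue_stat *stat, unsigned int wb_acct)
	{
		stat->wb_acct = wb_acct;
	}

	/* Stand-in: release the cookie at request completion. */
	static void wbt_done(struct issue_stat *stat)
	{
		printf("completed cookie %u\n", stat->wb_acct);
	}

	int main(void)
	{
		struct issue_stat stat;
		unsigned int wb_acct;
		int alloc_ok = 1;	/* flip to 0 to exercise the failure path */

		wb_acct = wbt_wait();		/* before request allocation */
		if (!alloc_ok) {
			__wbt_done(wb_acct);	/* failure path: drop it now */
			return 0;
		}
		wbt_track(&stat, wb_acct);	/* cookie travels with the request */
		wbt_done(&stat);		/* completion path: release it */
		return 0;
	}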