@@ -21,6 +21,7 @@
 #include <linux/delay.h>
 #include <linux/wait.h>
 #include <linux/kthread.h>
+#include <linux/ktime.h>
 #include <linux/elevator.h> /* for rq_end_sector() */
 
 #include <trace/events/block.h>
@@ -219,8 +220,10 @@ struct mapped_device {
 	struct task_struct *kworker_task;
 
 	/* for request-based merge heuristic in dm_request_fn() */
-	sector_t last_rq_pos;
+	unsigned seq_rq_merge_deadline_usecs;
 	int last_rq_rw;
+	sector_t last_rq_pos;
+	ktime_t last_rq_start_time;
 };
 
 /*
@@ -1935,8 +1938,11 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 	blk_start_request(orig);
 	atomic_inc(&md->pending[rq_data_dir(orig)]);
 
-	md->last_rq_pos = rq_end_sector(orig);
-	md->last_rq_rw = rq_data_dir(orig);
+	if (md->seq_rq_merge_deadline_usecs) {
+		md->last_rq_pos = rq_end_sector(orig);
+		md->last_rq_rw = rq_data_dir(orig);
+		md->last_rq_start_time = ktime_get();
+	}
 
 	/*
 	 * Hold the md reference here for the in-flight I/O.
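Note: the new bookkeeping above is guarded by seq_rq_merge_deadline_usecs, so when the heuristic is disabled (the default; see the dm_init_request_based_queue() hunk at the end) dm_start_request() skips the ktime_get() call and the state updates entirely.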
@@ -1948,6 +1954,45 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 	dm_get(md);
 }
 
+#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
+{
+	return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
+}
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+						     const char *buf, size_t count)
+{
+	unsigned deadline;
+
+	if (!dm_request_based(md))
+		return count;
+
+	if (kstrtouint(buf, 10, &deadline))
+		return -EINVAL;
+
+	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
+		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
+
+	md->seq_rq_merge_deadline_usecs = deadline;
+
+	return count;
+}
+
+static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
+{
+	ktime_t kt_deadline;
+
+	if (!md->seq_rq_merge_deadline_usecs)
+		return false;
+
+	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
+	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
+
+	return !ktime_after(ktime_get(), kt_deadline);
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
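Note: dm_request_peeked_before_merge_deadline() returns true only while the configured window since the last dispatched request's start time is still open; the dm_request_fn() hunk below uses it to bound how long a seemingly sequential request can be held back in the hope of a merge. The show/store pair above is only half of the tunable: the full patch also declares these helpers in dm.h and registers them as a sysfs attribute in dm-sysfs.c. A rough sketch of that wiring (not part of this excerpt, and assuming a DM_ATTR_RW() helper analogous to dm-sysfs.c's existing DM_ATTR_RO()):

	/* hypothetical sketch of the dm-sysfs.c side; see the full patch for the exact hunk */
	static DM_ATTR_RW(rq_based_seq_io_merge_deadline);

	static struct attribute *dm_attrs[] = {
		&dm_attr_name.attr,
		&dm_attr_uuid.attr,
		&dm_attr_suspended.attr,
		&dm_attr_rq_based_seq_io_merge_deadline.attr,
		NULL,
	};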
@@ -1990,7 +2035,8 @@ static void dm_request_fn(struct request_queue *q)
 			continue;
 		}
 
-		if (md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+		if (dm_request_peeked_before_merge_deadline(md) &&
+		    md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
 		    md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
 			goto delay_and_out;
 
@@ -2532,6 +2578,9 @@ static int dm_init_request_based_queue(struct mapped_device *md)
 	if (!q)
 		return 0;
 
+	/* disable dm_request_fn's merge heuristic by default */
+	md->seq_rq_merge_deadline_usecs = 0;
+
 	md->queue = q;
 	dm_init_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
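Usage note: with the default of 0 the heuristic stays off and dm_request_fn() behaves as before. To enable it at runtime, write a microsecond value to the new per-device attribute (dm's sysfs directory hangs off the block device, so presumably /sys/block/dm-<N>/dm/rq_based_seq_io_merge_deadline), e.g. 8000 for an 8 ms merge window. Writing 0 disables it again, and the store handler above clamps anything larger than 100000 (100 ms) to MAX_SEQ_RQ_MERGE_DEADLINE_USECS.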