@@ -714,6 +714,31 @@ static void blk_account_io_merge(struct request *req)
 		part_stat_unlock();
 	}
 }
+/*
+ * Two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver treats every bio as
+ * a range and sends them to the controller together. The ranges
+ * need not be contiguous.
+ * Otherwise, the bios/requests are handled the same as other
+ * request types, which must be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+	if (req_op(req) == REQ_OP_DISCARD &&
+	    queue_max_discard_segments(req->q) > 1)
+		return true;
+	return false;
+}
+
+static enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
+{
+	if (blk_discard_mergable(req))
+		return ELEVATOR_DISCARD_MERGE;
+	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
+		return ELEVATOR_BACK_MERGE;
+
+	return ELEVATOR_NO_MERGE;
+}
 
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
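
The two helpers added above are self-contained enough to model in user space. The sketch below is purely illustrative (kernel types and accessors such as req_op() and blk_rq_pos() are replaced by hypothetical plain fields; it is not the in-tree code); it shows that a pair of disjoint ranges still classifies as a DISCARD merge once the queue reports more than one discard segment:

    #include <stdbool.h>
    #include <stdio.h>

    enum elv_merge {
    	ELEVATOR_NO_MERGE,
    	ELEVATOR_BACK_MERGE,
    	ELEVATOR_DISCARD_MERGE,
    };

    /* Stand-ins for struct request/request_queue fields and accessors. */
    struct mock_request {
    	bool discard;                      /* req_op(req) == REQ_OP_DISCARD */
    	unsigned int max_discard_segments; /* queue_max_discard_segments(req->q) */
    	unsigned long long pos;            /* blk_rq_pos(req), in sectors */
    	unsigned long long sectors;        /* blk_rq_sectors(req) */
    };

    static bool mock_discard_mergable(const struct mock_request *req)
    {
    	return req->discard && req->max_discard_segments > 1;
    }

    static enum elv_merge mock_try_req_merge(const struct mock_request *req,
    					 const struct mock_request *next)
    {
    	if (mock_discard_mergable(req))
    		return ELEVATOR_DISCARD_MERGE;	/* ranges may be disjoint */
    	else if (req->pos + req->sectors == next->pos)
    		return ELEVATOR_BACK_MERGE;	/* strictly contiguous */

    	return ELEVATOR_NO_MERGE;
    }

    int main(void)
    {
    	struct mock_request a = { .discard = true, .max_discard_segments = 8,
    				  .pos = 0, .sectors = 8 };	/* sectors 0..7 */
    	struct mock_request b = { .discard = true, .max_discard_segments = 8,
    				  .pos = 1024, .sectors = 8 };	/* sectors 1024..1031 */

    	/* Disjoint DISCARDs: prints 2 (ELEVATOR_DISCARD_MERGE). */
    	printf("%d\n", mock_try_req_merge(&a, &b));
    	return 0;
    }
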
@@ -731,12 +756,6 @@ static struct request *attempt_merge(struct request_queue *q,
 	if (req_op(req) != req_op(next))
 		return NULL;
 
-	/*
-	 * not contiguous
-	 */
-	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
-		return NULL;
-
 	if (rq_data_dir(req) != rq_data_dir(next)
 	    || req->rq_disk != next->rq_disk
 	    || req_no_special_merge(next))
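
The deleted check above is the heart of the fix: because the unconditional contiguity test ran before attempt_merge() ever looked at the operation type, two disjoint DISCARD requests could never reach req_attempt_discard_merge(), even on queues advertising max_discard_segments > 1. The contiguity requirement now lives in blk_try_req_merge(), where it gates only the back-merge case, as the next hunk shows. A hypothetical before/after model (illustrative only, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Old ordering: the contiguity test ran before the DISCARD special case. */
    static bool old_order_mergeable(unsigned long long end_a,
    				unsigned long long start_b,
    				bool multi_range_discard)
    {
    	(void)multi_range_discard;	/* never consulted before the bail-out */
    	if (end_a != start_b)
    		return false;		/* disjoint DISCARDs rejected here */
    	return true;
    }

    /* New ordering: classify first; contiguity only gates back merges. */
    static bool new_order_mergeable(unsigned long long end_a,
    				unsigned long long start_b,
    				bool multi_range_discard)
    {
    	if (multi_range_discard)
    		return true;		/* DISCARD-merge path */
    	return end_a == start_b;	/* back-merge path */
    }

    int main(void)
    {
    	/* DISCARD 0..7 followed by DISCARD 1024..1031: prints old=0 new=1. */
    	printf("old=%d new=%d\n",
    	       old_order_mergeable(8, 1024, true),
    	       new_order_mergeable(8, 1024, true));
    	return 0;
    }
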
@@ -760,11 +779,19 @@ static struct request *attempt_merge(struct request_queue *q,
 	 * counts here. Handle DISCARDs separately, as they
 	 * have separate settings.
 	 */
-	if (req_op(req) == REQ_OP_DISCARD) {
+
+	switch (blk_try_req_merge(req, next)) {
+	case ELEVATOR_DISCARD_MERGE:
 		if (!req_attempt_discard_merge(q, req, next))
 			return NULL;
-	} else if (!ll_merge_requests_fn(q, req, next))
+		break;
+	case ELEVATOR_BACK_MERGE:
+		if (!ll_merge_requests_fn(q, req, next))
+			return NULL;
+		break;
+	default:
 		return NULL;
+	}
 
 	/*
 	 * If failfast settings disagree or any of the two is already
@@ -888,8 +915,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 {
-	if (req_op(rq) == REQ_OP_DISCARD &&
-	    queue_max_discard_segments(rq->q) > 1)
+	if (blk_discard_mergable(rq))
 		return ELEVATOR_DISCARD_MERGE;
 	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
 		return ELEVATOR_BACK_MERGE;
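
With that, both classifiers route through a single predicate: blk_try_merge(), the bio-side counterpart shown last, now calls blk_discard_mergable() just like the request-side blk_try_req_merge(), and the two differ only in the right-hand side of the contiguity comparison (bio->bi_iter.bi_sector versus blk_rq_pos(next)). For example, a request covering sectors 2048..2063 (blk_rq_pos() = 2048, blk_rq_sectors() = 16) back-merges a bio whose bi_iter.bi_sector is 2064, while a multi-range-capable DISCARD request short-circuits to ELEVATOR_DISCARD_MERGE before the sector arithmetic is ever consulted.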