blk-merge.c

/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

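/*
 * Walk every biovec in the bio chain and count the number of physical
 * segments the hardware would see, honouring the queue's clustering,
 * max-segment-size and segment-boundary limits. Also records the size of
 * the first and last segments in bi_seg_front_size/bi_seg_back_size so
 * later merges can check the combined segment size cheaply.
 */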
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio)
{
        struct bio_vec bv, bvprv = { NULL };
        int cluster, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;

        if (!bio)
                return 0;

        /*
         * This should probably be returning 0, but blk_add_request_payload()
         * (Christoph!!!!)
         */
        if (bio->bi_rw & REQ_DISCARD)
                return 1;

        if (bio->bi_rw & REQ_WRITE_SAME)
                return 1;

        fbio = bio;
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, iter) {
                        /*
                         * the trick here is making sure that a high page is
                         * never considered part of another segment, since that
                         * might change with the bounce page.
                         */
                        high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
                        if (!high && !highprv && cluster) {
                                if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
                                        goto new_segment;

                                seg_size += bv.bv_len;
                                bvprv = bv;
                                continue;
                        }
new_segment:
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        seg_size = bv.bv_len;
                        highprv = high;
                }
                bbio = bio;
        }

        if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
                fbio->bi_seg_front_size = seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
}

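/* Recompute and cache the physical segment count for a whole request. */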
void blk_recalc_rq_segments(struct request *rq)
{
        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

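/*
 * Recompute the segment count for a single bio. bi_next is temporarily
 * cleared so only this bio is counted, then BIO_SEG_VALID is set to mark
 * the cached value as up to date.
 */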
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        struct bio *nxt = bio->bi_next;

        bio->bi_next = NULL;
        bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
        bio->bi_next = nxt;

        bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

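/*
 * Return 1 if the last segment of @bio and the first segment of @nxt can
 * be merged into one physical segment without violating the queue's
 * clustering, segment size and segment boundary constraints.
 */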
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
{
        struct bio_vec end_bv = { NULL }, nxt_bv;
        struct bvec_iter iter;

        if (!blk_queue_cluster(q))
                return 0;

        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;

        if (!bio_has_data(bio))
                return 1;

        bio_for_each_segment(end_bv, bio, iter)
                if (end_bv.bv_len == iter.bi_size)
                        break;

        nxt_bv = bio_iovec(nxt);

        if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
                return 0;

        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
                return 1;

        return 0;
}

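/*
 * Add one biovec to the scatterlist being built: either extend the
 * current sg entry (if clustering allows it and the size and boundary
 * limits are not exceeded) or start a new entry.
 */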
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                     struct scatterlist *sglist, struct bio_vec *bvprv,
                     struct scatterlist **sg, int *nsegs, int *cluster)
{
        int nbytes = bvec->bv_len;

        if (*sg && *cluster) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;

                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                        goto new_segment;
                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                        goto new_segment;

                (*sg)->length += nbytes;
        } else {
new_segment:
                if (!*sg)
                        *sg = sglist;
                else {
                        /*
                         * If the driver previously mapped a shorter
                         * list, we could see a termination bit
                         * prematurely unless it fully inits the sg
                         * table on each mapping. We KNOW that there
                         * must be more entries here or the driver
                         * would be buggy, so force clear the
                         * termination bit to avoid doing a full
                         * sg_init_table() in drivers for each command.
                         */
                        sg_unmark_end(*sg);
                        *sg = sg_next(*sg);
                }

                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
        *bvprv = *bvec;
}

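/*
 * Map a chain of bios onto @sglist. Discard and write-same bios are
 * special-cased to at most a single payload segment; everything else is
 * mapped biovec by biovec through __blk_segment_map_sg().
 */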
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
{
        struct bio_vec bvec, bvprv = { NULL };
        struct bvec_iter iter;
        int nsegs, cluster;

        nsegs = 0;
        cluster = blk_queue_cluster(q);

        if (bio->bi_rw & REQ_DISCARD) {
                /*
                 * This is a hack - drivers should be neither modifying the
                 * biovec, nor relying on bi_vcnt - but because of
                 * blk_add_request_payload(), a discard bio may or may not have
                 * a payload we need to set up here (thank you Christoph) and
                 * bi_vcnt is really the only way of telling if we need to.
                 */
                if (bio->bi_vcnt)
                        goto single_segment;

                return 0;
        }

        if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
                *sg = sglist;
                bvec = bio_iovec(bio);
                sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
                return 1;
        }

        for_each_bio(bio)
                bio_for_each_segment(bvec, bio, iter)
                        __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
                                             &nsegs, &cluster);

        return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct scatterlist *sg = NULL;
        int nsegs = 0;

        if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

        if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (rq->cmd_flags & REQ_WRITE)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg->page_link &= ~0x02;
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries
 *
 * Will return the number of sg entries setup
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
                   struct scatterlist *sglist)
{
        struct scatterlist *sg = NULL;
        int nsegs;
        struct bio *next = bio->bi_next;

        bio->bi_next = NULL;
        nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
        bio->bi_next = next;
        if (sg)
                sg_mark_end(sg);

        BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);

        return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);

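/*
 * Account a bio that is about to be added to @req as new physical
 * segment(s), refusing the merge if it would exceed the queue's segment
 * limit or if the integrity data cannot be merged.
 */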
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
                goto no_merge;

        if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
                goto no_merge;

        /*
         * This will form the start of a new hw segment.  Bump both
         * counters.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req->cmd_flags |= REQ_NOMERGE;
        if (req == q->last_merge)
                q->last_merge = NULL;
        return 0;
}

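/*
 * Can @bio be appended to the back of @req? Checks the resulting size
 * against the request's sector limit before doing the segment accounting.
 */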
int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

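/*
 * Can @bio be prepended to the front of @req? Same size check as the
 * back-merge case, just with the bio ending where the request starts.
 */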
int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload;
 * it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
        struct request_queue *q = req->q;

        return !q->mq_ops && req->special;
}

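/*
 * Decide whether @req and @next can be combined into a single request as
 * far as size and segment limits are concerned, and update the merged
 * request's segment count if so.
 */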
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;
        unsigned int seg_size =
                req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

        /*
         * First check if either of the requests is a re-queued request.
         * Can't merge them if they are.
         */
        if (req_no_special_merge(req) || req_no_special_merge(next))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
                if (req->nr_phys_segments == 1)
                        req->bio->bi_seg_front_size = seg_size;
                if (next->nr_phys_segments == 1)
                        next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
        }

        if (total_phys_segments > queue_max_segments(q))
                return 0;

        if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->cmd_flags & REQ_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
                             (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
                bio->bi_rw |= ff;
        }
        rq->cmd_flags |= REQ_MIXED_MERGE;
}

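/*
 * A request is disappearing because it was merged into another one;
 * drop it from the per-partition in-flight accounting.
 */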
static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = req->part;

                part_round_stats(cpu, part);
                part_dec_in_flight(part, rq_data_dir(req));

                hd_struct_put(part);
                part_stat_unlock();
        }
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
                         struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;

        if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
                return 0;

        /*
         * not contiguous
         */
        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                return 0;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || req_no_special_merge(next))
                return 0;

        if (req->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return 0;

        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
         * will have updated segment counts, update sector
         * counts here.
         */
        if (!ll_merge_requests_fn(q, req, next))
                return 0;

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge
         * or front merge. We need the smaller start_time of
         * the merged requests to be the current request
         * for accounting purposes.
         */
        if (time_after(req->start_time, next->start_time))
                req->start_time = next->start_time;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
        if (blk_rq_cpu_valid(next))
                req->cpu = next->cpu;

        /* ownership of bio passed from next to req */
        next->bio = NULL;
        __blk_put_request(q, next);
        return 1;
}

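/* Try to merge @rq with the request the elevator says follows it. */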
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next)
{
        return attempt_merge(q, rq, next);
}

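/*
 * Basic eligibility check for merging @bio into @rq: flags, data
 * direction, device, integrity and write-same buffer must all be
 * compatible. Positional checks are left to blk_try_merge().
 */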
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
                return false;

        /* different data direction or already started, don't merge */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device and not a special request */
        if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
                return false;

        /* only merge integrity protected bio into ditto rq */
        if (bio_integrity(bio) != blk_integrity_rq(rq))
                return false;

        /* must be using the same buffer */
        if (rq->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        return true;
}

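/*
 * Decide whether @bio lines up with the end of @rq (back merge), the
 * start of @rq (front merge), or neither.
 */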
int blk_try_merge(struct request *rq, struct bio *bio)
{
        if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}