blk-map.c

/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

                rq->__data_len += bio->bi_iter.bi_size;
        }

        return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
                struct rq_map_data *map_data, struct iov_iter *iter,
                gfp_t gfp_mask, bool copy)
{
        struct request_queue *q = rq->q;
        struct bio *bio, *orig_bio;
        int ret;

        if (copy)
                bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
        else
                bio = bio_map_user_iov(q, iter, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (map_data && map_data->null_mapped)
                bio_set_flag(bio, BIO_NULL_MAPPED);

        iov_iter_advance(iter, bio->bi_iter.bi_size);
        if (map_data)
                map_data->offset += bio->bi_iter.bi_size;

        orig_bio = bio;
        blk_queue_bounce(q, &bio);

        /*
         * We link the bounce buffer in and could have to traverse it
         * later, so we have to get a ref to prevent it from being freed.
         */
        bio_get(bio);

        ret = blk_rq_append_bio(q, rq, bio);
        if (ret) {
                bio_endio(bio);
                __blk_rq_unmap_user(orig_bio);
                bio_put(bio);
                return ret;
        }

        return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:          request queue where request should be inserted
 * @rq:         request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:       iovec iterator
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 *
 *    (An illustrative usage sketch follows this function.)
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data,
                        const struct iov_iter *iter, gfp_t gfp_mask)
{
        bool copy = false;
        unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
        struct bio *bio = NULL;
        struct iov_iter i;
        int ret;

        if (map_data)
                copy = true;
        else if (iov_iter_alignment(iter) & align)
                copy = true;
        else if (queue_virt_boundary(q))
                copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

        i = *iter;
        do {
                ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
                if (ret)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
        } while (iov_iter_count(&i));

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;
        return 0;

unmap_rq:
        __blk_rq_unmap_user(bio);
        rq->bio = NULL;
        return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

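/*
 * Illustrative sketch, not part of the original file: a hypothetical helper
 * (example_map_user_iovec) showing one way an SG_IO-style caller might feed a
 * user iovec into blk_rq_map_user_iov().  The helper name, its parameters and
 * the use of <linux/slab.h> for kfree() are assumptions for illustration; the
 * request is presumed to have been allocated and prepared elsewhere (e.g. via
 * blk_get_request()).
 */
static int example_map_user_iovec(struct request_queue *q, struct request *rq,
                                  const struct iovec __user *uvec,
                                  unsigned int nr_segs, size_t max_len)
{
        struct iovec *iov = NULL;
        struct iov_iter i;
        int ret;

        /* Copy the user iovec array in and build an iov_iter over it. */
        ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &i);
        if (ret < 0)
                return ret;

        /* Do not map more than the caller's stated transfer length. */
        iov_iter_truncate(&i, max_len);

        ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);

        /* import_iovec() may have allocated the array; kfree(NULL) is safe. */
        kfree(iov);
        return ret;
}
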
int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        struct iovec iov;
        struct iov_iter i;
        int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

        if (unlikely(ret < 0))
                return ret;

        return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:        start of bio list
 *
 * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio saved after the blk_rq_map_user() call, since
 *    the I/O completion may have changed rq->bio.
 *
 *    (An illustrative sketch pairing mapping and unmapping follows this
 *    function.)
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

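/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * (example_map_submit_unmap) pairing blk_rq_map_user() with
 * blk_rq_unmap_user().  The head of the mapped bio list is saved before
 * submission because I/O completion may change rq->bio, and the unmap must be
 * given the original bio.  The request is assumed to be fully prepared (cdb,
 * timeout, etc.) elsewhere.
 */
static int example_map_submit_unmap(struct request_queue *q,
                                    struct request *rq,
                                    void __user *ubuf, unsigned long len)
{
        struct bio *bio;
        int ret, ret2;

        ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
        if (ret)
                return ret;

        /* Remember the head of the mapped bio list before submitting. */
        bio = rq->bio;

        /* Submit synchronously; blk_execute_rq() waits for completion. */
        ret = blk_execute_rq(q, NULL, rq, 0);

        /* Always unmap, even if the command itself failed. */
        ret2 = blk_rq_unmap_user(bio);
        if (ret2 && !ret)
                ret = ret2;

        return ret;
}
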
/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:          request queue where request should be inserted
 * @rq:         request to fill
 * @kbuf:       the kernel buffer
 * @len:        length of kernel data
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 *
 *    (An illustrative usage sketch follows this function.)
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
        struct bio *bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (!reading)
                bio->bi_rw |= REQ_WRITE;

        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;

        ret = blk_rq_append_bio(q, rq, bio);
        if (unlikely(ret)) {
                /* request is too big */
                bio_put(bio);
                return ret;
        }

        blk_queue_bounce(q, &rq->bio);
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);

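/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * (example_map_kern_buffer) attaching a kernel buffer to a request with
 * blk_rq_map_kern().  kmalloc() is used so the buffer is not on the stack;
 * an on-stack or misaligned buffer would force blk_rq_map_kern() to fall
 * back to a copy.  <linux/slab.h> is assumed for kmalloc()/kfree().
 */
static int example_map_kern_buffer(struct request_queue *q,
                                   struct request *rq, unsigned int len)
{
        void *buf;
        int ret;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
        if (ret) {
                kfree(buf);
                return ret;
        }

        /*
         * On success the buffer must stay allocated until the request
         * completes; freeing it is left to the completion path.
         */
        return 0;
}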