blk-map.c

/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Check whether there is a gap between the end of @prv and the start of
 * @cur with respect to the queue's virt_boundary mask.  If there is, the
 * two iovecs cannot be mapped directly and the data has to be copied.
 */
static bool iovec_gap_to_prv(struct request_queue *q,
			     struct iovec *prv, struct iovec *cur)
{
	unsigned long prev_end;

	if (!queue_virt_boundary(q))
		return false;

	if (prv->iov_base == NULL && prv->iov_len == 0)
		/* prv is not set - don't check */
		return false;

	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);

	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
		prev_end & queue_virt_boundary(q));
}

/*
 * Append @bio to @rq: either start the request with it, or merge it onto
 * the tail if the queue's merge limits allow.
 */
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}

/*
 * Undo a single mapped or copied bio: unpin the user pages if the bio was
 * mapped directly, otherwise hand it to bio_uncopy_user() to copy data
 * back as needed and free the copy pages.
 */
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

/*
 * Map (zero-copy) or copy as much of @iter as fits into a single bio and
 * append that bio to @rq.  Called repeatedly by blk_rq_map_user_iov()
 * until the iterator is drained.
 */
static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
	bool copy = (q->dma_pad_mask & iter->count) || map_data;
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter || !iter->count)
		return -EINVAL;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;

		if (!iov.iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if ((uaddr & queue_dma_alignment(q)) ||
		    iovec_gap_to_prv(q, &prv, &iov))
			copy = true;

		prv.iov_base = iov.iov_base;
		prv.iov_len = iov.iov_len;
	}

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
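
/*
 * Editor's illustrative sketch, not part of the original file: one way a
 * BLOCK_PC-style ioctl path could feed a user iovec array into
 * blk_rq_map_user_iov(), mirroring the pattern used by in-tree SG_IO code.
 * The function and parameter names (example_map_user_iovec, uvec, nr_segs,
 * write) are hypothetical; the helpers it leans on (import_iovec(),
 * blk_get_request(), blk_rq_set_block_pc(), blk_execute_rq(),
 * blk_put_request()) are assumed from this kernel generation, and the
 * kfree() of the copied iovec array additionally assumes <linux/slab.h>.
 */
static int __maybe_unused example_map_user_iovec(struct request_queue *q,
						 struct gendisk *disk,
						 const struct iovec __user *uvec,
						 unsigned int nr_segs, int write)
{
	struct iovec *iov = NULL;
	struct iov_iter iter;
	struct request *rq;
	struct bio *bio;
	ssize_t count;
	int ret, ret2;

	/* Copy in the user iovec array and wrap it in an iov_iter. */
	count = import_iovec(write ? WRITE : READ, uvec, nr_segs, 0,
			     &iov, &iter);
	if (count < 0)
		return count;

	rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto out_free;
	}
	blk_rq_set_block_pc(rq);
	/* rq->cmd / rq->cmd_len (the device command) would be set up here. */

	/* Pin the user pages, or bounce-copy them, into the request. */
	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
	if (ret)
		goto out_put;

	bio = rq->bio;	/* save the original bio for unmapping */
	ret = blk_execute_rq(q, disk, rq, 0);

	/* Unmap in process context, even if execution failed. */
	ret2 = blk_rq_unmap_user(bio);
	if (ret2 && !ret)
		ret = ret2;

out_put:
	blk_put_request(rq);
out_free:
	kfree(iov);
	return ret;
}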

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio, as saved after blk_rq_map_user() returned,
 *    since I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
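
/*
 * Editor's illustrative sketch, not part of the original file: the
 * map -> execute -> unmap lifecycle for a single flat user buffer, as
 * required by the kernel-doc above.  Note that rq->bio is saved before
 * the request is executed, because completion may change rq->bio while
 * blk_rq_unmap_user() needs the original.  The function and parameter
 * names are hypothetical; blk_get_request(), blk_rq_set_block_pc(),
 * blk_execute_rq() and blk_put_request() are assumed from this kernel
 * generation.
 */
static int __maybe_unused example_rw_user_buffer(struct request_queue *q,
						 struct gendisk *disk,
						 void __user *ubuf,
						 unsigned long len, int write)
{
	struct request *rq;
	struct bio *bio;
	int ret, ret2;

	rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);
	/* rq->cmd / rq->cmd_len (the device command) would be set up here. */

	/* Map the user buffer; the direction is taken from the request. */
	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_put;

	bio = rq->bio;		/* original bio, needed for unmap */
	ret = blk_execute_rq(q, disk, rq, 0);

	/* Unmap in process context, even if execution failed. */
	ret2 = blk_rq_unmap_user(bio);
	if (ret2 && !ret)
		ret = ret2;
out_put:
	blk_put_request(rq);
	return ret;
}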

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
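
/*
 * Editor's illustrative sketch, not part of the original file: issuing a
 * read into a kernel buffer with blk_rq_map_kern().  @kbuf should be a
 * heap buffer; as the code above shows, a stack or misaligned buffer
 * silently falls back to the bio_copy_kern() bounce path.  No unmap call
 * is needed; the bio is released when the request completes.  Names are
 * hypothetical; blk_get_request(), blk_rq_set_block_pc(),
 * blk_execute_rq() and blk_put_request() are assumed from this kernel
 * generation.
 */
static int __maybe_unused example_read_into_kbuf(struct request_queue *q,
						 struct gendisk *disk,
						 void *kbuf, unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);
	/* rq->cmd / rq->cmd_len (the device command) would be set up here. */

	/* Map the kernel buffer, then execute and release the request. */
	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
	if (!ret)
		ret = blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return ret;
}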