iov_iter.c

#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>

/*
 * Copy data from a kernel page into the userspace buffers described by an
 * iov_iter, advancing the iterator past whatever was copied.  The cheap
 * kmap_atomic() path is tried first; if the user pages fault, we drop to a
 * sleeping kmap() copy for whatever remains.
 */
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }

        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}
EXPORT_SYMBOL(copy_page_to_iter);
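
/*
 * A minimal usage sketch, not part of the original file: a buffered-read
 * path hands an up-to-date pagecache page to copy_page_to_iter() and
 * treats a short copy as -EFAULT.  The function name is hypothetical; note
 * that copy_page_to_iter() advances the iterator itself, so the caller
 * does not call iov_iter_advance() afterwards.
 */
static ssize_t example_read_page(struct page *page, unsigned long offset,
                                 unsigned long nr, struct iov_iter *iter)
{
        size_t ret = copy_page_to_iter(page, offset, nr, iter);

        /* A short copy means part of the user buffer was unwritable. */
        return (ret == nr) ? ret : -EFAULT;
}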

/*
 * Slow multi-segment path: copy into vaddr from each iovec in turn,
 * stopping at the first fault.
 */
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                        const struct iovec *iov, size_t base, size_t bytes)
{
        size_t copied = 0, left = 0;

        while (bytes) {
                char __user *buf = iov->iov_base + base;
                int copy = min(bytes, iov->iov_len - base);

                base = 0;
                left = __copy_from_user_inatomic(vaddr, buf, copy);
                copied += copy;
                bytes -= copy;
                vaddr += copy;
                iov++;
                if (unlikely(left))
                        break;
        }
        return copied - left;   /* don't count the bytes lost to the fault */
}

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then return the number
 * of bytes which were copied before the fault.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr;
        size_t copied;

        kaddr = kmap_atomic(page);
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
                left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
                                                i->iov, i->iov_offset, bytes);
        }
        kunmap_atomic(kaddr);

        return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
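
/*
 * A minimal usage sketch, not part of the original file: the copy runs with
 * page faults disabled, so a short return is routine rather than an error --
 * the caller prefaults the user pages and retries.  The function name is
 * hypothetical; only the iov_iter calls are from this file.
 */
static size_t example_atomic_fill(struct page *page, struct iov_iter *i,
                                  unsigned long offset, size_t bytes)
{
        size_t copied;

        pagefault_disable();    /* must not take a fault during the copy */
        copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
        pagefault_enable();

        /* Consume only what actually made it across. */
        iov_iter_advance(i, copied);
        return copied;
}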

/*
 * This has the same side effects and return value as
 * iov_iter_copy_from_user_atomic().
 * The difference is that it attempts to resolve faults.
 * Page must not be locked.
 */
size_t iov_iter_copy_from_user(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr;
        size_t copied;

        kaddr = kmap(page);
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
                left = __copy_from_user(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
                                                i->iov, i->iov_offset, bytes);
        }
        kunmap(page);
        return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user);
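
/*
 * A minimal usage sketch, not part of the original file: a caller that can
 * sleep and holds no page lock may use the faulting variant directly and
 * skip the prefault/retry dance entirely.  The function name is
 * hypothetical.
 */
static size_t example_blocking_fill(struct page *page, struct iov_iter *i,
                                    unsigned long offset, size_t bytes)
{
        size_t copied;

        might_sleep();          /* __copy_from_user() may fault and block */
        copied = iov_iter_copy_from_user(page, i, offset, bytes);
        iov_iter_advance(i, copied);
        return copied;
}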

void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
        BUG_ON(i->count < bytes);

        if (likely(i->nr_segs == 1)) {
                i->iov_offset += bytes;
                i->count -= bytes;
        } else {
                const struct iovec *iov = i->iov;
                size_t base = i->iov_offset;
                unsigned long nr_segs = i->nr_segs;

                /*
                 * The !iov->iov_len check ensures we skip over unlikely
                 * zero-length segments (without overrunning the iovec).
                 */
                while (bytes || unlikely(i->count && !iov->iov_len)) {
                        int copy;

                        copy = min(bytes, iov->iov_len - base);
                        BUG_ON(!i->count || i->count < copy);
                        i->count -= copy;
                        bytes -= copy;
                        base += copy;
                        if (iov->iov_len == base) {
                                iov++;
                                nr_segs--;
                                base = 0;
                        }
                }

                i->iov = iov;
                i->iov_offset = base;
                i->nr_segs = nr_segs;
        }
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        char __user *buf = i->iov->iov_base + i->iov_offset;
        bytes = min(bytes, i->iov->iov_len - i->iov_offset);
        return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        const struct iovec *iov = i->iov;

        if (i->nr_segs == 1)
                return i->count;
        else
                return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
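
/*
 * Putting the pieces together -- a minimal sketch, not part of the original
 * file, of the buffered-write loop these helpers were designed around,
 * modelled on generic_perform_write() in mm/filemap.c of the same era.  The
 * function name is hypothetical; it assumes <linux/fs.h> for struct file and
 * the address_space write_begin/write_end hooks, and elides dirty-page
 * balancing and other bookkeeping the real loop does.
 */
static ssize_t example_perform_write(struct file *file, struct iov_iter *i,
                                     loff_t pos)
{
        struct address_space *mapping = file->f_mapping;
        const struct address_space_operations *a_ops = mapping->a_ops;
        ssize_t written = 0;
        long status = 0;

        do {
                struct page *page;
                void *fsdata;
                unsigned long offset = pos & (PAGE_CACHE_SIZE - 1);
                size_t bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
                                     iov_iter_count(i));
                size_t copied;

again:
                /*
                 * Bring the user pages in with a read fault now; we must
                 * not fault once write_begin has handed us a locked page.
                 */
                if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
                        status = -EFAULT;
                        break;
                }

                status = a_ops->write_begin(file, mapping, pos, bytes, 0,
                                            &page, &fsdata);
                if (unlikely(status))
                        break;

                pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
                pagefault_enable();
                flush_dcache_page(page);

                status = a_ops->write_end(file, mapping, pos, bytes, copied,
                                          page, fsdata);
                if (unlikely(status < 0))
                        break;
                copied = status;

                iov_iter_advance(i, copied);
                if (unlikely(copied == 0)) {
                        /*
                         * The copy faulted mid-way and made no progress;
                         * shrink the next attempt to the current segment so
                         * the fault-in above can actually help.
                         */
                        bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
                                      iov_iter_single_seg_count(i));
                        goto again;
                }
                pos += copied;
                written += copied;
        } while (iov_iter_count(i));

        return written ? written : status;
}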