uio.h

/*
 * Berkeley style UIO structures - Alan Cox 1994.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <uapi/linux/uio.h>

struct page;
struct pipe_inode_info;

struct kvec {
        void *iov_base; /* and that should *never* hold a userland pointer */
        size_t iov_len;
};

enum {
        ITER_IOVEC = 0,
        ITER_KVEC = 2,
        ITER_BVEC = 4,
        ITER_PIPE = 8,
};

struct iov_iter {
        int type;
        size_t iov_offset;
        size_t count;
        union {
                const struct iovec *iov;
                const struct kvec *kvec;
                const struct bio_vec *bvec;
                struct pipe_inode_info *pipe;
        };
        union {
                unsigned long nr_segs;
                struct {
                        int idx;
                        int start_idx;
                };
        };
};
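
/*
 * ->type packs two things together in this generation of the API: the
 * transfer direction (READ or WRITE) in its low bit, and one of the
 * ITER_* flavours above OR'd into the remaining bits (ITER_IOVEC being 0,
 * a plain user-space iovec iterator carries no flavour bit).
 * iov_iter_rw() and iter_is_iovec() below pick the two halves apart.
 */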

/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated, because the individual lengths can
 * overflow a size_t when added together.
 */
static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
{
        unsigned long seg;
        size_t ret = 0;

        for (seg = 0; seg < nr_segs; seg++)
                ret += iov[seg].iov_len;
        return ret;
}

/*
 * Materialise the iterator's current position as a single struct iovec:
 * the head segment, shifted by iov_offset and clipped to the bytes
 * remaining in the iterator.
 */
static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
{
        return (struct iovec) {
                .iov_base = iter->iov->iov_base + iter->iov_offset,
                .iov_len = min(iter->count,
                               iter->iov->iov_len - iter->iov_offset),
        };
}

#define iov_for_each(iov, iter, start)                          \
        if (!((start).type & (ITER_BVEC | ITER_PIPE)))          \
        for (iter = (start);                                    \
             (iter).count &&                                    \
             ((iov = iov_iter_iovec(&(iter))), 1);              \
             iov_iter_advance(&(iter), (iov).iov_len))
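
/*
 * A minimal usage sketch for iov_for_each() (illustrative, not part of
 * the original header; 'src' is assumed to be an already-constructed
 * struct iov_iter).  'i' is a working copy, so the walk does not consume
 * 'src'; note the leading if() means bvec and pipe iterators are skipped
 * entirely:
 *
 *      struct iovec v;
 *      struct iov_iter i;
 *
 *      iov_for_each(v, i, src)
 *              pr_debug("%zu bytes at %p\n", v.iov_len, v.iov_base);
 */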

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                           struct iov_iter *i);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
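
/*
 * The leading-underscore variants above are the raw implementations; the
 * copy_*_iter() wrappers below run check_copy_size() first, so they are
 * what callers normally want.
 */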

static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, true)))
                return 0;
        else
                return _copy_to_iter(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return false;
        else
                return _copy_from_iter_full(addr, bytes, i);
}

static __always_inline __must_check
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter_nocache(addr, bytes, i);
}

static __always_inline __must_check
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return false;
        else
                return _copy_from_iter_full_nocache(addr, bytes, i);
}
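
/*
 * A minimal sketch of the classic consumer (illustrative only, not part
 * of the original header; struct demo_dev and its ->buf/->len fields are
 * made-up names): a ->read_iter() handler filling the caller's iterator.
 * copy_to_iter() returns the bytes actually copied, which may be short if
 * the iterator runs out of space or a user page faults, so returning it
 * directly gives ordinary short-read semantics:
 *
 *      static ssize_t demo_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *      {
 *              struct demo_dev *dev = iocb->ki_filp->private_data;
 *
 *              return copy_to_iter(dev->buf, dev->len, to);
 *      }
 */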

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on copy_from_iter_flushcache() having
 * stricter semantics than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

static __always_inline __must_check
size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
        if (unlikely(!check_copy_size(addr, bytes, false)))
                return 0;
        else
                return _copy_from_iter_flushcache(addr, bytes, i);
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
                        unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
                        unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec,
                        unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, int direction, struct pipe_inode_info *pipe,
                        size_t count);
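
/*
 * A minimal construction sketch (illustrative, not part of the original
 * header; 'buf', 'len' and 'src' are made-up names).  In this vintage of
 * the interface the kvec/bvec/pipe constructors expect the ITER_* flavour
 * OR'd into the direction argument:
 *
 *      struct kvec kv = { .iov_base = buf, .iov_len = len };
 *      struct iov_iter iter;
 *
 *      iov_iter_kvec(&iter, ITER_KVEC | READ, &kv, 1, len);
 *      if (copy_to_iter(src, len, &iter) != len)
 *              return -EFAULT;
 */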

ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
                        size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
                        size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

static inline size_t iov_iter_count(const struct iov_iter *i)
{
        return i->count;
}

static inline bool iter_is_iovec(const struct iov_iter *i)
{
        return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE));
}

/*
 * Get one of READ or WRITE out of iter->type without any other flags OR'd in
 * with it.
 *
 * The ?: is just for type safety.
 */
#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE))

/*
 * Cap the iov_iter by given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing it a value
 * greater than the amount of data in iov_iter is fine - it'll just do
 * nothing in that case.
 */
static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
{
        /*
         * count doesn't have to fit in size_t - comparison extends both
         * operands to u64 here and any value that would be truncated by
         * conversion in assignment is by definition greater than all
         * values of size_t, including old i->count.
         */
        if (i->count > count)
                i->count = count;
}

/*
 * reexpand a previously truncated iterator; count must be no more than how much
 * we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
        i->count = count;
}
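
/*
 * A typical pairing sketch for truncate/reexpand (illustrative only;
 * do_bounded_io() is a made-up helper that advances the iterator and
 * returns the bytes it handled).  The reexpand restores exactly the
 * amount clipped off by the truncate:
 *
 *      size_t old_count = iov_iter_count(iter);
 *      size_t done;
 *
 *      iov_iter_truncate(iter, limit);
 *      done = do_bounded_io(iter);
 *      iov_iter_reexpand(iter, old_count - done);
 */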

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

int import_iovec(int type, const struct iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i);

#ifdef CONFIG_COMPAT
struct compat_iovec;
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i);
#endif
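
/*
 * A minimal syscall-side sketch for import_iovec() (illustrative, not
 * part of the original header; 'uvec' and 'nr' are made-up names).  On
 * success *iov is left pointing at the array to free later - it is set to
 * NULL when the fast on-stack array was used, so the kfree() below is
 * always safe:
 *
 *      struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *      struct iov_iter iter;
 *      int ret;
 *
 *      ret = import_iovec(READ, uvec, nr, UIO_FASTIOV, &iov, &iter);
 *      if (ret < 0)
 *              return ret;
 *      ... use 'iter' ...
 *      kfree(iov);
 */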

int import_single_range(int type, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i);

int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
                            int (*f)(struct kvec *vec, void *context),
                            void *context);

#endif