/*
 * Berkeley style UIO structures - Alan Cox 1994.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <uapi/linux/uio.h>	/* userspace-visible struct iovec */

/* Opaque here; only pointers to these are used in this header. */
struct page;
struct pipe_inode_info;
/*
 * Kernel-space counterpart of struct iovec: one scatter/gather segment
 * whose base is a kernel address.
 */
struct kvec {
	void *iov_base; /* and that should *never* hold a userland pointer */
	size_t iov_len;	/* length of the segment in bytes */
};
/*
 * Backing-store flavour of an iov_iter.  All values except ITER_IOVEC
 * are distinct bits above the direction bits, so the flavour can be
 * OR-ed with READ/WRITE in iov_iter.type; see iov_iter_type() and
 * iov_iter_rw() below.
 */
enum iter_type {
	ITER_IOVEC = 0,	/* array of userspace struct iovec */
	ITER_KVEC = 2,	/* array of struct kvec (kernel addresses) */
	ITER_BVEC = 4,	/* array of struct bio_vec */
	ITER_PIPE = 8,	/* a pipe_inode_info */
};
/*
 * Generic I/O iterator: a position (@iov_offset into the current
 * segment, @count bytes remaining) over one of several backing
 * representations, selected by the ITER_* bits in @type.
 */
struct iov_iter {
	unsigned int type;	/* ITER_* flavour OR-ed with direction (READ/WRITE) */
	size_t iov_offset;	/* byte offset into the current segment */
	size_t count;		/* total bytes remaining in the iterator */
	union {			/* backing store; valid member chosen by @type */
		const struct iovec *iov;	/* ITER_IOVEC */
		const struct kvec *kvec;	/* ITER_KVEC */
		const struct bio_vec *bvec;	/* ITER_BVEC */
		struct pipe_inode_info *pipe;	/* ITER_PIPE */
	};
	union {
		unsigned long nr_segs;	/* segment count for the array-backed types */
		struct {		/* ITER_PIPE bookkeeping */
			/* NOTE(review): presumably current and starting pipe
			 * buffer indices — confirm against lib/iov_iter.c */
			int idx;
			int start_idx;
		};
	};
};
  44. static inline enum iter_type iov_iter_type(const struct iov_iter *i)
  45. {
  46. return i->type & ~(READ | WRITE);
  47. }
  48. static inline bool iter_is_iovec(const struct iov_iter *i)
  49. {
  50. return iov_iter_type(i) == ITER_IOVEC;
  51. }
  52. static inline bool iov_iter_is_kvec(const struct iov_iter *i)
  53. {
  54. return iov_iter_type(i) == ITER_KVEC;
  55. }
  56. static inline bool iov_iter_is_bvec(const struct iov_iter *i)
  57. {
  58. return iov_iter_type(i) == ITER_BVEC;
  59. }
  60. static inline bool iov_iter_is_pipe(const struct iov_iter *i)
  61. {
  62. return iov_iter_type(i) == ITER_PIPE;
  63. }
  64. static inline unsigned char iov_iter_rw(const struct iov_iter *i)
  65. {
  66. return i->type & (READ | WRITE);
  67. }
/*
 * Total number of bytes covered by an iovec.
 *
 * NOTE that it is not safe to use this function until all the iovec's
 * segment lengths have been validated.  Because the individual lengths can
 * overflow a size_t when added together.
 */
  75. static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
  76. {
  77. unsigned long seg;
  78. size_t ret = 0;
  79. for (seg = 0; seg < nr_segs; seg++)
  80. ret += iov[seg].iov_len;
  81. return ret;
  82. }
  83. static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
  84. {
  85. return (struct iovec) {
  86. .iov_base = iter->iov->iov_base + iter->iov_offset,
  87. .iov_len = min(iter->count,
  88. iter->iov->iov_len - iter->iov_offset),
  89. };
  90. }
/*
 * iov_for_each - walk @start segment by segment.
 *
 * @iter gets a private copy of @start, @iov is set to the current
 * segment (via iov_iter_iovec()) on each pass, and the copy is advanced
 * by the segment length afterwards.  The loop runs only for ITER_IOVEC
 * and ITER_KVEC iterators; for other flavours the body never executes.
 *
 * NOTE(review): the guarding `if` has no `else`, so nesting this macro
 * unbraced under an outer `if` invites dangling-else surprises —
 * callers should always brace the enclosing branch.
 */
#define iov_for_each(iov, iter, start) \
	if (iov_iter_type(start) == ITER_IOVEC || \
	    iov_iter_type(start) == ITER_KVEC) \
	for (iter = (start); \
	     (iter).count && \
	     ((iov = iov_iter_iovec(&(iter))), 1); \
	     iov_iter_advance(&(iter), (iov).iov_len))
/*
 * Out-of-line iteration primitives and raw copy helpers.  The _copy_*
 * variants perform no size checking; the inline copy_* wrappers below
 * add a check_copy_size() gate in front of them.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes);
/* Move the iterator forward by @bytes (see iov_for_each). */
void iov_iter_advance(struct iov_iter *i, size_t bytes);
/* Undo a previous advance of @bytes. */
void iov_iter_revert(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
		struct iov_iter *i);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
		struct iov_iter *i);
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i);
  113. static __always_inline __must_check
  114. size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
  115. {
  116. if (unlikely(!check_copy_size(addr, bytes, true)))
  117. return 0;
  118. else
  119. return _copy_to_iter(addr, bytes, i);
  120. }
  121. static __always_inline __must_check
  122. size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
  123. {
  124. if (unlikely(!check_copy_size(addr, bytes, false)))
  125. return 0;
  126. else
  127. return _copy_from_iter(addr, bytes, i);
  128. }
  129. static __always_inline __must_check
  130. bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
  131. {
  132. if (unlikely(!check_copy_size(addr, bytes, false)))
  133. return false;
  134. else
  135. return _copy_from_iter_full(addr, bytes, i);
  136. }
  137. static __always_inline __must_check
  138. size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
  139. {
  140. if (unlikely(!check_copy_size(addr, bytes, false)))
  141. return 0;
  142. else
  143. return _copy_from_iter_nocache(addr, bytes, i);
  144. }
  145. static __always_inline __must_check
  146. bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
  147. {
  148. if (unlikely(!check_copy_size(addr, bytes, false)))
  149. return false;
  150. else
  151. return _copy_from_iter_full_nocache(addr, bytes, i);
  152. }
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
 * Note, users like pmem that depend on the stricter semantics of
 * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
 * IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
 * destination is flushed from the cache on return.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
#else
/* No arch support: fall back to the plain nocache copy (no flush). */
#define _copy_from_iter_flushcache _copy_from_iter_nocache
#endif

#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
/*
 * NOTE(review): presumably a machine-check-tolerant copy on arches that
 * select CONFIG_ARCH_HAS_UACCESS_MCSAFE — confirm exact contract in the
 * arch implementation.
 */
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i);
#else
#define _copy_to_iter_mcsafe _copy_to_iter
#endif
  169. static __always_inline __must_check
  170. size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
  171. {
  172. if (unlikely(!check_copy_size(addr, bytes, false)))
  173. return 0;
  174. else
  175. return _copy_from_iter_flushcache(addr, bytes, i);
  176. }
  177. static __always_inline __must_check
  178. size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
  179. {
  180. if (unlikely(!check_copy_size(addr, bytes, true)))
  181. return 0;
  182. else
  183. return _copy_to_iter_mcsafe(addr, bytes, i);
  184. }
/* More out-of-line primitives; see lib implementation for semantics. */
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
/*
 * Constructors: bind an iov_iter to its backing store.  @direction
 * carries the READ/WRITE bit that iov_iter_rw() later extracts from
 * iter->type.
 */
void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
		unsigned long nr_segs, size_t count);
void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
		unsigned long nr_segs, size_t count);
void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
		unsigned long nr_segs, size_t count);
void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
		size_t count);
ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
		size_t maxsize, unsigned maxpages, size_t *start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
		size_t maxsize, size_t *start);
int iov_iter_npages(const struct iov_iter *i, int maxpages);
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);
  202. static inline size_t iov_iter_count(const struct iov_iter *i)
  203. {
  204. return i->count;
  205. }
/*
 * Cap the iov_iter by given limit; note that the second argument is
 * *not* the new size - it's an upper limit for it.  Passing a value
 * greater than the amount of data in the iov_iter is fine - it'll just
 * do nothing in that case.
 */
  212. static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
  213. {
  214. /*
  215. * count doesn't have to fit in size_t - comparison extends both
  216. * operands to u64 here and any value that would be truncated by
  217. * conversion in assignement is by definition greater than all
  218. * values of size_t, including old i->count.
  219. */
  220. if (i->count > count)
  221. i->count = count;
  222. }
/*
 * Re-expand a previously truncated iterator; @count must be no more than
 * how much we had shrunk it.
 */
static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
	/* No validation: the caller guarantees count is within the
	 * amount previously removed by iov_iter_truncate(). */
	i->count = count;
}
/* Copy helpers that also fold the data into the checksum at *csum. */
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);

/*
 * Build an iov_iter from a userspace iovec array; *iov receives the
 * kernel copy of the vector (fast_segs allows a caller-supplied
 * on-stack array to be used for small counts).
 */
int import_iovec(int type, const struct iovec __user * uvector,
		unsigned nr_segs, unsigned fast_segs,
		struct iovec **iov, struct iov_iter *i);

#ifdef CONFIG_COMPAT
struct compat_iovec;
/* As import_iovec(), but for 32-bit compat userspace iovecs. */
int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		unsigned nr_segs, unsigned fast_segs,
		struct iovec **iov, struct iov_iter *i);
#endif

/* Single-buffer convenience form of import_iovec(). */
int import_single_range(int type, void __user *buf, size_t len,
		struct iovec *iov, struct iov_iter *i);

/* Invoke @f on each contiguous chunk covering @bytes of @i. */
int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
		int (*f)(struct kvec *vec, void *context),
		void *context);

#endif /* __LINUX_UIO_H */