pagevec.c

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include <linux/ceph/libceph.h>

/*
 * build a vector of user pages
 */
struct page **ceph_get_direct_page_vector(const void __user *data,
					   int num_pages, bool write_page)
{
	struct page **pages;
	int got = 0;
	int rc = 0;

	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	while (got < num_pages) {
		rc = get_user_pages_unlocked(current, current->mm,
		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
		    num_pages - got, write_page, 0, pages + got);
		if (rc < 0)
			break;
		BUG_ON(rc == 0);
		got += rc;
	}
	if (rc < 0)
		goto fail;
	return pages;

fail:
	ceph_put_page_vector(pages, got, false);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL(ceph_get_direct_page_vector);
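
/*
 * Drop a reference on each page in the vector (optionally marking it
 * dirty first) and free the vector itself, whether it was allocated
 * with kmalloc() or vmalloc().
 */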
void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	if (is_vmalloc_addr(pages))
		vfree(pages);
	else
		kfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);
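
/*
 * Free the pages allocated by ceph_alloc_page_vector() and the vector
 * that holds them.
 */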
void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);

/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
	struct page **pages;
	int i;

	pages = kmalloc(sizeof(*pages) * num_pages, flags);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = __page_cache_alloc(flags);
		if (pages[i] == NULL) {
			ceph_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}
EXPORT_SYMBOL(ceph_alloc_page_vector);

/*
 * copy user data into a page vector
 */
int ceph_copy_user_to_page_vector(struct page **pages,
				  const void __user *data,
				  loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_CACHE_SIZE-po, left);
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
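
/*
 * copy kernel data into a page vector
 */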
void ceph_copy_to_page_vector(struct page **pages,
			      const void *data,
			      loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;

	while (left > 0) {
		size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);

		memcpy(page_address(pages[i]) + po, data, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);
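
/*
 * copy data out of a page vector into a kernel buffer
 */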
void ceph_copy_from_page_vector(struct page **pages,
				void *data,
				loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;

	while (left > 0) {
		size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);

		memcpy(data, page_address(pages[i]) + po, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_CACHE_SHIFT;

	off &= ~PAGE_CACHE_MASK;

	dout("zero_page_vector_page %u~%u\n", off, len);

	/* leading partial page? */
	if (off) {
		int end = min((int)PAGE_CACHE_SIZE, off + len);

		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)off);
		zero_user_segment(pages[i], off, end);
		len -= (end - off);
		i++;
	}
	while (len >= PAGE_CACHE_SIZE) {
		dout("zeroing %d %p len=%d\n", i, pages[i], len);
		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
		len -= PAGE_CACHE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
EXPORT_SYMBOL(ceph_zero_page_vector_range);
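
/*
 * Usage sketch (illustrative only): a caller might pair the allocation,
 * copy, and release helpers above roughly as below.  "buf" and "len" are
 * hypothetical caller-provided values, and calc_pages_for() is the
 * page-count helper from <linux/ceph/libceph.h>; error handling is
 * trimmed for brevity.
 *
 *	struct page **pages;
 *	int num_pages = calc_pages_for(0, len);
 *
 *	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	ceph_copy_to_page_vector(pages, buf, 0, len);
 *	...
 *	ceph_release_page_vector(pages, num_pages);
 */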