pagevec.c

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/namei.h>
#include <linux/writeback.h>

#include <linux/ceph/libceph.h>

/*
 * build a vector of user pages
 */
struct page **ceph_get_direct_page_vector(const void __user *data,
					   int num_pages, bool write_page)
{
	struct page **pages;
	int got = 0;
	int rc = 0;

	pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	down_read(&current->mm->mmap_sem);
	while (got < num_pages) {
		rc = get_user_pages(current, current->mm,
		    (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
		    num_pages - got, write_page, 0, pages + got, NULL);
		if (rc < 0)
			break;
		BUG_ON(rc == 0);
		got += rc;
	}
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		goto fail;
	return pages;

fail:
	ceph_put_page_vector(pages, got, false);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL(ceph_get_direct_page_vector);
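
/*
 * Illustrative sketch (not part of the original file): one plausible way a
 * caller might use ceph_get_direct_page_vector()/ceph_put_page_vector() to
 * pin a user buffer for direct I/O and unpin it afterwards.  The function
 * name and parameters below are hypothetical; calc_pages_for() comes from
 * <linux/ceph/libceph.h>.
 */
#if 0
static int example_pin_user_buffer(const void __user *buf, size_t len)
{
	int num_pages = calc_pages_for((unsigned long)buf, len);
	struct page **pages;

	/* pin with write_page=true: the I/O will write into these pages */
	pages = ceph_get_direct_page_vector(buf, num_pages, true);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... issue the read against the pinned pages here ... */

	/* mark dirty (data arrived from the device) and unpin */
	ceph_put_page_vector(pages, num_pages, true);
	return 0;
}
#endif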

/*
 * release a vector of pinned user pages, optionally dirtying them first
 */
void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
{
	int i;

	for (i = 0; i < num_pages; i++) {
		if (dirty)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	if (is_vmalloc_addr(pages))
		vfree(pages);
	else
		kfree(pages);
}
EXPORT_SYMBOL(ceph_put_page_vector);

/*
 * free a vector of pages allocated by ceph_alloc_page_vector()
 */
void ceph_release_page_vector(struct page **pages, int num_pages)
{
	int i;

	for (i = 0; i < num_pages; i++)
		__free_pages(pages[i], 0);
	kfree(pages);
}
EXPORT_SYMBOL(ceph_release_page_vector);

/*
 * allocate a vector of new pages
 */
struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
{
	struct page **pages;
	int i;

	pages = kmalloc(sizeof(*pages) * num_pages, flags);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < num_pages; i++) {
		pages[i] = __page_cache_alloc(flags);
		if (pages[i] == NULL) {
			ceph_release_page_vector(pages, i);
			return ERR_PTR(-ENOMEM);
		}
	}
	return pages;
}
EXPORT_SYMBOL(ceph_alloc_page_vector);
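
/*
 * Illustrative sketch (not part of the original file): allocating a scratch
 * page vector and releasing it with the matching helper.  The function name
 * and the page count are hypothetical.
 */
#if 0
static int example_scratch_pages(void)
{
	int num_pages = 4;	/* arbitrary example size */
	struct page **pages;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... use the pages as temporary buffers here ... */

	ceph_release_page_vector(pages, num_pages);
	return 0;
}
#endif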

/*
 * copy user data into a page vector
 */
int ceph_copy_user_to_page_vector(struct page **pages,
				  const void __user *data,
				  loff_t off, size_t len)
{
	int i = 0;
	int po = off & ~PAGE_CACHE_MASK;
	int left = len;
	int l, bad;

	while (left > 0) {
		l = min_t(int, PAGE_CACHE_SIZE-po, left);
		bad = copy_from_user(page_address(pages[i]) + po, data, l);
		if (bad == l)
			return -EFAULT;
		data += l - bad;
		left -= l - bad;
		po += l - bad;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
	return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
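
/*
 * Illustrative sketch (not part of the original file): copying a user buffer
 * into a freshly allocated page vector.  Note the return convention of the
 * helper above: len on success, -EFAULT if the copy faults.  The function
 * name and parameters are hypothetical.
 */
#if 0
static int example_copy_in(const void __user *buf, size_t len)
{
	int num_pages = calc_pages_for(0, len);
	struct page **pages;
	int ret;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = ceph_copy_user_to_page_vector(pages, buf, 0, len);

	/* ... hand the filled pages to the caller or the OSD client here ... */

	ceph_release_page_vector(pages, num_pages);
	return ret < 0 ? ret : 0;
}
#endif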

/*
 * copy kernel data into a page vector
 */
void ceph_copy_to_page_vector(struct page **pages,
			      const void *data,
			      loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;

	while (left > 0) {
		size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);

		memcpy(page_address(pages[i]) + po, data, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);

/*
 * copy data out of a page vector into a kernel buffer
 */
void ceph_copy_from_page_vector(struct page **pages,
				void *data,
				loff_t off, size_t len)
{
	int i = 0;
	size_t po = off & ~PAGE_CACHE_MASK;
	size_t left = len;

	while (left > 0) {
		size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left);

		memcpy(data, page_address(pages[i]) + po, l);
		data += l;
		left -= l;
		po += l;
		if (po == PAGE_CACHE_SIZE) {
			po = 0;
			i++;
		}
	}
}
EXPORT_SYMBOL(ceph_copy_from_page_vector);
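
/*
 * Illustrative sketch (not part of the original file): round-tripping a small
 * kernel buffer through a page vector with the two memcpy-based helpers above.
 * Assumes the caller supplies a vector with at least one allocated page; the
 * function name and payload are hypothetical.
 */
#if 0
static void example_roundtrip(struct page **pages)
{
	char in[32] = "example payload";
	char out[32];

	/* offsets are relative to the start of the first page */
	ceph_copy_to_page_vector(pages, in, 0, sizeof(in));
	ceph_copy_from_page_vector(pages, out, 0, sizeof(out));
	/* out now holds the same bytes as in */
}
#endif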

/*
 * Zero an extent within a page vector.  Offset is relative to the
 * start of the first page.
 */
void ceph_zero_page_vector_range(int off, int len, struct page **pages)
{
	int i = off >> PAGE_CACHE_SHIFT;

	off &= ~PAGE_CACHE_MASK;

	dout("zero_page_vector_page %u~%u\n", off, len);

	/* leading partial page? */
	if (off) {
		int end = min((int)PAGE_CACHE_SIZE, off + len);

		dout("zeroing %d %p head from %d\n", i, pages[i],
		     (int)off);
		zero_user_segment(pages[i], off, end);
		len -= (end - off);
		i++;
	}
	while (len >= PAGE_CACHE_SIZE) {
		dout("zeroing %d %p len=%d\n", i, pages[i], len);
		zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
		len -= PAGE_CACHE_SIZE;
		i++;
	}
	/* trailing partial page? */
	if (len) {
		dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
		zero_user_segment(pages[i], 0, len);
	}
}
EXPORT_SYMBOL(ceph_zero_page_vector_range);
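
/*
 * Illustrative sketch (not part of the original file): zeroing the slack
 * between the end of valid data and the next page boundary, e.g. before
 * exposing a short read.  The function name and data_len parameter are
 * hypothetical; round_up() is the standard kernel macro.
 */
#if 0
static void example_zero_tail(struct page **pages, int data_len)
{
	int end = round_up(data_len, PAGE_CACHE_SIZE);

	if (end > data_len)
		ceph_zero_page_vector_range(data_len, end - data_len, pages);
}
#endif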