@@ -4,7 +4,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
-size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
 {
	size_t skip, copy, left, wanted;
@@ -84,9 +84,8 @@ done:
	i->iov_offset = skip;
	return wanted - bytes;
 }
-EXPORT_SYMBOL(copy_page_to_iter);
 
-size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i)
 {
	size_t skip, copy, left, wanted;
@@ -166,7 +165,6 @@ done:
	i->iov_offset = skip;
	return wanted - bytes;
 }
-EXPORT_SYMBOL(copy_page_from_iter);
 
 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
@@ -195,7 +193,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
  * were successfully copied. If a fault is encountered then return the number of
  * bytes which were copied.
  */
-size_t iov_iter_copy_from_user_atomic(struct page *page,
+static size_t copy_from_user_atomic_iovec(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
 {
	char *kaddr;
@@ -215,9 +213,8 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 
	return copied;
 }
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
+static void advance_iovec(struct iov_iter *i, size_t bytes)
 {
	BUG_ON(i->count < bytes);
 
@@ -252,7 +249,6 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
		i->nr_segs = nr_segs;
	}
 }
-EXPORT_SYMBOL(iov_iter_advance);
 
 /*
  * Fault in the first iovec of the given iov_iter, to a maximum length
@@ -265,26 +261,16 @@ EXPORT_SYMBOL(iov_iter_advance);
  */
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 {
-	char __user *buf = i->iov->iov_base + i->iov_offset;
-	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
-	return fault_in_pages_readable(buf, bytes);
+	if (!(i->type & ITER_BVEC)) {
+		char __user *buf = i->iov->iov_base + i->iov_offset;
+		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
+		return fault_in_pages_readable(buf, bytes);
+	}
+	return 0;
 }
 EXPORT_SYMBOL(iov_iter_fault_in_readable);
 
-/*
- * Return the count of just the current iov_iter segment.
- */
-size_t iov_iter_single_seg_count(const struct iov_iter *i)
-{
-	const struct iovec *iov = i->iov;
-	if (i->nr_segs == 1)
-		return i->count;
-	else
-		return min(i->count, iov->iov_len - i->iov_offset);
-}
-EXPORT_SYMBOL(iov_iter_single_seg_count);
-
-unsigned long iov_iter_alignment(const struct iov_iter *i)
+static unsigned long alignment_iovec(const struct iov_iter *i)
 {
	const struct iovec *iov = i->iov;
	unsigned long res;
@@ -307,7 +293,6 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
	res |= (unsigned long)iov->iov_base | size;
	return res;
 }
-EXPORT_SYMBOL(iov_iter_alignment);
 
 void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
@@ -315,7 +300,7 @@ void iov_iter_init(struct iov_iter *i, int direction,
 {
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS))
-		direction |= REQ_KERNEL;
+		direction |= ITER_KVEC;
	i->type = direction;
	i->iov = iov;
	i->nr_segs = nr_segs;
@@ -324,7 +309,7 @@ void iov_iter_init(struct iov_iter *i, int direction,
 }
 EXPORT_SYMBOL(iov_iter_init);
 
-ssize_t iov_iter_get_pages(struct iov_iter *i,
+static ssize_t get_pages_iovec(struct iov_iter *i,
		struct page **pages, size_t maxsize,
		size_t *start)
 {
@@ -349,9 +334,8 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
		return res;
	return (res == n ? len : res * PAGE_SIZE) - *start;
 }
-EXPORT_SYMBOL(iov_iter_get_pages);
 
-ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
+static ssize_t get_pages_alloc_iovec(struct iov_iter *i,
		struct page ***pages, size_t maxsize,
		size_t *start)
 {
@@ -387,9 +371,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
	*pages = p;
	return (res == n ? len : res * PAGE_SIZE) - *start;
 }
-EXPORT_SYMBOL(iov_iter_get_pages_alloc);
 
-int iov_iter_npages(const struct iov_iter *i, int maxpages)
+static int iov_iter_npages_iovec(const struct iov_iter *i, int maxpages)
 {
	size_t offset = i->iov_offset;
	size_t size = i->count;
@@ -414,4 +397,347 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
	}
	return min(npages, maxpages);
 }
+
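+/*
+ * kmap_atomic()-based helpers for copying a byte range to/from a page
+ * that may live in highmem; the range must not cross a page boundary.
+ */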
+static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
+{
+	char *from = kmap_atomic(page);
+	memcpy(to, from + offset, len);
+	kunmap_atomic(from);
+}
+
+static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
+{
+	char *to = kmap_atomic(page);
+	memcpy(to + offset, from, len);
+	kunmap_atomic(to);
+}
+
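+/*
+ * Copy data from @page into a bio_vec-backed iterator.  The bvec
+ * segments are kernel pages, so no fault can occur and the full
+ * amount (capped to i->count) is always copied and consumed.
+ */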
+static size_t copy_page_to_iter_bvec(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	size_t skip, copy, wanted;
+	const struct bio_vec *bvec;
+	void *kaddr, *from;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	wanted = bytes;
+	bvec = i->bvec;
+	skip = i->iov_offset;
+	copy = min_t(size_t, bytes, bvec->bv_len - skip);
+
+	kaddr = kmap_atomic(page);
+	from = kaddr + offset;
+	memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
+	skip += copy;
+	from += copy;
+	bytes -= copy;
+	while (bytes) {
+		bvec++;
+		copy = min(bytes, (size_t)bvec->bv_len);
+		memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
+		skip = copy;
+		from += copy;
+		bytes -= copy;
+	}
+	kunmap_atomic(kaddr);
+	if (skip == bvec->bv_len) {
+		bvec++;
+		skip = 0;
+	}
+	i->count -= wanted - bytes;
+	i->nr_segs -= bvec - i->bvec;
+	i->bvec = bvec;
+	i->iov_offset = skip;
+	return wanted - bytes;
+}
+
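+/*
+ * Copy data from a bio_vec-backed iterator into @page.  As above,
+ * nothing can fault, so the copy always succeeds in full.
+ */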
+static size_t copy_page_from_iter_bvec(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	size_t skip, copy, wanted;
+	const struct bio_vec *bvec;
+	void *kaddr, *to;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	wanted = bytes;
+	bvec = i->bvec;
+	skip = i->iov_offset;
+
+	kaddr = kmap_atomic(page);
+
+	to = kaddr + offset;
+
+	copy = min(bytes, bvec->bv_len - skip);
+
+	memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);
+
+	to += copy;
+	skip += copy;
+	bytes -= copy;
+
+	while (bytes) {
+		bvec++;
+		copy = min(bytes, (size_t)bvec->bv_len);
+		memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, copy);
+		skip = copy;
+		to += copy;
+		bytes -= copy;
+	}
+	kunmap_atomic(kaddr);
+	if (skip == bvec->bv_len) {
+		bvec++;
+		skip = 0;
+	}
+	i->count -= wanted;
+	i->nr_segs -= bvec - i->bvec;
+	i->bvec = bvec;
+	i->iov_offset = skip;
+	return wanted;
+}
+
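+/*
+ * bvec counterpart of copy_from_user_atomic_iovec(): the source
+ * segments are kernel pages, so the whole @bytes is always copied;
+ * the iterator is not advanced.
+ */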
+static size_t copy_from_user_bvec(struct page *page,
+		struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+	char *kaddr;
+	size_t left;
+	const struct bio_vec *bvec;
+	size_t base = i->iov_offset;
+
+	kaddr = kmap_atomic(page);
+	for (left = bytes, bvec = i->bvec; left; bvec++, base = 0) {
+		size_t copy = min(left, bvec->bv_len - base);
+		if (!bvec->bv_len)
+			continue;
+		memcpy_from_page(kaddr + offset, bvec->bv_page,
+				 bvec->bv_offset + base, copy);
+		offset += copy;
+		left -= copy;
+	}
+	kunmap_atomic(kaddr);
+	return bytes;
+}
+
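+/*
+ * Advance a bvec-backed iterator by @bytes, stepping over fully
+ * consumed (and zero-length) segments.
+ */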
+static void advance_bvec(struct iov_iter *i, size_t bytes)
+{
+	BUG_ON(i->count < bytes);
+
+	if (likely(i->nr_segs == 1)) {
+		i->iov_offset += bytes;
+		i->count -= bytes;
+	} else {
+		const struct bio_vec *bvec = i->bvec;
+		size_t base = i->iov_offset;
+		unsigned long nr_segs = i->nr_segs;
+
+		/*
+		 * The !bvec->bv_len check ensures we skip over unlikely
+		 * zero-length segments (without overrunning the bvec array).
+		 */
+		while (bytes || unlikely(i->count && !bvec->bv_len)) {
+			int copy;
+
+			copy = min(bytes, bvec->bv_len - base);
+			BUG_ON(!i->count || i->count < copy);
+			i->count -= copy;
+			bytes -= copy;
+			base += copy;
+			if (bvec->bv_len == base) {
+				bvec++;
+				nr_segs--;
+				base = 0;
+			}
+		}
+		i->bvec = bvec;
+		i->iov_offset = base;
+		i->nr_segs = nr_segs;
+	}
+}
+
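+/*
+ * OR together the offset and length of every bvec segment covered by
+ * i->count; a caller (e.g. direct I/O) can then test the result
+ * against an alignment mask in one go.
+ */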
+static unsigned long alignment_bvec(const struct iov_iter *i)
+{
+	const struct bio_vec *bvec = i->bvec;
+	unsigned long res;
+	size_t size = i->count;
+	size_t n;
+
+	if (!size)
+		return 0;
+
+	res = bvec->bv_offset + i->iov_offset;
+	n = bvec->bv_len - i->iov_offset;
+	if (n >= size)
+		return res | size;
+	size -= n;
+	res |= n;
+	while (size > (++bvec)->bv_len) {
+		res |= bvec->bv_offset | bvec->bv_len;
+		size -= bvec->bv_len;
+	}
+	res |= bvec->bv_offset | size;
+	return res;
+}
+
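+/*
+ * A bvec segment refers to exactly one page, so grab a reference to
+ * that page and report how much of it is covered by the iterator.
+ */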
+static ssize_t get_pages_bvec(struct iov_iter *i,
+		struct page **pages, size_t maxsize,
+		size_t *start)
+{
+	const struct bio_vec *bvec = i->bvec;
+	size_t len = bvec->bv_len - i->iov_offset;
+	if (len > i->count)
+		len = i->count;
+	if (len > maxsize)
+		len = maxsize;
+	*start = bvec->bv_offset + i->iov_offset;
+
+	get_page(*pages = bvec->bv_page);
+
+	return len;
+}
+
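+/*
+ * Same as get_pages_bvec(), but kmalloc()s the single-entry page
+ * array, which the caller is expected to kfree().
+ */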
+static ssize_t get_pages_alloc_bvec(struct iov_iter *i,
+		struct page ***pages, size_t maxsize,
+		size_t *start)
+{
+	const struct bio_vec *bvec = i->bvec;
+	size_t len = bvec->bv_len - i->iov_offset;
+	if (len > i->count)
+		len = i->count;
+	if (len > maxsize)
+		len = maxsize;
+	*start = bvec->bv_offset + i->iov_offset;
+
+	*pages = kmalloc(sizeof(struct page *), GFP_KERNEL);
+	if (!*pages)
+		return -ENOMEM;
+
+	get_page(**pages = bvec->bv_page);
+
+	return len;
+}
+
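+/*
+ * Each non-empty bvec segment maps a single page, so it contributes
+ * exactly one page regardless of its offset or length.
+ */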
+static int iov_iter_npages_bvec(const struct iov_iter *i, int maxpages)
+{
+	size_t offset = i->iov_offset;
+	size_t size = i->count;
+	const struct bio_vec *bvec = i->bvec;
+	int npages = 0;
+	int n;
+
+	for (n = 0; size && n < i->nr_segs; n++, bvec++) {
+		size_t len = bvec->bv_len - offset;
+		offset = 0;
+		if (unlikely(!len))	/* empty segment */
+			continue;
+		if (len > size)
+			len = size;
+		npages++;
+		if (npages >= maxpages)	/* don't bother going further */
+			return maxpages;
+		size -= len;
+	}
+	return min(npages, maxpages);
+}
+
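+/*
+ * Public entry points below simply dispatch on the iterator type:
+ * ITER_BVEC iterators use the bvec implementations above, everything
+ * else goes through the original iovec paths.
+ */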
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	if (i->type & ITER_BVEC)
+		return copy_page_to_iter_bvec(page, offset, bytes, i);
+	else
+		return copy_page_to_iter_iovec(page, offset, bytes, i);
+}
+EXPORT_SYMBOL(copy_page_to_iter);
+
+size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
+			   struct iov_iter *i)
+{
+	if (i->type & ITER_BVEC)
+		return copy_page_from_iter_bvec(page, offset, bytes, i);
+	else
+		return copy_page_from_iter_iovec(page, offset, bytes, i);
+}
+EXPORT_SYMBOL(copy_page_from_iter);
+
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+		struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+	if (i->type & ITER_BVEC)
+		return copy_from_user_bvec(page, i, offset, bytes);
+	else
+		return copy_from_user_atomic_iovec(page, i, offset, bytes);
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+
+void iov_iter_advance(struct iov_iter *i, size_t size)
+{
+	if (i->type & ITER_BVEC)
+		advance_bvec(i, size);
+	else
+		advance_iovec(i, size);
+}
+EXPORT_SYMBOL(iov_iter_advance);
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+size_t iov_iter_single_seg_count(const struct iov_iter *i)
+{
+	if (i->nr_segs == 1)
+		return i->count;
+	else if (i->type & ITER_BVEC)
+		return min(i->count, i->bvec->bv_len - i->iov_offset);
+	else
+		return min(i->count, i->iov->iov_len - i->iov_offset);
+}
+EXPORT_SYMBOL(iov_iter_single_seg_count);
+
+unsigned long iov_iter_alignment(const struct iov_iter *i)
+{
+	if (i->type & ITER_BVEC)
+		return alignment_bvec(i);
+	else
+		return alignment_iovec(i);
+}
+EXPORT_SYMBOL(iov_iter_alignment);
+
+ssize_t iov_iter_get_pages(struct iov_iter *i,
+		struct page **pages, size_t maxsize,
+		size_t *start)
+{
+	if (i->type & ITER_BVEC)
+		return get_pages_bvec(i, pages, maxsize, start);
+	else
+		return get_pages_iovec(i, pages, maxsize, start);
+}
+EXPORT_SYMBOL(iov_iter_get_pages);
+
+ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
+		struct page ***pages, size_t maxsize,
+		size_t *start)
+{
+	if (i->type & ITER_BVEC)
+		return get_pages_alloc_bvec(i, pages, maxsize, start);
+	else
+		return get_pages_alloc_iovec(i, pages, maxsize, start);
+}
+EXPORT_SYMBOL(iov_iter_get_pages_alloc);
+
+int iov_iter_npages(const struct iov_iter *i, int maxpages)
+{
+	if (i->type & ITER_BVEC)
+		return iov_iter_npages_bvec(i, maxpages);
+	else
+		return iov_iter_npages_iovec(i, maxpages);
+}
 EXPORT_SYMBOL(iov_iter_npages);