@@ -129,33 +129,6 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 }
 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 
-/*
- * This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-	char *kaddr;
-	size_t copied;
-
-	kaddr = kmap(page);
-	if (likely(i->nr_segs == 1)) {
-		int left;
-		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user(kaddr + offset, buf, bytes);
-		copied = bytes - left;
-	} else {
-		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-						i->iov, i->iov_offset, bytes);
-	}
-	kunmap(page);
-	return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
 void iov_iter_advance(struct iov_iter *i, size_t bytes)
 {
 	BUG_ON(i->count < bytes);