@@ -492,8 +492,7 @@ static inline size_t head_to_end(size_t head, size_t size)
 	return size - head;
 }
 
-static inline void new_iov(struct iovec **iov, int *iov_cnt,
-			   struct tcmu_dev *udev)
+static inline void new_iov(struct iovec **iov, int *iov_cnt)
 {
 	struct iovec *iovec;
 
@@ -546,19 +545,38 @@ static void scatter_data_area(struct tcmu_dev *udev,
 				to = kmap_atomic(page);
 			}
 
-			copy_bytes = min_t(size_t, sg_remaining,
-					block_remaining);
+			/*
+			 * Convert to the virtual offset within the ring data area.
+			 */
 			to_offset = get_block_offset_user(udev, dbi,
 					block_remaining);
 
+			/*
+			 * The following code will gather and map the blocks
+			 * to the same iovec when the blocks are all next to
+			 * each other.
+			 */
+			copy_bytes = min_t(size_t, sg_remaining,
+					   block_remaining);
 			if (*iov_cnt != 0 &&
 			    to_offset == iov_tail(*iov)) {
+				/*
+				 * Will append to the current iovec, because
+				 * the current block page is next to the
+				 * previous one.
+				 */
 				(*iov)->iov_len += copy_bytes;
 			} else {
-				new_iov(iov, iov_cnt, udev);
+				/*
+				 * Will allocate a new iovec because this is
+				 * the first time here, or the current block
+				 * page is not next to the previous one.
+				 */
+				new_iov(iov, iov_cnt);
 				(*iov)->iov_base = (void __user *)to_offset;
 				(*iov)->iov_len = copy_bytes;
 			}
+
 			if (copy_data) {
 				offset = DATA_BLOCK_SIZE - block_remaining;
 				memcpy(to + offset,
@@ -566,11 +584,13 @@ static void scatter_data_area(struct tcmu_dev *udev,
 				       copy_bytes);
 				tcmu_flush_dcache_range(to, copy_bytes);
 			}
+
 			sg_remaining -= copy_bytes;
 			block_remaining -= copy_bytes;
 		}
 		kunmap_atomic(from - sg->offset);
 	}
+
 	if (to)
 		kunmap_atomic(to);
 }
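
[Note, not part of the patch: a minimal user-space sketch of the coalescing rule the new comments describe. It assumes 4 KiB data blocks and uses plain offsets in place of the ring's user-space addresses; add_block() and the local iov_tail() are illustrative stand-ins, not the kernel helpers.]

#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

#define BLOCK_SIZE 4096
#define MAX_IOV    8

/* End offset of one iovec. */
static size_t iov_tail(const struct iovec *iov)
{
	return (size_t)iov->iov_base + iov->iov_len;
}

/*
 * Same decision as in scatter_data_area(): extend the current iovec
 * when the new block is contiguous with its tail, otherwise open a
 * new one.
 */
static void add_block(struct iovec *iovs, int *iov_cnt, size_t offset, size_t len)
{
	if (*iov_cnt != 0 && offset == iov_tail(&iovs[*iov_cnt - 1])) {
		iovs[*iov_cnt - 1].iov_len += len;
	} else {
		iovs[*iov_cnt].iov_base = (void *)offset;
		iovs[*iov_cnt].iov_len = len;
		(*iov_cnt)++;
	}
}

int main(void)
{
	struct iovec iovs[MAX_IOV] = { { 0 } };
	int iov_cnt = 0;
	size_t blocks[] = { 0, 1, 5 };	/* blocks 0 and 1 are adjacent, block 5 is not */
	size_t i;

	for (i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++)
		add_block(iovs, &iov_cnt, blocks[i] * BLOCK_SIZE, BLOCK_SIZE);

	for (i = 0; i < (size_t)iov_cnt; i++)
		printf("iov[%zu]: base=%zu len=%zu\n",
		       i, (size_t)iovs[i].iov_base, iovs[i].iov_len);

	return 0;	/* prints two iovecs: [0, 8192) and [20480, 24576) */
}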
|