@@ -9,24 +9,6 @@
 
 #include "blk.h"
 
-static bool iovec_gap_to_prv(struct request_queue *q,
-		struct iovec *prv, struct iovec *cur)
-{
-	unsigned long prev_end;
-
-	if (!queue_virt_boundary(q))
-		return false;
-
-	if (prv->iov_base == NULL && prv->iov_len == 0)
-		/* prv is not set - don't check */
-		return false;
-
-	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
-
-	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
-		prev_end & queue_virt_boundary(q));
-}
-
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio)
 {
@@ -125,31 +107,18 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		struct rq_map_data *map_data,
 		const struct iov_iter *iter, gfp_t gfp_mask)
 {
-	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
-	bool copy = (q->dma_pad_mask & iter->count) || map_data;
+	bool copy = false;
+	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
 	struct bio *bio = NULL;
 	struct iov_iter i;
 	int ret;
 
-	if (!iter || !iter->count)
-		return -EINVAL;
-
-	iov_for_each(iov, i, *iter) {
-		unsigned long uaddr = (unsigned long) iov.iov_base;
-
-		if (!iov.iov_len)
-			return -EINVAL;
-
-		/*
-		 * Keep going so we check length of all segments
-		 */
-		if ((uaddr & queue_dma_alignment(q)) ||
-		    iovec_gap_to_prv(q, &prv, &iov))
-			copy = true;
-
-		prv.iov_base = iov.iov_base;
-		prv.iov_len = iov.iov_len;
-	}
+	if (map_data)
+		copy = true;
+	else if (iov_iter_alignment(iter) & align)
+		copy = true;
+	else if (queue_virt_boundary(q))
+		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
 
 	i = *iter;
 	do {
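
For reference, below is a minimal user-space sketch of the new copy decision. The two helpers (iovec_alignment() and iovec_gap_alignment() here) are hypothetical stand-ins that only model what iov_iter_alignment() and iov_iter_gap_alignment() report for a plain iovec-backed iterator: the OR of segment addresses and lengths, and the OR of each interior segment boundary. The helper bodies and the example masks are illustrative assumptions, not the kernel implementations.

/* gap_check.c - toy model of the copy decision in blk_rq_map_user_iov() */
#include <stdbool.h>
#include <stdio.h>
#include <sys/uio.h>

/* OR of every segment's start address and length (models iov_iter_alignment) */
static unsigned long iovec_alignment(const struct iovec *iov, int n)
{
	unsigned long res = 0;
	int k;

	for (k = 0; k < n; k++)
		res |= (unsigned long)iov[k].iov_base | iov[k].iov_len;
	return res;
}

/*
 * OR of the start of every segment but the first and the end of every
 * segment but the last (models iov_iter_gap_alignment): any of these
 * bits intersecting a boundary mask means the buffer has a hole
 * relative to that boundary.
 */
static unsigned long iovec_gap_alignment(const struct iovec *iov, int n)
{
	unsigned long res = 0;
	int k;

	for (k = 0; k < n; k++) {
		if (k)
			res |= (unsigned long)iov[k].iov_base;
		if (k != n - 1)
			res |= (unsigned long)iov[k].iov_base + iov[k].iov_len;
	}
	return res;
}

int main(void)
{
	/* example masks: 512-byte DMA alignment, 4 KiB virt boundary */
	unsigned long align = 511, virt_boundary = 4095;
	struct iovec iov[2] = {
		{ .iov_base = (void *)0x10000, .iov_len = 4096 },
		{ .iov_base = (void *)0x20200, .iov_len = 512 },
	};
	bool copy = false;

	if (iovec_alignment(iov, 2) & align)
		copy = true;
	else if (virt_boundary)
		copy = virt_boundary & iovec_gap_alignment(iov, 2);

	/* prints "copy = 1": the second segment starts mid-boundary */
	printf("copy = %d\n", copy);
	return 0;
}

Folding q->dma_pad_mask and queue_dma_alignment(q) into one mask up front is what lets a single iov_iter_alignment() test replace the old per-segment loop, and the gap check only runs when the queue actually declares a virt boundary.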