@@ -39,130 +39,6 @@ static int __blk_rq_unmap_user(struct bio *bio)
 	return ret;
 }
 
-static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
-		struct rq_map_data *map_data, void __user *ubuf,
-		unsigned int len, gfp_t gfp_mask)
-{
-	unsigned long uaddr;
-	struct bio *bio, *orig_bio;
-	int reading, ret;
-
-	reading = rq_data_dir(rq) == READ;
-
-	/*
-	 * if alignment requirement is satisfied, map in user pages for
-	 * direct dma. else, set up kernel bounce buffers
-	 */
-	uaddr = (unsigned long) ubuf;
-	if (blk_rq_aligned(q, uaddr, len) && !map_data)
-		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
-	else
-		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
-
-	if (IS_ERR(bio))
-		return PTR_ERR(bio);
-
-	if (map_data && map_data->null_mapped)
-		bio->bi_flags |= (1 << BIO_NULL_MAPPED);
-
-	orig_bio = bio;
-	blk_queue_bounce(q, &bio);
-
-	/*
-	 * We link the bounce buffer in and could have to traverse it
-	 * later so we have to get a ref to prevent it from being freed
-	 */
-	bio_get(bio);
-
-	ret = blk_rq_append_bio(q, rq, bio);
-	if (!ret)
-		return bio->bi_iter.bi_size;
-
-	/* if it was boucned we must call the end io function */
-	bio_endio(bio, 0);
-	__blk_rq_unmap_user(orig_bio);
-	bio_put(bio);
-	return ret;
-}
-
-/**
- * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
- * @q: request queue where request should be inserted
- * @rq: request structure to fill
- * @map_data: pointer to the rq_map_data holding pages (if necessary)
- * @ubuf: the user buffer
- * @len: length of user data
- * @gfp_mask: memory allocation flags
- *
- * Description:
- *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
- *    a kernel bounce buffer is used.
- *
- *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
- *    still in process context.
- *
- *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
- *    before being submitted to the device, as pages mapped may be out of
- *    reach. It's the callers responsibility to make sure this happens. The
- *    original bio must be passed back in to blk_rq_unmap_user() for proper
- *    unmapping.
- */
-int blk_rq_map_user(struct request_queue *q, struct request *rq,
-		    struct rq_map_data *map_data, void __user *ubuf,
-		    unsigned long len, gfp_t gfp_mask)
-{
-	unsigned long bytes_read = 0;
-	struct bio *bio = NULL;
-	int ret;
-
-	if (len > (queue_max_hw_sectors(q) << 9))
-		return -EINVAL;
-	if (!len)
-		return -EINVAL;
-
-	if (!ubuf && (!map_data || !map_data->null_mapped))
-		return -EINVAL;
-
-	while (bytes_read != len) {
-		unsigned long map_len, end, start;
-
-		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
-		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
-								>> PAGE_SHIFT;
-		start = (unsigned long)ubuf >> PAGE_SHIFT;
-
-		/*
-		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
-		 * pages. If this happens we just lower the requested
-		 * mapping len by a page so that we can fit
-		 */
-		if (end - start > BIO_MAX_PAGES)
-			map_len -= PAGE_SIZE;
-
-		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
-					gfp_mask);
-		if (ret < 0)
-			goto unmap_rq;
-		if (!bio)
-			bio = rq->bio;
-		bytes_read += ret;
-		ubuf += ret;
-
-		if (map_data)
-			map_data->offset += ret;
-	}
-
-	if (!bio_flagged(bio, BIO_USER_MAPPED))
-		rq->cmd_flags |= REQ_COPY_USER;
-
-	return 0;
-unmap_rq:
-	blk_rq_unmap_user(bio);
-	rq->bio = NULL;
-	return ret;
-}
-EXPORT_SYMBOL(blk_rq_map_user);
-
 /**
  * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
  * @q: request queue where request should be inserted
@@ -241,6 +117,19 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_user_iov);
 
+int blk_rq_map_user(struct request_queue *q, struct request *rq,
+		    struct rq_map_data *map_data, void __user *ubuf,
+		    unsigned long len, gfp_t gfp_mask)
+{
+	struct sg_iovec iov;
+
+	iov.iov_base = (void __user *)ubuf;
+	iov.iov_len = len;
+
+	return blk_rq_map_user_iov(q, rq, map_data, &iov, 1, len, gfp_mask);
+}
+EXPORT_SYMBOL(blk_rq_map_user);
+
 /**
  * blk_rq_unmap_user - unmap a request with user data
  * @bio: start of bio list