@@ -9,21 +9,26 @@
 
 #include "blk.h"
 
-int blk_rq_append_bio(struct request_queue *q, struct request *rq,
-		      struct bio *bio)
+/*
+ * Append a bio to a passthrough request.  Only works if the bio can be
+ * merged into the request based on the driver constraints.
+ */
+int blk_rq_append_bio(struct request *rq, struct bio *bio)
 {
-	if (!rq->bio)
-		blk_rq_bio_prep(q, rq, bio);
-	else if (!ll_back_merge_fn(q, rq, bio))
-		return -EINVAL;
-	else {
+	if (!rq->bio) {
+		blk_rq_bio_prep(rq->q, rq, bio);
+	} else {
+		if (!ll_back_merge_fn(rq->q, rq, bio))
+			return -EINVAL;
+
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
-
 		rq->__data_len += bio->bi_iter.bi_size;
 	}
+
 	return 0;
 }
+EXPORT_SYMBOL(blk_rq_append_bio);
 
 static int __blk_rq_unmap_user(struct bio *bio)
 {
@@ -71,7 +76,7 @@ static int __blk_rq_map_user_iov(struct request *rq,
 	 */
 	bio_get(bio);
 
-	ret = blk_rq_append_bio(q, rq, bio);
+	ret = blk_rq_append_bio(rq, bio);
 	if (ret) {
 		bio_endio(bio);
 		__blk_rq_unmap_user(orig_bio);
@@ -229,7 +234,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (do_copy)
 		rq->cmd_flags |= REQ_COPY_USER;
 
-	ret = blk_rq_append_bio(q, rq, bio);
+	ret = blk_rq_append_bio(rq, bio);
 	if (unlikely(ret)) {
 		/* request is too big */
 		bio_put(bio);