@@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 int bio_uncopy_user(struct bio *bio)
 {
 	struct bio_map_data *bmd = bio->bi_private;
-	int ret = 0;
+	struct bio_vec *bvec;
+	int ret = 0, i;
 
-	if (!bio_flagged(bio, BIO_NULL_MAPPED))
-		ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
-				     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
-				     0, bmd->is_our_pages);
+	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+		/*
+		 * if we're in a workqueue, the request is orphaned, so
+		 * don't copy into a random user address space, just free.
+		 */
+		if (current->mm)
+			ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+					     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+					     0, bmd->is_our_pages);
+		else if (bmd->is_our_pages)
+			bio_for_each_segment_all(bvec, bio, i)
+				__free_page(bvec->bv_page);
+	}
 	bio_free_map_data(bmd);
 	bio_put(bio);
 	return ret;