@@ -26,6 +26,7 @@
 #define POOL_SIZE	64
 #define ISA_POOL_SIZE	16
 
+struct bio_set *bounce_bio_set, *bounce_bio_split;
 static mempool_t *page_pool, *isa_page_pool;
 
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
@@ -40,6 +41,14 @@ static __init int init_emergency_pool(void)
 	BUG_ON(!page_pool);
 	pr_info("pool size: %d pages\n", POOL_SIZE);
 
+	bounce_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+	BUG_ON(!bounce_bio_set);
+	if (bioset_integrity_create(bounce_bio_set, BIO_POOL_SIZE))
+		BUG_ON(1);
+
+	bounce_bio_split = bioset_create(BIO_POOL_SIZE, 0, 0);
+	BUG_ON(!bounce_bio_split);
+
 	return 0;
 }
 
@@ -186,15 +195,26 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 	int rw = bio_data_dir(*bio_orig);
 	struct bio_vec *to, from;
 	struct bvec_iter iter;
-	unsigned i;
+	unsigned i = 0;
+	bool bounce = false;
+	int sectors = 0;
 
-	bio_for_each_segment(from, *bio_orig, iter)
+	bio_for_each_segment(from, *bio_orig, iter) {
+		if (i++ < BIO_MAX_PAGES)
+			sectors += from.bv_len >> 9;
 		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
-			goto bounce;
+			bounce = true;
+	}
+	if (!bounce)
+		return;
 
-	return;
-bounce:
-	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);
+	if (sectors < bio_sectors(*bio_orig)) {
+		bio = bio_split(*bio_orig, sectors, GFP_NOIO, bounce_bio_split);
+		bio_chain(bio, *bio_orig);
+		generic_make_request(*bio_orig);
+		*bio_orig = bio;
+	}
+	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, bounce_bio_set);
 
 	bio_for_each_segment_all(to, bio, i) {
 		struct page *page = to->bv_page;