@@ -108,24 +108,8 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	bool do_split = true;
 	struct bio *new = NULL;
 	const unsigned max_sectors = get_max_io_size(q, bio);
-	unsigned bvecs = 0;
 
 	bio_for_each_segment(bv, bio, iter) {
-		/*
-		 * With arbitrary bio size, the incoming bio may be very
-		 * big. We have to split the bio into small bios so that
-		 * each holds at most BIO_MAX_PAGES bvecs because
-		 * bio_clone_bioset() can fail to allocate big bvecs.
-		 *
-		 * Those drivers which will need to use bio_clone_bioset()
-		 * should tell us in some way. For now, impose the
-		 * BIO_MAX_PAGES limit on all queues.
-		 *
-		 * TODO: handle users of bio_clone_bioset() differently.
-		 */
-		if (bvecs++ >= BIO_MAX_PAGES)
-			goto split;
-
 		/*
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.