@@ -137,8 +137,7 @@ static inline bool valid_io_request(struct zram *zram,
 
 static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
 {
-	if (*offset + bvec->bv_len >= PAGE_SIZE)
-		(*index)++;
+	*index += (*offset + bvec->bv_len) / PAGE_SIZE;
 	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
 }
 
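With multi-page bvecs now reaching the driver, a single segment can cross more than one page boundary, so update_position() has to advance the page index by the number of boundaries crossed instead of bumping it at most once. A standalone userspace sketch of the new arithmetic, with PAGE_SIZE hard-coded to 4096 purely for illustration:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096	/* illustration only; the kernel macro is per-arch */

/* Mirrors the new update_position() arithmetic on plain integers. */
static void update_position(unsigned int *index, unsigned int *offset,
			    unsigned int bv_len)
{
	*index += (*offset + bv_len) / PAGE_SIZE;	/* may cross several pages */
	*offset = (*offset + bv_len) % PAGE_SIZE;
}

int main(void)
{
	unsigned int index = 0, offset = 512;

	/* A 10KB segment starting 512 bytes into page 0 ends 2560 bytes
	 * into page 2; the old "(*index)++" would have stopped at page 1. */
	update_position(&index, &offset, 10240);
	assert(index == 2 && offset == 2560);
	printf("index=%u offset=%u\n", index, offset);
	return 0;
}
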
@@ -840,34 +839,21 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
 	}
 
 	bio_for_each_segment(bvec, bio, iter) {
-		int max_transfer_size = PAGE_SIZE - offset;
-
-		if (bvec.bv_len > max_transfer_size) {
-			/*
-			 * zram_bvec_rw() can only make operation on a single
-			 * zram page. Split the bio vector.
-			 */
-			struct bio_vec bv;
-
-			bv.bv_page = bvec.bv_page;
-			bv.bv_len = max_transfer_size;
-			bv.bv_offset = bvec.bv_offset;
+		struct bio_vec bv = bvec;
+		unsigned int unwritten = bvec.bv_len;
 
+		do {
+			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
+						unwritten);
 			if (zram_bvec_rw(zram, &bv, index, offset,
-					op_is_write(bio_op(bio))) < 0)
+					 op_is_write(bio_op(bio))) < 0)
 				goto out;
 
-			bv.bv_len = bvec.bv_len - max_transfer_size;
-			bv.bv_offset += max_transfer_size;
-			if (zram_bvec_rw(zram, &bv, index + 1, 0,
-					op_is_write(bio_op(bio))) < 0)
-				goto out;
-		} else
-			if (zram_bvec_rw(zram, &bvec, index, offset,
-					op_is_write(bio_op(bio))) < 0)
-				goto out;
+			bv.bv_offset += bv.bv_len;
+			unwritten -= bv.bv_len;
 
-		update_position(&index, &offset, &bvec);
+			update_position(&index, &offset, &bv);
+		} while (unwritten);
 	}
 
 	bio_endio(bio);
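The old path could split a straddling bvec exactly once, into pages index and index + 1, which silently assumed no segment spans more than two pages. The do/while rewrite instead carves each segment into page-bounded chunks and hands them to zram_bvec_rw() one at a time, so a segment of any length works. A userspace sketch of the chunking loop; min_uint() and chunk_segment() are made-up names for the sketch, and printf() stands in for the real I/O call:

#include <stdio.h>

#define PAGE_SIZE 4096	/* illustration only */

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;	/* stand-in for the kernel's min_t() */
}

/* Toy model of the new loop: chunk one segment of bv_len bytes,
 * starting at (index, offset), into page-bounded pieces. */
static void chunk_segment(unsigned int index, unsigned int offset,
			  unsigned int bv_len)
{
	unsigned int unwritten = bv_len;

	do {
		unsigned int len = min_uint(PAGE_SIZE - offset, unwritten);

		/* here the driver calls zram_bvec_rw(..., index, offset, ...) */
		printf("rw page %u, offset %u, len %u\n", index, offset, len);

		unwritten -= len;
		index += (offset + len) / PAGE_SIZE;
		offset = (offset + len) % PAGE_SIZE;
	} while (unwritten);
}

int main(void)
{
	/* The 10KB segment at offset 512 becomes chunks of 3584, 4096, 2560. */
	chunk_segment(0, 512, 10240);
	return 0;
}
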
@@ -884,8 +870,6 @@ static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct zram *zram = queue->queuedata;
 
-	blk_queue_split(queue, &bio, queue->bio_split);
-
 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
 					bio->bi_iter.bi_size)) {
 		atomic64_inc(&zram->stats.invalid_io);
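Since the loop above now copes with bvecs that span multiple pages, zram no longer needs the block layer to split incoming bios into single-page pieces, and the blk_queue_split() call can simply be dropped.
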
@@ -1193,8 +1177,6 @@ static int zram_add(void)
 	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
-	zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE;
-	zram->disk->queue->limits.chunk_sectors = 0;
 	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
 
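For the same reason, zram_add() no longer has to clamp the queue's max_sectors to a single page (SECTORS_PER_PAGE) or override chunk_sectors; those limits existed only so the block layer would split bios at page boundaries on the driver's behalf.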