
Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull more block fixes from Jens Axboe:
 "As mentioned in the pull the other day, a few more fixes for this
  round, all related to the bio op changes in this series.

  Two fixes, and then a cleanup, renaming bio->bi_rw to bio->bi_opf.  I
  wanted to do that change right after or right before -rc1, so that
  risk of conflict was reduced.  I just rebased the series on top of
  current master, and no new ->bi_rw usage has snuck in"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: rename bio bi_rw to bi_opf
  target: iblock_execute_sync_cache() should use bio_set_op_attrs()
  mm: make __swap_writepage() use bio_set_op_attrs()
  block/mm: make bdev_ops->rw_page() take a bool for read/write
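For context, a minimal sketch of what the bi_rw -> bi_opf rename means at a typical call site. This is illustrative only; it just restates, in isolation, the pattern the hunks in this merge apply tree-wide, using the 4.8-era flag names and the bio_set_op_attrs() helper that appear in the diffs below:

	/* formerly: const bool sync = !!(bio->bi_rw & REQ_SYNC); */
	const bool sync = !!(bio->bi_opf & REQ_SYNC);   /* request flags now live in bi_opf */
	const int  op   = bio_op(bio);                  /* REQ_OP_* part of bi_opf */

	/* operation and flags are set together through the helper */
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);

The final shortlog entry makes the related bdev_ops->rw_page() interface take a bool for read/write instead of an op, which is what the brd and zram hunks below implement.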
Linus Torvalds 9 years ago
parent
commit
857953d72f
62 changed files with 213 additions and 213 deletions
  Documentation/block/biodoc.txt  +2 -2
  Documentation/device-mapper/dm-flakey.txt  +1 -1
  block/bio-integrity.c  +1 -1
  block/bio.c  +3 -3
  block/blk-core.c  +13 -13
  block/blk-merge.c  +4 -4
  block/blk-mq.c  +5 -5
  block/blk-throttle.c  +4 -4
  block/cfq-iosched.c  +2 -2
  drivers/block/brd.c  +8 -8
  drivers/block/drbd/drbd_main.c  +4 -4
  drivers/block/drbd/drbd_receiver.c  +1 -1
  drivers/block/drbd/drbd_req.c  +3 -3
  drivers/block/drbd/drbd_worker.c  +1 -1
  drivers/block/pktcdvd.c  +1 -1
  drivers/block/umem.c  +1 -1
  drivers/block/zram/zram_drv.c  +12 -11
  drivers/md/bcache/request.c  +6 -6
  drivers/md/bcache/super.c  +1 -1
  drivers/md/bcache/writeback.h  +1 -1
  drivers/md/dm-cache-target.c  +4 -4
  drivers/md/dm-crypt.c  +2 -2
  drivers/md/dm-era-target.c  +1 -1
  drivers/md/dm-flakey.c  +3 -3
  drivers/md/dm-io.c  +3 -3
  drivers/md/dm-log-writes.c  +2 -2
  drivers/md/dm-mpath.c  +1 -1
  drivers/md/dm-raid1.c  +5 -5
  drivers/md/dm-region-hash.c  +2 -2
  drivers/md/dm-snap.c  +3 -3
  drivers/md/dm-stripe.c  +2 -2
  drivers/md/dm-thin.c  +4 -4
  drivers/md/dm-zero.c  +1 -1
  drivers/md/dm.c  +5 -5
  drivers/md/linear.c  +1 -1
  drivers/md/md.c  +2 -2
  drivers/md/multipath.c  +4 -4
  drivers/md/raid0.c  +1 -1
  drivers/md/raid1.c  +3 -3
  drivers/md/raid10.c  +4 -4
  drivers/md/raid5-cache.c  +1 -1
  drivers/md/raid5.c  +10 -10
  drivers/nvdimm/btt.c  +6 -6
  drivers/nvdimm/pmem.c  +8 -8
  drivers/target/target_core_iblock.c  +1 -1
  fs/block_dev.c  +2 -4
  fs/btrfs/check-integrity.c  +5 -5
  fs/btrfs/disk-io.c  +1 -1
  fs/btrfs/inode.c  +3 -3
  fs/btrfs/volumes.c  +3 -3
  fs/mpage.c  +1 -1
  include/linux/bio.h  +2 -2
  include/linux/blk-cgroup.h  +2 -2
  include/linux/blk_types.h  +19 -18
  include/linux/blkdev.h  +1 -1
  include/linux/fs.h  +1 -2
  include/linux/pagemap.h  +1 -1
  include/trace/events/bcache.h  +4 -4
  include/trace/events/block.h  +7 -7
  kernel/trace/blktrace.c  +3 -3
  mm/filemap.c  +2 -2
  mm/page_io.c  +3 -2

+ 2 - 2
Documentation/block/biodoc.txt

@@ -269,7 +269,7 @@ Arjan's proposed request priority scheme allows higher levels some broad
   requests which haven't aged too much on the queue. Potentially this priority
   could even be exposed to applications in some manner, providing higher level
   tunability. Time based aging avoids starvation of lower priority
-  requests. Some bits in the bi_rw flags field in the bio structure are
+  requests. Some bits in the bi_opf flags field in the bio structure are
   intended to be used for this priority information.


@@ -432,7 +432,7 @@ struct bio {
       struct bio          *bi_next;    /* request queue link */
       struct block_device *bi_bdev;	/* target device */
       unsigned long       bi_flags;    /* status, command, etc */
-       unsigned long       bi_rw;       /* low bits: r/w, high: priority */
+       unsigned long       bi_opf;       /* low bits: r/w, high: priority */

       unsigned int	bi_vcnt;     /* how may bio_vec's */
       struct bvec_iter	bi_iter;	/* current index into bio_vec array */

+ 1 - 1
Documentation/device-mapper/dm-flakey.txt

@@ -42,7 +42,7 @@ Optional feature parameters:
     <direction>: Either 'r' to corrupt reads or 'w' to corrupt writes.
		 'w' is incompatible with drop_writes.
     <value>: The value (from 0-255) to write.
-    <flags>: Perform the replacement only if bio->bi_rw has all the
+    <flags>: Perform the replacement only if bio->bi_opf has all the
	     selected flags set.

 Examples:

+ 1 - 1
block/bio-integrity.c

@@ -86,7 +86,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,

	bip->bip_bio = bio;
	bio->bi_integrity = bip;
-	bio->bi_rw |= REQ_INTEGRITY;
+	bio->bi_opf |= REQ_INTEGRITY;

	return bip;
 err:

+ 3 - 3
block/bio.c

@@ -580,7 +580,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
-	bio->bi_rw = bio_src->bi_rw;
+	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

@@ -663,7 +663,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
	if (!bio)
		return NULL;
	bio->bi_bdev		= bio_src->bi_bdev;
-	bio->bi_rw		= bio_src->bi_rw;
+	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

@@ -873,7 +873,7 @@ int submit_bio_wait(struct bio *bio)
	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = submit_bio_wait_endio;
-	bio->bi_rw |= REQ_SYNC;
+	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	wait_for_completion_io(&ret.event);


+ 13 - 13
block/blk-core.c

@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 */
-	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
+	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
		return false;

	return true;
@@ -1504,7 +1504,7 @@ EXPORT_SYMBOL_GPL(blk_add_request_payload);
 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio)
 {
-	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(q, req, bio))
		return false;
@@ -1526,7 +1526,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio)
 {
-	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(q, req, bio))
		return false;
@@ -1648,8 +1648,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 {
	req->cmd_type = REQ_TYPE_FS;

-	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
-	if (bio->bi_rw & REQ_RAHEAD)
+	req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
+	if (bio->bi_opf & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->errors = 0;
@@ -1660,7 +1660,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)

 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
-	const bool sync = !!(bio->bi_rw & REQ_SYNC);
+	const bool sync = !!(bio->bi_opf & REQ_SYNC);
	struct blk_plug *plug;
	int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
	struct request *req;
@@ -1681,7 +1681,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
		return BLK_QC_T_NONE;
	}

-	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
+	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
@@ -1728,7 +1728,7 @@ get_rq:
	/*
	 * Add in META/PRIO flags, if set, before we get to the IO scheduler
	 */
-	rw_flags |= (bio->bi_rw & (REQ_META | REQ_PRIO));
+	rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));

	/*
	 * Grab a free request. This is might sleep but can not fail.
@@ -1805,7 +1805,7 @@ static void handle_bad_sector(struct bio *bio)
	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
			bdevname(bio->bi_bdev, b),
-			bio->bi_rw,
+			bio->bi_opf,
			(unsigned long long)bio_end_sector(bio),
			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 }
@@ -1918,9 +1918,9 @@ generic_make_request_checks(struct bio *bio)
	 * drivers without flush support don't have to worry
	 * about them.
	 */
-	if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+	if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-		bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
+		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!nr_sectors) {
			err = 0;
			goto end_io;
@@ -2219,7 +2219,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
-		if ((bio->bi_rw & ff) != ff)
+		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}
@@ -2630,7 +2630,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
	/* mixed attributes always follow the first bio */
	if (req->cmd_flags & REQ_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
-		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	/*

+ 4 - 4
block/blk-merge.c

@@ -186,7 +186,7 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,

	if (split) {
		/* there isn't chance to merge the splitted bio */
-		split->bi_rw |= REQ_NOMERGE;
+		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
@@ -616,9 +616,9 @@ void blk_rq_set_mixed_merge(struct request *rq)
	 * Distributes the attributs to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
-		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
-			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
-		bio->bi_rw |= ff;
+		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
+			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
+		bio->bi_opf |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
 }

+ 5 - 5
block/blk-mq.c

@@ -1234,7 +1234,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
	ctx = blk_mq_get_ctx(q);
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

-	if (rw_is_sync(bio_op(bio), bio->bi_rw))
+	if (rw_is_sync(bio_op(bio), bio->bi_opf))
		op_flags |= REQ_SYNC;

	trace_block_getrq(q, bio, op);
@@ -1302,8 +1302,8 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
 */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
-	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
-	const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
	struct blk_map_ctx data;
	struct request *rq;
	unsigned int request_count = 0;
@@ -1396,8 +1396,8 @@ done:
 */
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
-	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
-	const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+	const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
	struct blk_plug *plug;
	unsigned int request_count = 0;
	struct blk_map_ctx data;

+ 4 - 4
block/blk-throttle.c

@@ -821,8 +821,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
	 * second time when it eventually gets issued.  Set it when a bio
	 * is being charged to a tg.
	 */
-	if (!(bio->bi_rw & REQ_THROTTLED))
-		bio->bi_rw |= REQ_THROTTLED;
+	if (!(bio->bi_opf & REQ_THROTTLED))
+		bio->bi_opf |= REQ_THROTTLED;
 }

 /**
@@ -1399,7 +1399,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
	WARN_ON_ONCE(!rcu_read_lock_held());

	/* see throtl_charge_bio() */
-	if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
+	if ((bio->bi_opf & REQ_THROTTLED) || !tg->has_rules[rw])
		goto out;

	spin_lock_irq(q->queue_lock);
@@ -1478,7 +1478,7 @@ out:
	 * being issued.
	 */
	if (!throttled)
-		bio->bi_rw &= ~REQ_THROTTLED;
+		bio->bi_opf &= ~REQ_THROTTLED;
	return throttled;
 }


+ 2 - 2
block/cfq-iosched.c

@@ -918,7 +918,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
 */
 static inline bool cfq_bio_sync(struct bio *bio)
 {
-	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
+	return bio_data_dir(bio) == READ || (bio->bi_opf & REQ_SYNC);
 }

 /*
@@ -2565,7 +2565,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
				struct bio *bio)
 {
-	cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_rw);
+	cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf);
 }

 static void

+ 8 - 8
drivers/block/brd.c

@@ -300,20 +300,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
 * Process a single bvec of a bio.
 */
 static int brd_do_bvec(struct brd_device *brd, struct page *page,
-			unsigned int len, unsigned int off, int op,
+			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
 {
	void *mem;
	int err = 0;

-	if (op_is_write(op)) {
+	if (is_write) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);
-	if (!op_is_write(op)) {
+	if (!is_write) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
@@ -350,8 +350,8 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
		unsigned int len = bvec.bv_len;
		int err;

-		err = brd_do_bvec(brd, bvec.bv_page, len,
-					bvec.bv_offset, bio_op(bio), sector);
+		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
+					op_is_write(bio_op(bio)), sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
@@ -366,11 +366,11 @@ io_error:
 }

 static int brd_rw_page(struct block_device *bdev, sector_t sector,
-		       struct page *page, int op)
+		       struct page *page, bool is_write)
 {
	struct brd_device *brd = bdev->bd_disk->private_data;
-	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
-	page_endio(page, op, err);
+	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
+	page_endio(page, is_write, err);
	return err;
 }


+ 4 - 4
drivers/block/drbd/drbd_main.c

@@ -1663,13 +1663,13 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection,
			     struct bio *bio)
 {
	if (connection->agreed_pro_version >= 95)
-		return  (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
-			(bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
-			(bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) |
+		return  (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
+			(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
+			(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
			(bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
			(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
	else
-		return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
+		return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
 }

 /* Used to send write or TRIM aka REQ_DISCARD requests

+ 1 - 1
drivers/block/drbd/drbd_receiver.c

@@ -1564,7 +1564,7 @@ static void drbd_issue_peer_wsame(struct drbd_device *device,
 * drbd_submit_peer_request()
 * @device:	DRBD device.
 * @peer_req:	peer request
- * @rw:		flag field, see bio->bi_rw
+ * @rw:		flag field, see bio->bi_opf
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.

+ 3 - 3
drivers/block/drbd/drbd_req.c

@@ -288,7 +288,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
	 */
	if (!ok &&
	    bio_op(req->master_bio) == REQ_OP_READ &&
-	    !(req->master_bio->bi_rw & REQ_RAHEAD) &&
+	    !(req->master_bio->bi_opf & REQ_RAHEAD) &&
	    !list_empty(&req->tl_requests))
		req->rq_state |= RQ_POSTPONED;

@@ -1137,7 +1137,7 @@ static int drbd_process_write_request(struct drbd_request *req)
	 * replicating, in which case there is no point. */
	if (unlikely(req->i.size == 0)) {
		/* The only size==0 bios we expect are empty flushes. */
-		D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH);
+		D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
		if (remote)
			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
		return remote;
@@ -1176,7 +1176,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)

	if (bio_op(bio) != REQ_OP_READ)
		type = DRBD_FAULT_DT_WR;
-	else if (bio->bi_rw & REQ_RAHEAD)
+	else if (bio->bi_opf & REQ_RAHEAD)
		type = DRBD_FAULT_DT_RA;
	else
		type = DRBD_FAULT_DT_RD;

+ 1 - 1
drivers/block/drbd/drbd_worker.c

@@ -256,7 +256,7 @@ void drbd_request_endio(struct bio *bio)
				what = DISCARD_COMPLETED_WITH_ERROR;
			break;
		case REQ_OP_READ:
-			if (bio->bi_rw & REQ_RAHEAD)
+			if (bio->bi_opf & REQ_RAHEAD)
				what = READ_AHEAD_COMPLETED_WITH_ERROR;
			else
				what = READ_COMPLETED_WITH_ERROR;

+ 1 - 1
drivers/block/pktcdvd.c

@@ -1157,7 +1157,7 @@ static int pkt_start_recovery(struct packet_data *pkt)

	bio_reset(pkt->bio);
	pkt->bio->bi_bdev = pd->bdev;
-	pkt->bio->bi_rw = REQ_WRITE;
+	bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
	pkt->bio->bi_iter.bi_sector = new_sector;
	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
	pkt->bio->bi_vcnt = pkt->frames;

+ 1 - 1
drivers/block/umem.c

@@ -535,7 +535,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
	*card->biotail = bio;
	bio->bi_next = NULL;
	card->biotail = &bio->bi_next;
-	if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+	if (bio->bi_opf & REQ_SYNC || !mm_check_plugged(card))
		activate(card);
	spin_unlock_irq(&card->lock);


+ 12 - 11
drivers/block/zram/zram_drv.c

@@ -843,15 +843,16 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 }

 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
-			int offset, int op)
+			int offset, bool is_write)
 {
	unsigned long start_time = jiffies;
+	int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
	int ret;

-	generic_start_io_acct(op, bvec->bv_len >> SECTOR_SHIFT,
+	generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

-	if (!op_is_write(op)) {
+	if (!is_write) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
@@ -859,10 +860,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

-	generic_end_io_acct(op, &zram->disk->part0, start_time);
+	generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
-		if (!op_is_write(op))
+		if (!is_write)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
@@ -903,17 +904,17 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset,
-					 bio_op(bio)) < 0)
+					 op_is_write(bio_op(bio))) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0,
-					 bio_op(bio)) < 0)
+					 op_is_write(bio_op(bio))) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset,
-					 bio_op(bio)) < 0)
+					 op_is_write(bio_op(bio))) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
@@ -970,7 +971,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
 }

 static int zram_rw_page(struct block_device *bdev, sector_t sector,
-		       struct page *page, int op)
+		       struct page *page, bool is_write)
 {
	int offset, err = -EIO;
	u32 index;
@@ -994,7 +995,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

-	err = zram_bvec_rw(zram, &bv, index, offset, op);
+	err = zram_bvec_rw(zram, &bv, index, offset, is_write);
 put_zram:
	zram_meta_put(zram);
 out:
@@ -1007,7 +1008,7 @@ out:
	 * (e.g., SetPageError, set_page_dirty and extra works).
	 */
	if (err == 0)
-		page_endio(page, op, 0);
+		page_endio(page, is_write, 0);
	return err;
 }


+ 6 - 6
drivers/md/bcache/request.c

@@ -208,7 +208,7 @@ static void bch_data_insert_start(struct closure *cl)
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
-	bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA);
+	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		unsigned i;
@@ -405,7 +405,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    op_is_write(bio_op(bio)) &&
-	    (bio->bi_rw & REQ_SYNC))
+	    (bio->bi_opf & REQ_SYNC))
		goto rescale;

	spin_lock(&dc->io_lock);
@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio,
	s->iop.write_prio	= 0;
	s->iop.error		= 0;
	s->iop.flags		= 0;
-	s->iop.flush_journal	= (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0;
+	s->iop.flush_journal	= (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
	s->iop.wq		= bcache_wq;

	return s;
@@ -796,8 +796,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
		goto out_submit;
	}

-	if (!(bio->bi_rw & REQ_RAHEAD) &&
-	    !(bio->bi_rw & REQ_META) &&
+	if (!(bio->bi_opf & REQ_RAHEAD) &&
+	    !(bio->bi_opf & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));
@@ -920,7 +920,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
		bch_writeback_add(dc);
		s->iop.bio = bio;

-		if (bio->bi_rw & REQ_PREFLUSH) {
+		if (bio->bi_opf & REQ_PREFLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

+ 1 - 1
drivers/md/bcache/super.c

@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

-		bio->bi_rw	= REQ_SYNC|REQ_META|op_flags;
+		bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io	= uuid_endio;

+ 1 - 1
drivers/md/bcache/writeback.h

@@ -57,7 +57,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
	if (would_skip)
		return false;

-	return bio->bi_rw & REQ_SYNC ||
+	return bio->bi_opf & REQ_SYNC ||
		in_use <= CUTOFF_WRITEBACK;
 }


+ 4 - 4
drivers/md/dm-cache-target.c

@@ -788,7 +788,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)

	spin_lock_irqsave(&cache->lock, flags);
	if (cache->need_tick_bio &&
-	    !(bio->bi_rw & (REQ_FUA | REQ_PREFLUSH)) &&
+	    !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
	    bio_op(bio) != REQ_OP_DISCARD) {
		pb->tick = true;
		cache->need_tick_bio = false;
@@ -830,7 +830,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)

 static int bio_triggers_commit(struct cache *cache, struct bio *bio)
 {
-	return bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+	return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
 }

 /*
@@ -1069,7 +1069,7 @@ static void dec_io_migrations(struct cache *cache)
 static bool discard_or_flush(struct bio *bio)
 {
	return bio_op(bio) == REQ_OP_DISCARD ||
-	       bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+	       bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
 }

 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1980,7 +1980,7 @@ static void process_deferred_bios(struct cache *cache)

		bio = bio_list_pop(&bios);

-		if (bio->bi_rw & REQ_PREFLUSH)
+		if (bio->bi_opf & REQ_PREFLUSH)
			process_flush_bio(cache, bio);
		else if (bio_op(bio) == REQ_OP_DISCARD)
			process_discard_bio(cache, &structs, bio);

+ 2 - 2
drivers/md/dm-crypt.c

@@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
-	bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_rw);
+	bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_opf);
 }

 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1915,7 +1915,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
	 */
-	if (unlikely(bio->bi_rw & REQ_PREFLUSH ||
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
	    bio_op(bio) == REQ_OP_DISCARD)) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))

+ 1 - 1
drivers/md/dm-era-target.c

@@ -1542,7 +1542,7 @@ static int era_map(struct dm_target *ti, struct bio *bio)
	/*
	 * REQ_PREFLUSH bios carry no data, so we're not interested in them.
	 */
-	if (!(bio->bi_rw & REQ_PREFLUSH) &&
+	if (!(bio->bi_opf & REQ_PREFLUSH) &&
	    (bio_data_dir(bio) == WRITE) &&
	    !metadata_current_marked(era->md, block)) {
		defer_bio(era, bio);

+ 3 - 3
drivers/md/dm-flakey.c

@@ -16,7 +16,7 @@
 #define DM_MSG_PREFIX "flakey"

 #define all_corrupt_bio_flags_match(bio, fc)	\
-	(((bio)->bi_rw & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
+	(((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)

 /*
 * Flakey: Used for testing only, simulates intermittent,
@@ -266,9 +266,9 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
		data[fc->corrupt_bio_byte - 1] = fc->corrupt_bio_value;

		DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
-			"(rw=%c bi_rw=%u bi_sector=%llu cur_bytes=%u)\n",
+			"(rw=%c bi_opf=%u bi_sector=%llu cur_bytes=%u)\n",
			bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
-			(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+			(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,
			(unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
	}
 }

+ 3 - 3
drivers/md/dm-io.c

@@ -505,9 +505,9 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
- * If you fail to do one of these, the IO will be submitted to the disk after
- * q->unplug_delay, which defaults to 3ms in blk-settings.c.
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_opf. If you fail to do one of these, the IO will be submitted to
+ * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)

+ 2 - 2
drivers/md/dm-log-writes.c

@@ -555,8 +555,8 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
	struct bio_vec bv;
	size_t alloc_size;
	int i = 0;
-	bool flush_bio = (bio->bi_rw & REQ_PREFLUSH);
-	bool fua_bio = (bio->bi_rw & REQ_FUA);
+	bool flush_bio = (bio->bi_opf & REQ_PREFLUSH);
+	bool fua_bio = (bio->bi_opf & REQ_FUA);
	bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);

	pb->block = NULL;

+ 1 - 1
drivers/md/dm-mpath.c

@@ -661,7 +661,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m

	bio->bi_error = 0;
	bio->bi_bdev = pgpath->path.dev->bdev;
-	bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
+	bio->bi_opf |= REQ_FAILFAST_TRANSPORT;

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,

+ 5 - 5
drivers/md/dm-raid1.c

@@ -657,7 +657,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_WRITE,
-		.bi_op_flags = bio->bi_rw & WRITE_FLUSH_FUA,
+		.bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = write_callback,
@@ -704,7 +704,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
-		if ((bio->bi_rw & REQ_PREFLUSH) ||
+		if ((bio->bi_opf & REQ_PREFLUSH) ||
		    (bio_op(bio) == REQ_OP_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
@@ -1217,7 +1217,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
-		if (bio->bi_rw & REQ_RAHEAD)
+		if (bio->bi_opf & REQ_RAHEAD)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
@@ -1253,7 +1253,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
-		if (!(bio->bi_rw & REQ_PREFLUSH) &&
+		if (!(bio->bi_opf & REQ_PREFLUSH) &&
		    bio_op(bio) != REQ_OP_DISCARD)
			dm_rh_dec(ms->rh, bio_record->write_region);
		return error;
@@ -1262,7 +1262,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
	if (error == -EOPNOTSUPP)
		goto out;

-	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
+	if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
		goto out;

	if (unlikely(error)) {

+ 2 - 2
drivers/md/dm-region-hash.c

@@ -398,7 +398,7 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
	region_t region = dm_rh_bio_to_region(rh, bio);
	int recovering = 0;

-	if (bio->bi_rw & REQ_PREFLUSH) {
+	if (bio->bi_opf & REQ_PREFLUSH) {
		rh->flush_failure = 1;
		return;
	}
@@ -526,7 +526,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next) {
-		if (bio->bi_rw & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
+		if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
			continue;
		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
	}

+ 3 - 3
drivers/md/dm-snap.c

@@ -1680,7 +1680,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)

	init_tracked_chunk(bio);

-	if (bio->bi_rw & REQ_PREFLUSH) {
+	if (bio->bi_opf & REQ_PREFLUSH) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}
@@ -1800,7 +1800,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)

	init_tracked_chunk(bio);

-	if (bio->bi_rw & REQ_PREFLUSH) {
+	if (bio->bi_opf & REQ_PREFLUSH) {
		if (!dm_bio_get_target_bio_nr(bio))
			bio->bi_bdev = s->origin->bdev;
		else
@@ -2286,7 +2286,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)

	bio->bi_bdev = o->dev->bdev;

-	if (unlikely(bio->bi_rw & REQ_PREFLUSH))
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH))
		return DM_MAPIO_REMAPPED;

	if (bio_data_dir(bio) != WRITE)

+ 2 - 2
drivers/md/dm-stripe.c

@@ -286,7 +286,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
	uint32_t stripe;
	unsigned target_bio_nr;

-	if (bio->bi_rw & REQ_PREFLUSH) {
+	if (bio->bi_opf & REQ_PREFLUSH) {
		target_bio_nr = dm_bio_get_target_bio_nr(bio);
		BUG_ON(target_bio_nr >= sc->stripes);
		bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
@@ -383,7 +383,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
	if (!error)
		return 0; /* I/O complete */

-	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
+	if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
		return error;

	if (error == -EOPNOTSUPP)

+ 4 - 4
drivers/md/dm-thin.c

@@ -699,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)

 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
 {
-	return (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+	return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
 }

@@ -870,7 +870,7 @@ static void __inc_remap_and_issue_cell(void *context,
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
-		if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+		if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
		    bio_op(bio) == REQ_OP_DISCARD)
			bio_list_add(&info->defer_bios, bio);
		else {
@@ -1717,7 +1717,7 @@ static void __remap_and_issue_shared_cell(void *context,

	while ((bio = bio_list_pop(&cell->bios))) {
		if ((bio_data_dir(bio) == WRITE) ||
-		    (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+		    (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
		     bio_op(bio) == REQ_OP_DISCARD))
			bio_list_add(&info->defer_bios, bio);
		else {
@@ -2635,7 +2635,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
		return DM_MAPIO_SUBMITTED;
	}

-	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA) ||
+	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
	    bio_op(bio) == REQ_OP_DISCARD) {
		thin_defer_bio_with_throttle(tc, bio);
		return DM_MAPIO_SUBMITTED;

+ 1 - 1
drivers/md/dm-zero.c

@@ -37,7 +37,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
 {
	switch (bio_op(bio)) {
	case REQ_OP_READ:
-		if (bio->bi_rw & REQ_RAHEAD) {
+		if (bio->bi_opf & REQ_RAHEAD) {
			/* readahead of null bytes only wastes buffer cache */
			return -EIO;
		}

+ 5 - 5
drivers/md/dm.c

@@ -798,12 +798,12 @@ static void dec_pending(struct dm_io *io, int error)
		if (io_error == DM_ENDIO_REQUEUE)
			return;

-		if ((bio->bi_rw & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
+		if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_PREFLUSH.
			 */
-			bio->bi_rw &= ~REQ_PREFLUSH;
+			bio->bi_opf &= ~REQ_PREFLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
@@ -964,7 +964,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 {
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
-	BUG_ON(bio->bi_rw & REQ_PREFLUSH);
+	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
@@ -1252,7 +1252,7 @@ static void __split_and_process_bio(struct mapped_device *md,

	start_io_acct(ci.io);

-	if (bio->bi_rw & REQ_PREFLUSH) {
+	if (bio->bi_opf & REQ_PREFLUSH) {
		ci.bio = &ci.md->flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
@@ -1290,7 +1290,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		dm_put_live_table(md, srcu_idx);

-		if (!(bio->bi_rw & REQ_RAHEAD))
+		if (!(bio->bi_opf & REQ_RAHEAD))
			queue_io(md, bio);
		else
			bio_io_error(bio);

+ 1 - 1
drivers/md/linear.c

@@ -221,7 +221,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
	struct bio *split;
	sector_t start_sector, end_sector, data_offset;

-	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

+ 2 - 2
drivers/md/md.c

@@ -285,7 +285,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
	 */
	sectors = bio_sectors(bio);
	/* bio could be mergeable after passing to underlayer */
-	bio->bi_rw &= ~REQ_NOMERGE;
+	bio->bi_opf &= ~REQ_NOMERGE;
	mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
@@ -414,7 +414,7 @@ static void md_submit_flush_data(struct work_struct *ws)
		/* an empty barrier - all done */
		bio_endio(bio);
	else {
-		bio->bi_rw &= ~REQ_PREFLUSH;
+		bio->bi_opf &= ~REQ_PREFLUSH;
		mddev->pers->make_request(mddev, bio);
	}


+ 4 - 4
drivers/md/multipath.c

@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio)

	if (!bio->bi_error)
		multipath_end_bh_io(mp_bh, 0);
-	else if (!(bio->bi_rw & REQ_RAHEAD)) {
+	else if (!(bio->bi_opf & REQ_RAHEAD)) {
		/*
		 * oops, IO error:
		 */
@@ -112,7 +112,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;

-	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}
@@ -135,7 +135,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)

	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
+	mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	generic_make_request(&mp_bh->bio);
@@ -360,7 +360,7 @@ static void multipathd(struct md_thread *thread)
			bio->bi_iter.bi_sector +=
				conf->multipaths[mp_bh->path].rdev->data_offset;
			bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-			bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
+			bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
			bio->bi_end_io = multipath_end_request;
			bio->bi_private = mp_bh;
			generic_make_request(bio);

+ 1 - 1
drivers/md/raid0.c

@@ -458,7 +458,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
	struct md_rdev *tmp_dev;
	struct bio *split;

-	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}

+ 3 - 3
drivers/md/raid1.c

@@ -1043,8 +1043,8 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
	unsigned long flags;
	const int op = bio_op(bio);
	const int rw = bio_data_dir(bio);
-	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
-	const unsigned long do_flush_fua = (bio->bi_rw &
+	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+	const unsigned long do_flush_fua = (bio->bi_opf &
						(REQ_PREFLUSH | REQ_FUA));
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
@@ -2318,7 +2318,7 @@ read_more:
		raid_end_bio_io(r1_bio);
	} else {
		const unsigned long do_sync
-			= r1_bio->master_bio->bi_rw & REQ_SYNC;
+			= r1_bio->master_bio->bi_opf & REQ_SYNC;
		if (bio) {
			r1_bio->bios[r1_bio->read_disk] =
				mddev->ro ? IO_BLOCKED : NULL;

+ 4 - 4
drivers/md/raid10.c

@@ -1054,8 +1054,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
	int i;
	const int op = bio_op(bio);
	const int rw = bio_data_dir(bio);
-	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
-	const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
+	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+	const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
	unsigned long flags;
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
@@ -1440,7 +1440,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)

	struct bio *split;

-	if (unlikely(bio->bi_rw & REQ_PREFLUSH)) {
+	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return;
	}
@@ -2533,7 +2533,7 @@ read_more:
		return;
	}

-	do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
+	do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
	slot = r10_bio->read_slot;
	printk_ratelimited(
		KERN_ERR

+ 1 - 1
drivers/md/raid5-cache.c

@@ -536,7 +536,7 @@ int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
		bio_endio(bio);
		return 0;
	}
-	bio->bi_rw &= ~REQ_PREFLUSH;
+	bio->bi_opf &= ~REQ_PREFLUSH;
	return -EAGAIN;
 }


+ 10 - 10
drivers/md/raid5.c

@@ -806,7 +806,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
 	dd_idx = 0;
 	while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
 		dd_idx++;
-	if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw ||
+	if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf ||
 	    bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
 		goto unlock_out;
 
@@ -1003,7 +1003,7 @@ again:
 
 			pr_debug("%s: for %llu schedule op %d on disc %d\n",
 				__func__, (unsigned long long)sh->sector,
-				bi->bi_rw, i);
+				bi->bi_opf, i);
 			atomic_inc(&sh->count);
 			if (sh != head_sh)
 				atomic_inc(&head_sh->count);
@@ -1014,7 +1014,7 @@ again:
 				bi->bi_iter.bi_sector = (sh->sector
 						 + rdev->data_offset);
 			if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
-				bi->bi_rw |= REQ_NOMERGE;
+				bi->bi_opf |= REQ_NOMERGE;
 
 			if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
 				WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
@@ -1055,7 +1055,7 @@ again:
 			pr_debug("%s: for %llu schedule op %d on "
 				 "replacement disc %d\n",
 				__func__, (unsigned long long)sh->sector,
-				rbi->bi_rw, i);
+				rbi->bi_opf, i);
 			atomic_inc(&sh->count);
 			if (sh != head_sh)
 				atomic_inc(&head_sh->count);
@@ -1088,7 +1088,7 @@ again:
 			if (op_is_write(op))
 				set_bit(STRIPE_DEGRADED, &sh->state);
 			pr_debug("skip op %d on disc %d for sector %llu\n",
-				bi->bi_rw, i, (unsigned long long)sh->sector);
+				bi->bi_opf, i, (unsigned long long)sh->sector);
 			clear_bit(R5_LOCKED, &sh->dev[i].flags);
 			set_bit(STRIPE_HANDLE, &sh->state);
 		}
@@ -1619,9 +1619,9 @@ again:
 
 			while (wbi && wbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
-				if (wbi->bi_rw & REQ_FUA)
+				if (wbi->bi_opf & REQ_FUA)
 					set_bit(R5_WantFUA, &dev->flags);
-				if (wbi->bi_rw & REQ_SYNC)
+				if (wbi->bi_opf & REQ_SYNC)
 					set_bit(R5_SyncIO, &dev->flags);
 				if (bio_op(wbi) == REQ_OP_DISCARD)
 					set_bit(R5_Discard, &dev->flags);
@@ -5154,7 +5154,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 	DEFINE_WAIT(w);
 	bool do_prepare;
 
-	if (unlikely(bi->bi_rw & REQ_PREFLUSH)) {
+	if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
 		int ret = r5l_handle_flush_request(conf->log, bi);
 
 		if (ret == 0)
@@ -5237,7 +5237,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 			(unsigned long long)logical_sector);
 
 		sh = raid5_get_active_stripe(conf, new_sector, previous,
				       (bi->bi_rw & REQ_RAHEAD), 0);
-				       (bi->bi_rw & REQ_RAHEAD), 0);
+				       (bi->bi_opf & REQ_RAHEAD), 0);
 		if (sh) {
 			if (unlikely(previous)) {
 				/* expansion might have moved on while waiting for a
@@ -5305,7 +5305,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 			set_bit(STRIPE_HANDLE, &sh->state);
 			clear_bit(STRIPE_DELAYED, &sh->state);
 			if ((!sh->batch_head || sh == sh->batch_head) &&
-			    (bi->bi_rw & REQ_SYNC) &&
+			    (bi->bi_opf & REQ_SYNC) &&
 			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 				atomic_inc(&conf->preread_active_stripes);
 			release_stripe_plug(mddev, sh);

+ 6 - 6
drivers/nvdimm/btt.c

@@ -1133,11 +1133,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 
 static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
 			struct page *page, unsigned int len, unsigned int off,
-			int op, sector_t sector)
+			bool is_write, sector_t sector)
 {
 	int ret;
 
-	if (!op_is_write(op)) {
+	if (!is_write) {
 		ret = btt_read_pg(btt, bip, page, off, sector, len);
 		flush_dcache_page(page);
 	} else {
@@ -1180,7 +1180,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
 		BUG_ON(len % btt->sector_size);
 
 		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
-				  bio_op(bio), iter.bi_sector);
+				  op_is_write(bio_op(bio)), iter.bi_sector);
 		if (err) {
 			dev_info(&btt->nd_btt->dev,
 					"io error in %s sector %lld, len %d,\n",
@@ -1200,12 +1200,12 @@ out:
 }
 
 static int btt_rw_page(struct block_device *bdev, sector_t sector,
-		struct page *page, int op)
+		struct page *page, bool is_write)
 {
 	struct btt *btt = bdev->bd_disk->private_data;
 
-	btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, op, sector);
-	page_endio(page, op, 0);
+	btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
+	page_endio(page, is_write, 0);
 	return 0;
 }
 

+ 8 - 8
drivers/nvdimm/pmem.c

@@ -67,7 +67,7 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
 }
 
 static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
-			unsigned int len, unsigned int off, int op,
+			unsigned int len, unsigned int off, bool is_write,
 			sector_t sector)
 {
 	int rc = 0;
@@ -79,7 +79,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
 		bad_pmem = true;
 
-	if (!op_is_write(op)) {
+	if (!is_write) {
 		if (unlikely(bad_pmem))
 			rc = -EIO;
 		else {
@@ -128,13 +128,13 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 	struct pmem_device *pmem = q->queuedata;
 	struct nd_region *nd_region = to_region(pmem);
 
-	if (bio->bi_rw & REQ_FLUSH)
+	if (bio->bi_opf & REQ_FLUSH)
 		nvdimm_flush(nd_region);
 
 	do_acct = nd_iostat_start(bio, &start);
 	bio_for_each_segment(bvec, bio, iter) {
 		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
-				bvec.bv_offset, bio_op(bio),
+				bvec.bv_offset, op_is_write(bio_op(bio)),
 				iter.bi_sector);
 		if (rc) {
 			bio->bi_error = rc;
@@ -144,7 +144,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 	if (do_acct)
 		nd_iostat_end(bio, start);
 
-	if (bio->bi_rw & REQ_FUA)
+	if (bio->bi_opf & REQ_FUA)
 		nvdimm_flush(nd_region);
 
 	bio_endio(bio);
@@ -152,12 +152,12 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 }
 
 static int pmem_rw_page(struct block_device *bdev, sector_t sector,
-		       struct page *page, int op)
+		       struct page *page, bool is_write)
 {
 	struct pmem_device *pmem = bdev->bd_queue->queuedata;
 	int rc;
 
-	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, op, sector);
+	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);
 
 	/*
 	 * The ->rw_page interface is subtle and tricky.  The core
@@ -166,7 +166,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
 	 * caused by double completion.
 	 */
 	if (rc == 0)
-		page_endio(page, op, 0);
+		page_endio(page, is_write, 0);
 
 	return rc;
 }

+ 1 - 1
drivers/target/target_core_iblock.c

@@ -388,7 +388,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
 	bio = bio_alloc(GFP_KERNEL, 0);
 	bio->bi_end_io = iblock_end_io_flush;
 	bio->bi_bdev = ib_dev->ibd_bd;
-	bio->bi_rw = WRITE_FLUSH;
+	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
 	if (!immed)
 		bio->bi_private = cmd;
 	submit_bio(bio);

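With bi_opf now carrying both the opcode and the flags, a standalone cache-flush bio is set up entirely through the accessor instead of assigning the field directly, as the iblock hunk above does. A minimal sketch of the same pattern follows; my_issue_flush() and its callback arguments are illustrative, not part of this series.

	/* Sketch only: my_issue_flush() is a hypothetical helper. */
	static void my_issue_flush(struct block_device *bdev,
				   bio_end_io_t *done, void *private)
	{
		struct bio *bio = bio_alloc(GFP_KERNEL, 0);	/* no data payload */

		bio->bi_bdev = bdev;
		bio->bi_end_io = done;
		bio->bi_private = private;
		/* opcode and flags set in one call; bi_opf is not touched by hand */
		bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
		submit_bio(bio);
	}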
+ 2 - 4
fs/block_dev.c

@@ -416,8 +416,7 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
 	result = blk_queue_enter(bdev->bd_queue, false);
 	if (result)
 		return result;
-	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
-			      REQ_OP_READ);
+	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
 	blk_queue_exit(bdev->bd_queue);
 	return result;
 }
@@ -455,8 +454,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
 		return result;
 
 	set_page_writeback(page);
-	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
-			      REQ_OP_WRITE);
+	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
 	if (result)
 		end_page_writeback(page);
 	else

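On the driver side, ->rw_page() now only has to branch on the bool that bdev_read_page()/bdev_write_page() pass in above. A hedged sketch of what an implementation can look like, mirroring the btt/pmem pattern in this series; the my_dev_read_page()/my_dev_write_page() helpers are hypothetical.

	/* Sketch only: "my_dev_*" names are illustrative, not from this series. */
	static int my_rw_page(struct block_device *bdev, sector_t sector,
			      struct page *page, bool is_write)
	{
		struct my_dev *dev = bdev->bd_disk->private_data;
		int err;

		if (is_write)
			err = my_dev_write_page(dev, sector, page);
		else
			err = my_dev_read_page(dev, sector, page);

		/* on success the driver completes the page itself */
		if (!err)
			page_endio(page, is_write, 0);
		return err;
	}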
+ 5 - 5
fs/btrfs/check-integrity.c

@@ -2945,7 +2945,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
 			printk(KERN_INFO
 			       "submit_bio(rw=%d,0x%x, bi_vcnt=%u,"
 			       " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
-			       bio_op(bio), bio->bi_rw, bio->bi_vcnt,
+			       bio_op(bio), bio->bi_opf, bio->bi_vcnt,
 			       (unsigned long long)bio->bi_iter.bi_sector,
 			       dev_bytenr, bio->bi_bdev);
 
@@ -2976,18 +2976,18 @@ static void __btrfsic_submit_bio(struct bio *bio)
 		btrfsic_process_written_block(dev_state, dev_bytenr,
 					      mapped_datav, bio->bi_vcnt,
 					      bio, &bio_is_patched,
-					      NULL, bio->bi_rw);
+					      NULL, bio->bi_opf);
 		while (i > 0) {
 			i--;
 			kunmap(bio->bi_io_vec[i].bv_page);
 		}
 		kfree(mapped_datav);
-	} else if (NULL != dev_state && (bio->bi_rw & REQ_PREFLUSH)) {
+	} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
 		if (dev_state->state->print_mask &
 		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
 			printk(KERN_INFO
 			       "submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
-			       bio_op(bio), bio->bi_rw, bio->bi_bdev);
+			       bio_op(bio), bio->bi_opf, bio->bi_bdev);
 		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
 			if ((dev_state->state->print_mask &
 			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
@@ -3005,7 +3005,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
 			block->never_written = 0;
 			block->iodone_w_error = 0;
 			block->flush_gen = dev_state->last_flush_gen + 1;
-			block->submit_bio_bh_rw = bio->bi_rw;
+			block->submit_bio_bh_rw = bio->bi_opf;
 			block->orig_bio_bh_private = bio->bi_private;
 			block->orig_bio_bh_end_io.bio = bio->bi_end_io;
 			block->next_in_same_bio = NULL;

+ 1 - 1
fs/btrfs/disk-io.c

@@ -870,7 +870,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
 	atomic_inc(&fs_info->nr_async_submits);
 
-	if (bio->bi_rw & REQ_SYNC)
+	if (bio->bi_opf & REQ_SYNC)
 		btrfs_set_work_high_priority(&async->work);
 
 	btrfs_queue_work(fs_info->workers, &async->work);

+ 3 - 3
fs/btrfs/inode.c

@@ -8209,7 +8209,7 @@ static void btrfs_end_dio_bio(struct bio *bio)
 	if (err)
 		btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
 			   "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
-			   btrfs_ino(dip->inode), bio_op(bio), bio->bi_rw,
+			   btrfs_ino(dip->inode), bio_op(bio), bio->bi_opf,
 			   (unsigned long long)bio->bi_iter.bi_sector,
 			   bio->bi_iter.bi_size, err);
 
@@ -8373,7 +8373,7 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
 	if (!bio)
 		return -ENOMEM;
 
-	bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw);
+	bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_opf);
 	bio->bi_private = dip;
 	bio->bi_end_io = btrfs_end_dio_bio;
 	btrfs_io_bio(bio)->logical = file_offset;
@@ -8411,7 +8411,7 @@ next_block:
 						  start_sector, GFP_NOFS);
 			if (!bio)
 				goto out_err;
-			bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_rw);
+			bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_opf);
 			bio->bi_private = dip;
 			bio->bi_end_io = btrfs_end_dio_bio;
 			btrfs_io_bio(bio)->logical = file_offset;

+ 3 - 3
fs/btrfs/volumes.c

@@ -6012,7 +6012,7 @@ static void btrfs_end_bio(struct bio *bio)
 				else
 					btrfs_dev_stat_inc(dev,
 						BTRFS_DEV_STAT_READ_ERRS);
-				if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
+				if ((bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH)
 					btrfs_dev_stat_inc(dev,
 						BTRFS_DEV_STAT_FLUSH_ERRS);
 				btrfs_dev_stat_print_on_error(dev);
@@ -6089,7 +6089,7 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
 	bio->bi_next = NULL;
 
 	spin_lock(&device->io_lock);
-	if (bio->bi_rw & REQ_SYNC)
+	if (bio->bi_opf & REQ_SYNC)
 		pending_bios = &device->pending_sync_bios;
 	else
 		pending_bios = &device->pending_bios;
@@ -6127,7 +6127,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
 		rcu_read_lock();
 		name = rcu_dereference(dev->name);
 		pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu "
-			 "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_rw,
+			 "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_opf,
 			 (u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
 			 name->str, dev->devid, bio->bi_iter.bi_size);
 		rcu_read_unlock();

+ 1 - 1
fs/mpage.c

@@ -50,7 +50,7 @@ static void mpage_end_io(struct bio *bio)
 
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
-		page_endio(page, bio_op(bio), bio->bi_error);
+		page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
 	}
 
 	bio_put(bio);

+ 2 - 2
include/linux/bio.h

@@ -95,7 +95,7 @@ static inline bool bio_is_rw(struct bio *bio)
 
 static inline bool bio_mergeable(struct bio *bio)
 {
-	if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+	if (bio->bi_opf & REQ_NOMERGE_FLAGS)
 		return false;
 
 	return true;
@@ -318,7 +318,7 @@ struct bio_integrity_payload {
 
 static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
 {
-	if (bio->bi_rw & REQ_INTEGRITY)
+	if (bio->bi_opf & REQ_INTEGRITY)
 		return bio->bi_integrity;
 
 	return NULL;

+ 2 - 2
include/linux/blk-cgroup.h

@@ -714,9 +714,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 
 	if (!throtl) {
 		blkg = blkg ?: q->root_blkg;
-		blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_rw,
+		blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf,
 				bio->bi_iter.bi_size);
-		blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_rw, 1);
+		blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1);
 	}
 
 	rcu_read_unlock();

+ 19 - 18
include/linux/blk_types.h

@@ -18,17 +18,6 @@ struct cgroup_subsys_state;
 typedef void (bio_end_io_t) (struct bio *);
 typedef void (bio_destructor_t) (struct bio *);
 
-enum req_op {
-	REQ_OP_READ,
-	REQ_OP_WRITE,
-	REQ_OP_DISCARD,		/* request to discard sectors */
-	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
-	REQ_OP_WRITE_SAME,	/* write same block many times */
-	REQ_OP_FLUSH,		/* request for cache flush */
-};
-
-#define REQ_OP_BITS 3
-
 #ifdef CONFIG_BLOCK
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
@@ -38,8 +27,9 @@ struct bio {
 	struct bio		*bi_next;	/* request queue link */
 	struct block_device	*bi_bdev;
 	int			bi_error;
-	unsigned int		bi_rw;		/* bottom bits req flags,
-						 * top bits REQ_OP
+	unsigned int		bi_opf;		/* bottom bits req flags,
+						 * top bits REQ_OP. Use
+						 * accessors.
 						 */
 	unsigned short		bi_flags;	/* status, command, etc */
 	unsigned short		bi_ioprio;
@@ -100,13 +90,13 @@ struct bio {
 };
 
 #define BIO_OP_SHIFT	(8 * sizeof(unsigned int) - REQ_OP_BITS)
-#define bio_op(bio)	((bio)->bi_rw >> BIO_OP_SHIFT)
+#define bio_op(bio)	((bio)->bi_opf >> BIO_OP_SHIFT)
 
 #define bio_set_op_attrs(bio, op, op_flags) do {		\
 	WARN_ON(op >= (1 << REQ_OP_BITS));			\
-	(bio)->bi_rw &= ((1 << BIO_OP_SHIFT) - 1);		\
-	(bio)->bi_rw |= ((unsigned int) (op) << BIO_OP_SHIFT);	\
-	(bio)->bi_rw |= op_flags;				\
+	(bio)->bi_opf &= ((1 << BIO_OP_SHIFT) - 1);		\
+	(bio)->bi_opf |= ((unsigned int) (op) << BIO_OP_SHIFT);	\
+	(bio)->bi_opf |= op_flags;				\
 } while (0)
 
 #define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
@@ -149,7 +139,7 @@ struct bio {
 
 /*
  * Request flags.  For use in the cmd_flags field of struct request, and in
- * bi_rw of struct bio.  Note that some flags are only valid in either one.
+ * bi_opf of struct bio.  Note that some flags are only valid in either one.
  */
 enum rq_flag_bits {
 	/* common flags */
@@ -239,6 +229,17 @@ enum rq_flag_bits {
 #define REQ_HASHED		(1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
 
+enum req_op {
+	REQ_OP_READ,
+	REQ_OP_WRITE,
+	REQ_OP_DISCARD,		/* request to discard sectors */
+	REQ_OP_SECURE_ERASE,	/* request to securely erase sectors */
+	REQ_OP_WRITE_SAME,	/* write same block many times */
+	REQ_OP_FLUSH,		/* request for cache flush */
+};
+
+#define REQ_OP_BITS 3
+
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE	-1U
 #define BLK_QC_T_SHIFT	16

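After the rename, bi_opf is the only field describing the request on a bio: the REQ_OP_* opcode sits in the top REQ_OP_BITS bits and the rq_flag_bits flags in the rest, and the bio_op()/bio_set_op_attrs() accessors above are how it is meant to be touched. A small hedged sketch of the intended usage; my_submit_write() and my_inspect() are illustrative names, not part of this series.

	/* Sketch of the intended bi_opf usage; helper names are illustrative. */
	static void my_submit_write(struct bio *bio, bool sync)
	{
		/* pack opcode and optional flags in one accessor call */
		bio_set_op_attrs(bio, REQ_OP_WRITE, sync ? REQ_SYNC : 0);
		submit_bio(bio);
	}

	static void my_inspect(struct bio *bio)
	{
		if (bio_op(bio) == REQ_OP_DISCARD)	/* opcode from the top bits */
			return;
		if (bio->bi_opf & REQ_SYNC)		/* flag bits tested directly */
			pr_debug("sync bio\n");
	}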
+ 1 - 1
include/linux/blkdev.h

@@ -1672,7 +1672,7 @@ struct blk_dax_ctl {
 struct block_device_operations {
 	int (*open) (struct block_device *, fmode_t);
 	void (*release) (struct gendisk *, fmode_t);
-	int (*rw_page)(struct block_device *, sector_t, struct page *, int op);
+	int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,

+ 1 - 2
include/linux/fs.h

@@ -2480,13 +2480,12 @@ extern void init_special_inode(struct inode *, umode_t, dev_t);
 extern void make_bad_inode(struct inode *);
 extern bool is_bad_inode(struct inode *);
 
+#ifdef CONFIG_BLOCK
 static inline bool op_is_write(unsigned int op)
 {
 	return op == REQ_OP_READ ? false : true;
 }
 
-#ifdef CONFIG_BLOCK
-
 /*
  * return data direction, READ or WRITE
  */

+ 1 - 1
include/linux/pagemap.h

@@ -510,7 +510,7 @@ static inline void wait_on_page_writeback(struct page *page)
 extern void end_page_writeback(struct page *page);
 void wait_for_stable_page(struct page *page);
 
-void page_endio(struct page *page, int op, int err);
+void page_endio(struct page *page, bool is_write, int err);
 
 /*
  * Add an arbitrary waiter to a page's wait queue

+ 4 - 4
include/trace/events/bcache.h

@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(bcache_request,
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
 			      bio->bi_iter.bi_size);
 	),
 
@@ -102,7 +102,7 @@ DECLARE_EVENT_CLASS(bcache_bio,
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
 			      bio->bi_iter.bi_size);
 	),
 
@@ -138,7 +138,7 @@ TRACE_EVENT(bcache_read,
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
 			      bio->bi_iter.bi_size);
 		__entry->cache_hit = hit;
 		__entry->bypass = bypass;
@@ -170,7 +170,7 @@ TRACE_EVENT(bcache_write,
 		__entry->inode		= inode;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
 			      bio->bi_iter.bi_size);
 		__entry->writeback = writeback;
 		__entry->bypass = bypass;

+ 7 - 7
include/trace/events/block.h

@@ -274,7 +274,7 @@ TRACE_EVENT(block_bio_bounce,
 					  bio->bi_bdev->bd_dev : 0;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
 			      bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
@@ -313,7 +313,7 @@ TRACE_EVENT(block_bio_complete,
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
 		__entry->error		= error;
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
 			      bio->bi_iter.bi_size);
 	),
 
@@ -341,7 +341,7 @@ DECLARE_EVENT_CLASS(block_bio_merge,
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
 			      bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
@@ -409,7 +409,7 @@ TRACE_EVENT(block_bio_queue,
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
 			      bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
@@ -439,7 +439,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
 		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0;
 		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
 		blk_fill_rwbs(__entry->rwbs, bio ? bio_op(bio) : 0,
-			      bio ? bio->bi_rw : 0, __entry->nr_sector);
+			      bio ? bio->bi_opf : 0, __entry->nr_sector);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
         ),
 
@@ -573,7 +573,7 @@ TRACE_EVENT(block_split,
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->new_sector	= new_sector;
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
 			      bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
@@ -617,7 +617,7 @@ TRACE_EVENT(block_bio_remap,
 		__entry->nr_sector	= bio_sectors(bio);
 		__entry->old_dev	= dev;
 		__entry->old_sector	= from;
-		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_rw,
+		blk_fill_rwbs(__entry->rwbs, bio_op(bio), bio->bi_opf,
 			      bio->bi_iter.bi_size);
 	),
 

+ 3 - 3
kernel/trace/blktrace.c

@@ -776,7 +776,7 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 		return;
 
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-			bio_op(bio), bio->bi_rw, what, error, 0, NULL);
+			bio_op(bio), bio->bi_opf, what, error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
@@ -881,7 +881,7 @@ static void blk_add_trace_split(void *ignore,
 		__be64 rpdu = cpu_to_be64(pdu);
 
 		__blk_add_trace(bt, bio->bi_iter.bi_sector,
-				bio->bi_iter.bi_size, bio_op(bio), bio->bi_rw,
+				bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
 				BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu),
 				&rpdu);
 	}
@@ -915,7 +915,7 @@ static void blk_add_trace_bio_remap(void *ignore,
 	r.sector_from = cpu_to_be64(from);
 
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-			bio_op(bio), bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
+			bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_error,
 			sizeof(r), &r);
 }
 

+ 2 - 2
mm/filemap.c

@@ -887,9 +887,9 @@ EXPORT_SYMBOL(end_page_writeback);
  * After completing I/O on a page, call this routine to update the page
  * flags appropriately
  */
-void page_endio(struct page *page, int op, int err)
+void page_endio(struct page *page, bool is_write, int err)
 {
-	if (!op_is_write(op)) {
+	if (!is_write) {
 		if (!err) {
 			SetPageUptodate(page);
 		} else {

+ 3 - 2
mm/page_io.c

@@ -319,9 +319,10 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
 		ret = -ENOMEM;
 		goto out;
 	}
-	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		bio->bi_rw |= REQ_SYNC;
+		bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
+	else
+		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 	count_vm_event(PSWPOUT);
 	set_page_writeback(page);
 	unlock_page(page);
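For reference, the two-step form the __swap_writepage() hunk above replaces would have produced the same bi_opf value on a fresh bio, since bio_set_op_attrs() only clears the op bits and ORs the flags in; the rewrite is about keeping every bi_opf update behind the accessor. A hedged sketch of the equivalence, with purely illustrative helper names:

	/* Sketch only: both helpers yield the same bi_opf on a freshly allocated bio. */
	static void set_sync_write_two_step(struct bio *bio)
	{
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		bio->bi_opf |= REQ_SYNC;		/* open-coded flag update */
	}

	static void set_sync_write_one_call(struct bio *bio)
	{
		bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);	/* preferred form */
	}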