@@ -806,7 +806,7 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
 	dd_idx = 0;
 	while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)
 		dd_idx++;
-	if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw ||
+	if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf ||
 	    bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite))
 		goto unlock_out;
 
@@ -1003,7 +1003,7 @@ again:
 		pr_debug("%s: for %llu schedule op %d on disc %d\n",
 			__func__, (unsigned long long)sh->sector,
-			bi->bi_rw, i);
+			bi->bi_opf, i);
 		atomic_inc(&sh->count);
 		if (sh != head_sh)
 			atomic_inc(&head_sh->count);
@@ -1014,7 +1014,7 @@ again:
 			bi->bi_iter.bi_sector = (sh->sector
 						 + rdev->data_offset);
 		if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags))
-			bi->bi_rw |= REQ_NOMERGE;
+			bi->bi_opf |= REQ_NOMERGE;
 
 		if (test_bit(R5_SkipCopy, &sh->dev[i].flags))
 			WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags));
@@ -1055,7 +1055,7 @@ again:
 			pr_debug("%s: for %llu schedule op %d on "
 				 "replacement disc %d\n",
 				__func__, (unsigned long long)sh->sector,
-				rbi->bi_rw, i);
+				rbi->bi_opf, i);
 			atomic_inc(&sh->count);
 			if (sh != head_sh)
 				atomic_inc(&head_sh->count);
@@ -1088,7 +1088,7 @@ again:
 			if (op_is_write(op))
 				set_bit(STRIPE_DEGRADED, &sh->state);
 			pr_debug("skip op %d on disc %d for sector %llu\n",
-				bi->bi_rw, i, (unsigned long long)sh->sector);
+				bi->bi_opf, i, (unsigned long long)sh->sector);
 			clear_bit(R5_LOCKED, &sh->dev[i].flags);
 			set_bit(STRIPE_HANDLE, &sh->state);
 		}
@@ -1619,9 +1619,9 @@ again:
 
 			while (wbi && wbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
-				if (wbi->bi_rw & REQ_FUA)
+				if (wbi->bi_opf & REQ_FUA)
 					set_bit(R5_WantFUA, &dev->flags);
-				if (wbi->bi_rw & REQ_SYNC)
+				if (wbi->bi_opf & REQ_SYNC)
 					set_bit(R5_SyncIO, &dev->flags);
 				if (bio_op(wbi) == REQ_OP_DISCARD)
 					set_bit(R5_Discard, &dev->flags);
@@ -5154,7 +5154,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 	DEFINE_WAIT(w);
 	bool do_prepare;
 
-	if (unlikely(bi->bi_rw & REQ_PREFLUSH)) {
+	if (unlikely(bi->bi_opf & REQ_PREFLUSH)) {
 		int ret = r5l_handle_flush_request(conf->log, bi);
 
 		if (ret == 0)
@@ -5237,7 +5237,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 			 (unsigned long long)logical_sector);
 
 		sh = raid5_get_active_stripe(conf, new_sector, previous,
-				       (bi->bi_rw & REQ_RAHEAD), 0);
+				       (bi->bi_opf & REQ_RAHEAD), 0);
 		if (sh) {
 			if (unlikely(previous)) {
 				/* expansion might have moved on while waiting for a
@@ -5305,7 +5305,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 			set_bit(STRIPE_HANDLE, &sh->state);
 			clear_bit(STRIPE_DELAYED, &sh->state);
 			if ((!sh->batch_head || sh == sh->batch_head) &&
-			    (bi->bi_rw & REQ_SYNC) &&
+			    (bi->bi_opf & REQ_SYNC) &&
 			    !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 				atomic_inc(&conf->preread_active_stripes);
 			release_stripe_plug(mddev, sh);