@@ -404,8 +404,7 @@ static void raid10_end_read_request(struct bio *bio)
 		 * oops, read error - keep the refcount on the rdev
 		 */
 		char b[BDEVNAME_SIZE];
-		printk_ratelimited(KERN_ERR
-				   "md/raid10:%s: %s: rescheduling sector %llu\n",
+		pr_err_ratelimited("md/raid10:%s: %s: rescheduling sector %llu\n",
 				   mdname(conf->mddev),
 				   bdevname(rdev->bdev, b),
 				   (unsigned long long)r10_bio->sector);
@@ -1589,11 +1588,10 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
 	set_mask_bits(&mddev->flags, 0,
 		      BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
 	spin_unlock_irqrestore(&conf->device_lock, flags);
-	printk(KERN_ALERT
-	       "md/raid10:%s: Disk failure on %s, disabling device.\n"
-	       "md/raid10:%s: Operation continuing on %d devices.\n",
-	       mdname(mddev), bdevname(rdev->bdev, b),
-	       mdname(mddev), conf->geo.raid_disks - mddev->degraded);
+	pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n"
+		"md/raid10:%s: Operation continuing on %d devices.\n",
+		mdname(mddev), bdevname(rdev->bdev, b),
+		mdname(mddev), conf->geo.raid_disks - mddev->degraded);
 }
 
 static void print_conf(struct r10conf *conf)
@@ -1601,13 +1599,13 @@ static void print_conf(struct r10conf *conf)
 	int i;
 	struct md_rdev *rdev;
 
-	printk(KERN_DEBUG "RAID10 conf printout:\n");
+	pr_debug("RAID10 conf printout:\n");
 	if (!conf) {
-		printk(KERN_DEBUG "(!conf)\n");
+		pr_debug("(!conf)\n");
 		return;
 	}
-	printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
-	       conf->geo.raid_disks);
+	pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
+		 conf->geo.raid_disks);
 
 	/* This is only called with ->reconfix_mutex held, so
 	 * rcu protection of rdev is not needed */
@@ -1615,10 +1613,10 @@ static void print_conf(struct r10conf *conf)
 		char b[BDEVNAME_SIZE];
 		rdev = conf->mirrors[i].rdev;
 		if (rdev)
-			printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
-				i, !test_bit(In_sync, &rdev->flags),
-				!test_bit(Faulty, &rdev->flags),
-				bdevname(rdev->bdev,b));
+			pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
+				 i, !test_bit(In_sync, &rdev->flags),
+				 !test_bit(Faulty, &rdev->flags),
+				 bdevname(rdev->bdev,b));
 	}
 }
 
@@ -2109,10 +2107,8 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
 			ok = rdev_set_badblocks(rdev2, addr, s, 0);
 			if (!ok) {
 				/* just abort the recovery */
-				printk(KERN_NOTICE
-				       "md/raid10:%s: recovery aborted"
-				       " due to read error\n",
-				       mdname(mddev));
+				pr_notice("md/raid10:%s: recovery aborted due to read error\n",
+					  mdname(mddev));
 
 				conf->mirrors[dw].recovery_disabled
 					= mddev->recovery_disabled;
@@ -2259,14 +2255,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 		char b[BDEVNAME_SIZE];
 		bdevname(rdev->bdev, b);
 
-		printk(KERN_NOTICE
-		       "md/raid10:%s: %s: Raid device exceeded "
-		       "read_error threshold [cur %d:max %d]\n",
-		       mdname(mddev), b,
-		       atomic_read(&rdev->read_errors), max_read_errors);
-		printk(KERN_NOTICE
-		       "md/raid10:%s: %s: Failing raid device\n",
-		       mdname(mddev), b);
+		pr_notice("md/raid10:%s: %s: Raid device exceeded read_error threshold [cur %d:max %d]\n",
+			  mdname(mddev), b,
+			  atomic_read(&rdev->read_errors), max_read_errors);
+		pr_notice("md/raid10:%s: %s: Failing raid device\n",
+			  mdname(mddev), b);
 		md_error(mddev, rdev);
 		r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
 		return;
@@ -2356,20 +2349,16 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 					     s, conf->tmppage, WRITE)
 			    == 0) {
 				/* Well, this device is dead */
-				printk(KERN_NOTICE
-				       "md/raid10:%s: read correction "
-				       "write failed"
-				       " (%d sectors at %llu on %s)\n",
-				       mdname(mddev), s,
-				       (unsigned long long)(
-					       sect +
-					       choose_data_offset(r10_bio,
-								  rdev)),
-				       bdevname(rdev->bdev, b));
-				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
-				       "drive\n",
-				       mdname(mddev),
-				       bdevname(rdev->bdev, b));
+				pr_notice("md/raid10:%s: read correction write failed (%d sectors at %llu on %s)\n",
+					  mdname(mddev), s,
+					  (unsigned long long)(
+						  sect +
+						  choose_data_offset(r10_bio,
+								     rdev)),
+					  bdevname(rdev->bdev, b));
+				pr_notice("md/raid10:%s: %s: failing drive\n",
+					  mdname(mddev),
+					  bdevname(rdev->bdev, b));
 			}
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
@@ -2397,24 +2386,18 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 					     READ)) {
 			case 0:
 				/* Well, this device is dead */
-				printk(KERN_NOTICE
-				       "md/raid10:%s: unable to read back "
-				       "corrected sectors"
-				       " (%d sectors at %llu on %s)\n",
+				pr_notice("md/raid10:%s: unable to read back corrected sectors (%d sectors at %llu on %s)\n",
 				       mdname(mddev), s,
 				       (unsigned long long)(
 					       sect +
 					       choose_data_offset(r10_bio, rdev)),
 				       bdevname(rdev->bdev, b));
-				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
-				       "drive\n",
+				pr_notice("md/raid10:%s: %s: failing drive\n",
 				       mdname(mddev),
 				       bdevname(rdev->bdev, b));
 				break;
 			case 1:
-				printk(KERN_INFO
-				       "md/raid10:%s: read error corrected"
-				       " (%d sectors at %llu on %s)\n",
+				pr_info("md/raid10:%s: read error corrected (%d sectors at %llu on %s)\n",
 				       mdname(mddev), s,
 				       (unsigned long long)(
 					       sect +
@@ -2529,23 +2512,19 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
 read_more:
 	rdev = read_balance(conf, r10_bio, &max_sectors);
 	if (rdev == NULL) {
-		printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
-		       " read error for block %llu\n",
-		       mdname(mddev), b,
-		       (unsigned long long)r10_bio->sector);
+		pr_crit_ratelimited("md/raid10:%s: %s: unrecoverable I/O read error for block %llu\n",
+				    mdname(mddev), b,
+				    (unsigned long long)r10_bio->sector);
 		raid_end_bio_io(r10_bio);
 		return;
 	}
 
 	do_sync = (r10_bio->master_bio->bi_opf & REQ_SYNC);
 	slot = r10_bio->read_slot;
-	printk_ratelimited(
-		KERN_ERR
-		"md/raid10:%s: %s: redirecting "
-		"sector %llu to another mirror\n",
-		mdname(mddev),
-		bdevname(rdev->bdev, b),
-		(unsigned long long)r10_bio->sector);
+	pr_err_ratelimited("md/raid10:%s: %s: redirecting sector %llu to another mirror\n",
+			   mdname(mddev),
+			   bdevname(rdev->bdev, b),
+			   (unsigned long long)r10_bio->sector);
 	bio = bio_clone_mddev(r10_bio->master_bio,
 			      GFP_NOIO, mddev);
 	bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
@@ -3160,8 +3139,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 				if (!any_working) {
 					if (!test_and_set_bit(MD_RECOVERY_INTR,
 							      &mddev->recovery))
-						printk(KERN_INFO "md/raid10:%s: insufficient "
-						       "working devices for recovery.\n",
+						pr_warn("md/raid10:%s: insufficient working devices for recovery.\n",
 							mdname(mddev));
 					mirror->recovery_disabled
 						= mddev->recovery_disabled;
@@ -3489,15 +3467,14 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	copies = setup_geo(&geo, mddev, geo_new);
 
 	if (copies == -2) {
-		printk(KERN_ERR "md/raid10:%s: chunk size must be "
-		       "at least PAGE_SIZE(%ld) and be a power of 2.\n",
-		       mdname(mddev), PAGE_SIZE);
+		pr_warn("md/raid10:%s: chunk size must be at least PAGE_SIZE(%ld) and be a power of 2.\n",
+			mdname(mddev), PAGE_SIZE);
 		goto out;
 	}
 
 	if (copies < 2 || copies > mddev->raid_disks) {
-		printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
-		       mdname(mddev), mddev->new_layout);
+		pr_warn("md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
+			mdname(mddev), mddev->new_layout);
 		goto out;
 	}
 
@@ -3557,9 +3534,6 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	return conf;
 
  out:
-	if (err == -ENOMEM)
-		printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
-		       mdname(mddev));
 	if (conf) {
 		mempool_destroy(conf->r10bio_pool);
 		kfree(conf->mirrors);
@@ -3656,7 +3630,7 @@ static int raid10_run(struct mddev *mddev)
 	}
 	/* need to check that every block has at least one working mirror */
 	if (!enough(conf, -1)) {
-		printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
+		pr_err("md/raid10:%s: not enough operational mirrors.\n",
 		       mdname(mddev));
 		goto out_free_conf;
 	}
@@ -3698,11 +3672,9 @@ static int raid10_run(struct mddev *mddev)
 	}
 
 	if (mddev->recovery_cp != MaxSector)
-		printk(KERN_NOTICE "md/raid10:%s: not clean"
-		       " -- starting background reconstruction\n",
-		       mdname(mddev));
-	printk(KERN_INFO
-	       "md/raid10:%s: active with %d out of %d devices\n",
+		pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
+			  mdname(mddev));
+	pr_info("md/raid10:%s: active with %d out of %d devices\n",
 		mdname(mddev), conf->geo.raid_disks - mddev->degraded,
 		conf->geo.raid_disks);
 	/*
@@ -3739,7 +3711,7 @@ static int raid10_run(struct mddev *mddev)
 
 		if (max(before_length, after_length) > min_offset_diff) {
 			/* This cannot work */
-			printk("md/raid10: offset difference not enough to continue reshape\n");
+			pr_warn("md/raid10: offset difference not enough to continue reshape\n");
 			goto out_free_conf;
 		}
 		conf->offset_diff = min_offset_diff;
@@ -3846,8 +3818,8 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
 	struct r10conf *conf;
 
 	if (mddev->degraded > 0) {
-		printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
-		       mdname(mddev));
+		pr_warn("md/raid10:%s: Error: degraded raid0!\n",
+			mdname(mddev));
 		return ERR_PTR(-EINVAL);
 	}
 	sector_div(size, devs);
@@ -3887,9 +3859,8 @@ static void *raid10_takeover(struct mddev *mddev)
 		/* for raid0 takeover only one zone is supported */
 		raid0_conf = mddev->private;
 		if (raid0_conf->nr_strip_zones > 1) {
-			printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
-			       " with more than one zone.\n",
-			       mdname(mddev));
+			pr_warn("md/raid10:%s: cannot takeover raid 0 with more than one zone.\n",
+				mdname(mddev));
 			return ERR_PTR(-EINVAL);
 		}
 		return raid10_takeover_raid0(mddev,
@@ -4078,8 +4049,8 @@ static int raid10_start_reshape(struct mddev *mddev)
 		sector_t size = raid10_size(mddev, 0, 0);
 		if (size < mddev->array_sectors) {
 			spin_unlock_irq(&conf->device_lock);
-			printk(KERN_ERR "md/raid10:%s: array size must be reduce before number of disks\n",
-			       mdname(mddev));
+			pr_warn("md/raid10:%s: array size must be reduce before number of disks\n",
+				mdname(mddev));
 			return -EINVAL;
 		}
 		mddev->resync_max_sectors = size;
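
Note on the conversion: the pr_*() helpers fold the log level (and an optional pr_fmt() prefix) into the format string at compile time, so call sites no longer pass KERN_* as a separate string literal. The following is a minimal userspace sketch of that idea only; the my_* names are illustrative stand-ins, not the actual definitions in include/linux/printk.h.

#include <stdio.h>

/* Level prefixes, analogous in spirit to the kernel's KERN_ERR / KERN_NOTICE. */
#define MY_KERN_ERR    "<3>"
#define MY_KERN_NOTICE "<5>"

/* Like a driver-supplied pr_fmt(): prepends a subsystem prefix to every message. */
#define my_pr_fmt(fmt) "md/raid10-demo: " fmt

/* pr_err()/pr_notice()-style helpers: the level and prefix are pasted onto the
 * format string by the preprocessor, so callers pass only the message and args. */
#define my_pr_err(fmt, ...) \
	printf(MY_KERN_ERR my_pr_fmt(fmt), ##__VA_ARGS__)
#define my_pr_notice(fmt, ...) \
	printf(MY_KERN_NOTICE my_pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	unsigned long long sector = 123456;

	/* Old style: level passed as a separate string literal next to the format. */
	printf(MY_KERN_ERR "rescheduling sector %llu\n", sector);

	/* New style: the helper carries the level, as pr_err_ratelimited() does above. */
	my_pr_err("rescheduling sector %llu\n", sector);
	my_pr_notice("recovery aborted due to read error\n");
	return 0;
}

Compiled with gcc, both styles print the same line; the helper form is what the patch switches raid10.c to, with the _ratelimited variants additionally throttling repeated messages.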