@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
+#include <linux/kthread.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
@@ -68,6 +69,11 @@ static int max_queued_requests = 1024;
static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int enough(struct r10conf *conf, int ignore);
+static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
+ int *skipped);
+static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
+static void end_reshape_write(struct bio *bio, int error);
+static void end_reshape(struct r10conf *conf);

static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
@@ -112,7 +118,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
if (!r10_bio)
return NULL;

- if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
+ if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
nalloc = conf->copies; /* resync */
else
nalloc = 2; /* recovery */
@@ -140,9 +147,10 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
struct bio *rbio = r10_bio->devs[j].repl_bio;
bio = r10_bio->devs[j].bio;
for (i = 0; i < RESYNC_PAGES; i++) {
- if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
- &conf->mddev->recovery)) {
- /* we can share bv_page's during recovery */
+ if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
+ &conf->mddev->recovery)) {
+ /* we can share bv_page's during recovery
+ * and reshape */
struct bio *rbio = r10_bio->devs[0].bio;
page = rbio->bi_io_vec[i].bv_page;
get_page(page);
@@ -165,10 +173,11 @@ out_free_pages:
while (j--)
for (i = 0; i < RESYNC_PAGES ; i++)
safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
- j = -1;
+ j = 0;
out_free_bio:
- while (++j < nalloc) {
- bio_put(r10_bio->devs[j].bio);
+ for ( ; j < nalloc; j++) {
+ if (r10_bio->devs[j].bio)
+ bio_put(r10_bio->devs[j].bio);
if (r10_bio->devs[j].repl_bio)
bio_put(r10_bio->devs[j].repl_bio);
}
@@ -504,79 +513,96 @@ static void raid10_end_write_request(struct bio *bio, int error)
* sector offset to a virtual address
*/

-static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
+static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
int n,f;
sector_t sector;
sector_t chunk;
sector_t stripe;
int dev;
-
int slot = 0;

/* now calculate first sector/dev */
- chunk = r10bio->sector >> conf->chunk_shift;
- sector = r10bio->sector & conf->chunk_mask;
+ chunk = r10bio->sector >> geo->chunk_shift;
+ sector = r10bio->sector & geo->chunk_mask;

- chunk *= conf->near_copies;
+ chunk *= geo->near_copies;
stripe = chunk;
- dev = sector_div(stripe, conf->raid_disks);
- if (conf->far_offset)
- stripe *= conf->far_copies;
+ dev = sector_div(stripe, geo->raid_disks);
+ if (geo->far_offset)
+ stripe *= geo->far_copies;

- sector += stripe << conf->chunk_shift;
+ sector += stripe << geo->chunk_shift;

/* and calculate all the others */
- for (n=0; n < conf->near_copies; n++) {
+ for (n = 0; n < geo->near_copies; n++) {
int d = dev;
sector_t s = sector;
r10bio->devs[slot].addr = sector;
r10bio->devs[slot].devnum = d;
slot++;

- for (f = 1; f < conf->far_copies; f++) {
- d += conf->near_copies;
- if (d >= conf->raid_disks)
- d -= conf->raid_disks;
- s += conf->stride;
+ for (f = 1; f < geo->far_copies; f++) {
+ d += geo->near_copies;
+ if (d >= geo->raid_disks)
+ d -= geo->raid_disks;
+ s += geo->stride;
r10bio->devs[slot].devnum = d;
r10bio->devs[slot].addr = s;
slot++;
}
dev++;
- if (dev >= conf->raid_disks) {
+ if (dev >= geo->raid_disks) {
dev = 0;
- sector += (conf->chunk_mask + 1);
+ sector += (geo->chunk_mask + 1);
}
}
- BUG_ON(slot != conf->copies);
+}
+
+static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
+{
+ struct geom *geo = &conf->geo;
+
+ if (conf->reshape_progress != MaxSector &&
+ ((r10bio->sector >= conf->reshape_progress) !=
+ conf->mddev->reshape_backwards)) {
+ set_bit(R10BIO_Previous, &r10bio->state);
+ geo = &conf->prev;
+ } else
+ clear_bit(R10BIO_Previous, &r10bio->state);
+
+ __raid10_find_phys(geo, r10bio);
}

static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
{
sector_t offset, chunk, vchunk;
+ /* Never use conf->prev as this is only called during resync
+ * or recovery, so reshape isn't happening
+ */
+ struct geom *geo = &conf->geo;

- offset = sector & conf->chunk_mask;
- if (conf->far_offset) {
+ offset = sector & geo->chunk_mask;
+ if (geo->far_offset) {
int fc;
- chunk = sector >> conf->chunk_shift;
- fc = sector_div(chunk, conf->far_copies);
- dev -= fc * conf->near_copies;
+ chunk = sector >> geo->chunk_shift;
+ fc = sector_div(chunk, geo->far_copies);
+ dev -= fc * geo->near_copies;
if (dev < 0)
- dev += conf->raid_disks;
+ dev += geo->raid_disks;
} else {
- while (sector >= conf->stride) {
- sector -= conf->stride;
- if (dev < conf->near_copies)
- dev += conf->raid_disks - conf->near_copies;
+ while (sector >= geo->stride) {
+ sector -= geo->stride;
+ if (dev < geo->near_copies)
+ dev += geo->raid_disks - geo->near_copies;
else
- dev -= conf->near_copies;
+ dev -= geo->near_copies;
}
- chunk = sector >> conf->chunk_shift;
+ chunk = sector >> geo->chunk_shift;
}
- vchunk = chunk * conf->raid_disks + dev;
- sector_div(vchunk, conf->near_copies);
- return (vchunk << conf->chunk_shift) + offset;
+ vchunk = chunk * geo->raid_disks + dev;
+ sector_div(vchunk, geo->near_copies);
+ return (vchunk << geo->chunk_shift) + offset;
}

/**
|
|
@@ -597,10 +623,17 @@ static int raid10_mergeable_bvec(struct request_queue *q,
|
|
|
struct r10conf *conf = mddev->private;
|
|
|
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
|
|
|
int max;
|
|
|
- unsigned int chunk_sectors = mddev->chunk_sectors;
|
|
|
+ unsigned int chunk_sectors;
|
|
|
unsigned int bio_sectors = bvm->bi_size >> 9;
|
|
|
+ struct geom *geo = &conf->geo;
|
|
|
+
|
|
|
+ chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
|
|
|
+ if (conf->reshape_progress != MaxSector &&
|
|
|
+ ((sector >= conf->reshape_progress) !=
|
|
|
+ conf->mddev->reshape_backwards))
|
|
|
+ geo = &conf->prev;
|
|
|
|
|
|
- if (conf->near_copies < conf->raid_disks) {
|
|
|
+ if (geo->near_copies < geo->raid_disks) {
|
|
|
max = (chunk_sectors - ((sector & (chunk_sectors - 1))
|
|
|
+ bio_sectors)) << 9;
|
|
|
if (max < 0)
|
|
@@ -614,6 +647,12 @@ static int raid10_mergeable_bvec(struct request_queue *q,
|
|
|
if (mddev->merge_check_needed) {
|
|
|
struct r10bio r10_bio;
|
|
|
int s;
|
|
|
+ if (conf->reshape_progress != MaxSector) {
|
|
|
+ /* Cannot give any guidance during reshape */
|
|
|
+ if (max <= biovec->bv_len && bio_sectors == 0)
|
|
|
+ return biovec->bv_len;
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
r10_bio.sector = sector;
|
|
|
raid10_find_phys(conf, &r10_bio);
|
|
|
rcu_read_lock();
|
|
@@ -681,6 +720,7 @@ static struct md_rdev *read_balance(struct r10conf *conf,
|
|
|
struct md_rdev *rdev, *best_rdev;
|
|
|
int do_balance;
|
|
|
int best_slot;
|
|
|
+ struct geom *geo = &conf->geo;
|
|
|
|
|
|
raid10_find_phys(conf, r10_bio);
|
|
|
rcu_read_lock();
|
|
@@ -761,11 +801,11 @@ retry:
|
|
|
* sequential read speed for 'far copies' arrays. So only
|
|
|
* keep it for 'near' arrays, and review those later.
|
|
|
*/
|
|
|
- if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending))
|
|
|
+ if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
|
|
|
break;
|
|
|
|
|
|
/* for far > 1 always use the lowest address */
|
|
|
- if (conf->far_copies > 1)
|
|
|
+ if (geo->far_copies > 1)
|
|
|
new_distance = r10_bio->devs[slot].addr;
|
|
|
else
|
|
|
new_distance = abs(r10_bio->devs[slot].addr -
|
|
@@ -812,7 +852,10 @@ static int raid10_congested(void *data, int bits)
|
|
|
if (mddev_congested(mddev, bits))
|
|
|
return 1;
|
|
|
rcu_read_lock();
|
|
|
- for (i = 0; i < conf->raid_disks && ret == 0; i++) {
|
|
|
+ for (i = 0;
|
|
|
+ (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
|
|
|
+ && ret == 0;
|
|
|
+ i++) {
|
|
|
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
|
|
|
if (rdev && !test_bit(Faulty, &rdev->flags)) {
|
|
|
struct request_queue *q = bdev_get_queue(rdev->bdev);
|
|
@@ -973,13 +1016,24 @@ static void unfreeze_array(struct r10conf *conf)
|
|
|
spin_unlock_irq(&conf->resync_lock);
|
|
|
}

+static sector_t choose_data_offset(struct r10bio *r10_bio,
+ struct md_rdev *rdev)
+{
+ if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
+ test_bit(R10BIO_Previous, &r10_bio->state))
+ return rdev->data_offset;
+ else
+ return rdev->new_data_offset;
+}
+
static void make_request(struct mddev *mddev, struct bio * bio)
|
|
|
{
|
|
|
struct r10conf *conf = mddev->private;
|
|
|
struct r10bio *r10_bio;
|
|
|
struct bio *read_bio;
|
|
|
int i;
|
|
|
- int chunk_sects = conf->chunk_mask + 1;
|
|
|
+ sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
|
|
|
+ int chunk_sects = chunk_mask + 1;
|
|
|
const int rw = bio_data_dir(bio);
|
|
|
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
|
|
|
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
|
|
@@ -988,6 +1042,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
|
|
|
int plugged;
|
|
|
int sectors_handled;
|
|
|
int max_sectors;
|
|
|
+ int sectors;
|
|
|
|
|
|
if (unlikely(bio->bi_rw & REQ_FLUSH)) {
|
|
|
md_flush_request(mddev, bio);
|
|
@@ -997,9 +1052,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
|
|
|
/* If this request crosses a chunk boundary, we need to
|
|
|
* split it. This will only happen for 1 PAGE (or less) requests.
|
|
|
*/
|
|
|
- if (unlikely( (bio->bi_sector & conf->chunk_mask) + (bio->bi_size >> 9)
|
|
|
- > chunk_sects &&
|
|
|
- conf->near_copies < conf->raid_disks)) {
|
|
|
+ if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
|
|
|
+ > chunk_sects
|
|
|
+ && (conf->geo.near_copies < conf->geo.raid_disks
|
|
|
+ || conf->prev.near_copies < conf->prev.raid_disks))) {
|
|
|
struct bio_pair *bp;
|
|
|
/* Sanity check -- queue functions should prevent this happening */
|
|
|
if (bio->bi_vcnt != 1 ||
|
|
@@ -1051,10 +1107,41 @@ static void make_request(struct mddev *mddev, struct bio * bio)
|
|
|
*/
|
|
|
wait_barrier(conf);
|
|
|
|
|
|
+ sectors = bio->bi_size >> 9;
|
|
|
+ while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
|
|
|
+ bio->bi_sector < conf->reshape_progress &&
|
|
|
+ bio->bi_sector + sectors > conf->reshape_progress) {
|
|
|
+ /* IO spans the reshape position. Need to wait for
|
|
|
+ * reshape to pass
|
|
|
+ */
|
|
|
+ allow_barrier(conf);
|
|
|
+ wait_event(conf->wait_barrier,
|
|
|
+ conf->reshape_progress <= bio->bi_sector ||
|
|
|
+ conf->reshape_progress >= bio->bi_sector + sectors);
|
|
|
+ wait_barrier(conf);
|
|
|
+ }
|
|
|
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
|
|
|
+ bio_data_dir(bio) == WRITE &&
|
|
|
+ (mddev->reshape_backwards
|
|
|
+ ? (bio->bi_sector < conf->reshape_safe &&
|
|
|
+ bio->bi_sector + sectors > conf->reshape_progress)
|
|
|
+ : (bio->bi_sector + sectors > conf->reshape_safe &&
|
|
|
+ bio->bi_sector < conf->reshape_progress))) {
|
|
|
+ /* Need to update reshape_position in metadata */
|
|
|
+ mddev->reshape_position = conf->reshape_progress;
|
|
|
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
|
|
|
+ set_bit(MD_CHANGE_PENDING, &mddev->flags);
|
|
|
+ md_wakeup_thread(mddev->thread);
|
|
|
+ wait_event(mddev->sb_wait,
|
|
|
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags));
|
|
|
+
|
|
|
+ conf->reshape_safe = mddev->reshape_position;
|
|
|
+ }
|
|
|
+
|
|
|
r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
|
|
|
|
|
|
r10_bio->master_bio = bio;
|
|
|
- r10_bio->sectors = bio->bi_size >> 9;
|
|
|
+ r10_bio->sectors = sectors;
|
|
|
|
|
|
r10_bio->mddev = mddev;
|
|
|
r10_bio->sector = bio->bi_sector;
|
|
@@ -1093,7 +1180,7 @@ read_again:
|
|
|
r10_bio->devs[slot].rdev = rdev;
|
|
|
|
|
|
read_bio->bi_sector = r10_bio->devs[slot].addr +
|
|
|
- rdev->data_offset;
|
|
|
+ choose_data_offset(r10_bio, rdev);
|
|
|
read_bio->bi_bdev = rdev->bdev;
|
|
|
read_bio->bi_end_io = raid10_end_read_request;
|
|
|
read_bio->bi_rw = READ | do_sync;
|
|
@@ -1297,7 +1384,8 @@ retry_write:
|
|
|
r10_bio->devs[i].bio = mbio;
|
|
|
|
|
|
mbio->bi_sector = (r10_bio->devs[i].addr+
|
|
|
- conf->mirrors[d].rdev->data_offset);
|
|
|
+ choose_data_offset(r10_bio,
|
|
|
+ conf->mirrors[d].rdev));
|
|
|
mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
|
|
|
mbio->bi_end_io = raid10_end_write_request;
|
|
|
mbio->bi_rw = WRITE | do_sync | do_fua;
|
|
@@ -1321,8 +1409,10 @@ retry_write:
|
|
|
* so it cannot disappear, so the replacement cannot
|
|
|
* become NULL here
|
|
|
*/
|
|
|
- mbio->bi_sector = (r10_bio->devs[i].addr+
|
|
|
- conf->mirrors[d].replacement->data_offset);
|
|
|
+ mbio->bi_sector = (r10_bio->devs[i].addr +
|
|
|
+ choose_data_offset(
|
|
|
+ r10_bio,
|
|
|
+ conf->mirrors[d].replacement));
|
|
|
mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
|
|
|
mbio->bi_end_io = raid10_end_write_request;
|
|
|
mbio->bi_rw = WRITE | do_sync | do_fua;
|
|
@@ -1368,19 +1458,19 @@ static void status(struct seq_file *seq, struct mddev *mddev)
|
|
|
struct r10conf *conf = mddev->private;
|
|
|
int i;
|
|
|
|
|
|
- if (conf->near_copies < conf->raid_disks)
|
|
|
+ if (conf->geo.near_copies < conf->geo.raid_disks)
|
|
|
seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
|
|
|
- if (conf->near_copies > 1)
|
|
|
- seq_printf(seq, " %d near-copies", conf->near_copies);
|
|
|
- if (conf->far_copies > 1) {
|
|
|
- if (conf->far_offset)
|
|
|
- seq_printf(seq, " %d offset-copies", conf->far_copies);
|
|
|
+ if (conf->geo.near_copies > 1)
|
|
|
+ seq_printf(seq, " %d near-copies", conf->geo.near_copies);
|
|
|
+ if (conf->geo.far_copies > 1) {
|
|
|
+ if (conf->geo.far_offset)
|
|
|
+ seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
|
|
|
else
|
|
|
- seq_printf(seq, " %d far-copies", conf->far_copies);
|
|
|
+ seq_printf(seq, " %d far-copies", conf->geo.far_copies);
|
|
|
}
|
|
|
- seq_printf(seq, " [%d/%d] [", conf->raid_disks,
|
|
|
- conf->raid_disks - mddev->degraded);
|
|
|
- for (i = 0; i < conf->raid_disks; i++)
|
|
|
+ seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
|
|
|
+ conf->geo.raid_disks - mddev->degraded);
|
|
|
+ for (i = 0; i < conf->geo.raid_disks; i++)
|
|
|
seq_printf(seq, "%s",
|
|
|
conf->mirrors[i].rdev &&
|
|
|
test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
|
|
@@ -1392,7 +1482,7 @@ static void status(struct seq_file *seq, struct mddev *mddev)
|
|
|
* Don't consider the device numbered 'ignore'
|
|
|
* as we might be about to remove it.
|
|
|
*/
|
|
|
-static int enough(struct r10conf *conf, int ignore)
|
|
|
+static int _enough(struct r10conf *conf, struct geom *geo, int ignore)
|
|
|
{
|
|
|
int first = 0;
|
|
|
|
|
@@ -1403,7 +1493,7 @@ static int enough(struct r10conf *conf, int ignore)
|
|
|
if (conf->mirrors[first].rdev &&
|
|
|
first != ignore)
|
|
|
cnt++;
|
|
|
- first = (first+1) % conf->raid_disks;
|
|
|
+ first = (first+1) % geo->raid_disks;
|
|
|
}
|
|
|
if (cnt == 0)
|
|
|
return 0;
|
|
@@ -1411,6 +1501,12 @@ static int enough(struct r10conf *conf, int ignore)
|
|
|
return 1;
|
|
|
}
|
|
|
|
|
|
+static int enough(struct r10conf *conf, int ignore)
|
|
|
+{
|
|
|
+ return _enough(conf, &conf->geo, ignore) &&
|
|
|
+ _enough(conf, &conf->prev, ignore);
|
|
|
+}
|
|
|
+
|
|
|
static void error(struct mddev *mddev, struct md_rdev *rdev)
|
|
|
{
|
|
|
char b[BDEVNAME_SIZE];
|
|
@@ -1445,7 +1541,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
|
|
|
"md/raid10:%s: Disk failure on %s, disabling device.\n"
|
|
|
"md/raid10:%s: Operation continuing on %d devices.\n",
|
|
|
mdname(mddev), bdevname(rdev->bdev, b),
|
|
|
- mdname(mddev), conf->raid_disks - mddev->degraded);
|
|
|
+ mdname(mddev), conf->geo.raid_disks - mddev->degraded);
|
|
|
}
|
|
|
|
|
|
static void print_conf(struct r10conf *conf)
|
|
@@ -1458,10 +1554,10 @@ static void print_conf(struct r10conf *conf)
|
|
|
printk(KERN_DEBUG "(!conf)\n");
|
|
|
return;
|
|
|
}
|
|
|
- printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
|
|
|
- conf->raid_disks);
|
|
|
+ printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
|
|
|
+ conf->geo.raid_disks);
|
|
|
|
|
|
- for (i = 0; i < conf->raid_disks; i++) {
|
|
|
+ for (i = 0; i < conf->geo.raid_disks; i++) {
|
|
|
char b[BDEVNAME_SIZE];
|
|
|
tmp = conf->mirrors + i;
|
|
|
if (tmp->rdev)
|
|
@@ -1493,7 +1589,7 @@ static int raid10_spare_active(struct mddev *mddev)
|
|
|
* Find all non-in_sync disks within the RAID10 configuration
|
|
|
* and mark them in_sync
|
|
|
*/
|
|
|
- for (i = 0; i < conf->raid_disks; i++) {
|
|
|
+ for (i = 0; i < conf->geo.raid_disks; i++) {
|
|
|
tmp = conf->mirrors + i;
|
|
|
if (tmp->replacement
|
|
|
&& tmp->replacement->recovery_offset == MaxSector
|
|
@@ -1535,7 +1631,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
|
|
|
int err = -EEXIST;
|
|
|
int mirror;
|
|
|
int first = 0;
|
|
|
- int last = conf->raid_disks - 1;
|
|
|
+ int last = conf->geo.raid_disks - 1;
|
|
|
struct request_queue *q = bdev_get_queue(rdev->bdev);
|
|
|
|
|
|
if (mddev->recovery_cp < MaxSector)
|
|
@@ -1543,7 +1639,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
|
|
|
* very different from resync
|
|
|
*/
|
|
|
return -EBUSY;
|
|
|
- if (rdev->saved_raid_disk < 0 && !enough(conf, -1))
|
|
|
+ if (rdev->saved_raid_disk < 0 && !_enough(conf, &conf->prev, -1))
|
|
|
return -EINVAL;
|
|
|
|
|
|
if (rdev->raid_disk >= 0)
|
|
@@ -1635,6 +1731,7 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
|
|
|
if (!test_bit(Faulty, &rdev->flags) &&
|
|
|
mddev->recovery_disabled != p->recovery_disabled &&
|
|
|
(!p->replacement || p->replacement == rdev) &&
|
|
|
+ number < conf->geo.raid_disks &&
|
|
|
enough(conf, -1)) {
|
|
|
err = -EBUSY;
|
|
|
goto abort;
|
|
@@ -1676,7 +1773,11 @@ static void end_sync_read(struct bio *bio, int error)
|
|
|
struct r10conf *conf = r10_bio->mddev->private;
|
|
|
int d;
|
|
|
|
|
|
- d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
|
|
|
+ if (bio == r10_bio->master_bio) {
|
|
|
+ /* this is a reshape read */
|
|
|
+ d = r10_bio->read_slot; /* really the read dev */
|
|
|
+ } else
|
|
|
+ d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
|
|
|
|
|
|
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
|
|
|
set_bit(R10BIO_Uptodate, &r10_bio->state);
|
|
@@ -2218,7 +2319,9 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
|
|
|
" (%d sectors at %llu on %s)\n",
|
|
|
mdname(mddev), s,
|
|
|
(unsigned long long)(
|
|
|
- sect + rdev->data_offset),
|
|
|
+ sect +
|
|
|
+ choose_data_offset(r10_bio,
|
|
|
+ rdev)),
|
|
|
bdevname(rdev->bdev, b));
|
|
|
printk(KERN_NOTICE "md/raid10:%s: %s: failing "
|
|
|
"drive\n",
|
|
@@ -2256,7 +2359,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
|
|
|
" (%d sectors at %llu on %s)\n",
|
|
|
mdname(mddev), s,
|
|
|
(unsigned long long)(
|
|
|
- sect + rdev->data_offset),
|
|
|
+ sect +
|
|
|
+ choose_data_offset(r10_bio, rdev)),
|
|
|
bdevname(rdev->bdev, b));
|
|
|
printk(KERN_NOTICE "md/raid10:%s: %s: failing "
|
|
|
"drive\n",
|
|
@@ -2269,7 +2373,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
|
|
|
" (%d sectors at %llu on %s)\n",
|
|
|
mdname(mddev), s,
|
|
|
(unsigned long long)(
|
|
|
- sect + rdev->data_offset),
|
|
|
+ sect +
|
|
|
+ choose_data_offset(r10_bio, rdev)),
|
|
|
bdevname(rdev->bdev, b));
|
|
|
atomic_add(s, &rdev->corrected_errors);
|
|
|
}
|
|
@@ -2343,7 +2448,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
|
|
|
wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
|
|
|
md_trim_bio(wbio, sector - bio->bi_sector, sectors);
|
|
|
wbio->bi_sector = (r10_bio->devs[i].addr+
|
|
|
- rdev->data_offset+
|
|
|
+ choose_data_offset(r10_bio, rdev) +
|
|
|
(sector - r10_bio->sector));
|
|
|
wbio->bi_bdev = rdev->bdev;
|
|
|
if (submit_bio_wait(WRITE, wbio) == 0)
|
|
@@ -2420,7 +2525,7 @@ read_more:
|
|
|
r10_bio->devs[slot].bio = bio;
|
|
|
r10_bio->devs[slot].rdev = rdev;
|
|
|
bio->bi_sector = r10_bio->devs[slot].addr
|
|
|
- + rdev->data_offset;
|
|
|
+ + choose_data_offset(r10_bio, rdev);
|
|
|
bio->bi_bdev = rdev->bdev;
|
|
|
bio->bi_rw = READ | do_sync;
|
|
|
bio->bi_private = r10_bio;
|
|
@@ -2480,7 +2585,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
|
|
|
rdev_clear_badblocks(
|
|
|
rdev,
|
|
|
r10_bio->devs[m].addr,
|
|
|
- r10_bio->sectors);
|
|
|
+ r10_bio->sectors, 0);
|
|
|
} else {
|
|
|
if (!rdev_set_badblocks(
|
|
|
rdev,
|
|
@@ -2496,7 +2601,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
|
|
|
rdev_clear_badblocks(
|
|
|
rdev,
|
|
|
r10_bio->devs[m].addr,
|
|
|
- r10_bio->sectors);
|
|
|
+ r10_bio->sectors, 0);
|
|
|
} else {
|
|
|
if (!rdev_set_badblocks(
|
|
|
rdev,
|
|
@@ -2515,7 +2620,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
|
|
|
rdev_clear_badblocks(
|
|
|
rdev,
|
|
|
r10_bio->devs[m].addr,
|
|
|
- r10_bio->sectors);
|
|
|
+ r10_bio->sectors, 0);
|
|
|
rdev_dec_pending(rdev, conf->mddev);
|
|
|
} else if (bio != NULL &&
|
|
|
!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
|
|
@@ -2532,7 +2637,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
|
|
|
rdev_clear_badblocks(
|
|
|
rdev,
|
|
|
r10_bio->devs[m].addr,
|
|
|
- r10_bio->sectors);
|
|
|
+ r10_bio->sectors, 0);
|
|
|
rdev_dec_pending(rdev, conf->mddev);
|
|
|
}
|
|
|
}
|
|
@@ -2573,6 +2678,8 @@ static void raid10d(struct mddev *mddev)
|
|
|
if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
|
|
|
test_bit(R10BIO_WriteError, &r10_bio->state))
|
|
|
handle_write_completed(conf, r10_bio);
|
|
|
+ else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
|
|
|
+ reshape_request_write(mddev, r10_bio);
|
|
|
else if (test_bit(R10BIO_IsSync, &r10_bio->state))
|
|
|
sync_request_write(mddev, r10_bio);
|
|
|
else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
|
|
@@ -2603,7 +2710,7 @@ static int init_resync(struct r10conf *conf)
|
|
|
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
|
|
|
BUG_ON(conf->r10buf_pool);
|
|
|
conf->have_replacement = 0;
|
|
|
- for (i = 0; i < conf->raid_disks; i++)
|
|
|
+ for (i = 0; i < conf->geo.raid_disks; i++)
|
|
|
if (conf->mirrors[i].replacement)
|
|
|
conf->have_replacement = 1;
|
|
|
conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
|
|
@@ -2657,6 +2764,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
|
|
|
sector_t sync_blocks;
|
|
|
sector_t sectors_skipped = 0;
|
|
|
int chunks_skipped = 0;
|
|
|
+ sector_t chunk_mask = conf->geo.chunk_mask;
|
|
|
|
|
|
if (!conf->r10buf_pool)
|
|
|
if (init_resync(conf))
|
|
@@ -2664,7 +2772,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
|
|
|
|
|
|
skipped:
|
|
|
max_sector = mddev->dev_sectors;
|
|
|
- if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
|
|
|
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
|
|
|
+ test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
|
|
|
max_sector = mddev->resync_max_sectors;
|
|
|
if (sector_nr >= max_sector) {
|
|
|
/* If we aborted, we need to abort the
|
|
@@ -2676,11 +2785,16 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
|
|
|
* we need to convert that to several
|
|
|
* virtual addresses.
|
|
|
*/
|
|
|
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
|
|
|
+ end_reshape(conf);
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+
|
|
|
if (mddev->curr_resync < max_sector) { /* aborted */
|
|
|
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
|
|
|
bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
|
|
|
&sync_blocks, 1);
|
|
|
- else for (i=0; i<conf->raid_disks; i++) {
|
|
|
+ else for (i = 0; i < conf->geo.raid_disks; i++) {
|
|
|
sector_t sect =
|
|
|
raid10_find_virt(conf, mddev->curr_resync, i);
|
|
|
bitmap_end_sync(mddev->bitmap, sect,
|
|
@@ -2694,7 +2808,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
|
|
|
/* Completed a full sync so the replacements
|
|
|
* are now fully recovered.
|
|
|
*/
|
|
|
- for (i = 0; i < conf->raid_disks; i++)
|
|
|
+ for (i = 0; i < conf->geo.raid_disks; i++)
|
|
|
if (conf->mirrors[i].replacement)
|
|
|
conf->mirrors[i].replacement
|
|
|
->recovery_offset
|
|
@@ -2707,7 +2821,11 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
|
|
|
*skipped = 1;
|
|
|
return sectors_skipped;
|
|
|
}
|
|
|
- if (chunks_skipped >= conf->raid_disks) {
|
|
|
+
|
|
|
+ if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
|
|
|
+ return reshape_request(mddev, sector_nr, skipped);
|
|
|
+
|
|
|
+ if (chunks_skipped >= conf->geo.raid_disks) {
|
|
|
/* if there has been nothing to do on any drive,
|
|
|
* then there is nothing to do at all..
|
|
|
*/
|
|
@@ -2721,9 +2839,9 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
|
|
|
/* make sure whole request will fit in a chunk - if chunks
|
|
|
* are meaningful
|
|
|
*/
|
|
|
- if (conf->near_copies < conf->raid_disks &&
|
|
|
- max_sector > (sector_nr | conf->chunk_mask))
|
|
|
- max_sector = (sector_nr | conf->chunk_mask) + 1;
|
|
|
+ if (conf->geo.near_copies < conf->geo.raid_disks &&
|
|
|
+ max_sector > (sector_nr | chunk_mask))
|
|
|
+ max_sector = (sector_nr | chunk_mask) + 1;
|
|
|
/*
|
|
|
* If there is non-resync activity waiting for us then
|
|
|
* put in a delay to throttle resync.
|
|
@@ -2752,7 +2870,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
|
|
|
int j;
|
|
|
r10_bio = NULL;
|
|
|
|
|
|
- for (i=0 ; i<conf->raid_disks; i++) {
|
|
|
+ for (i = 0 ; i < conf->geo.raid_disks; i++) {
|
|
|
int still_degraded;
|
|
|
struct r10bio *rb2;
|
|
|
sector_t sect;
|
|
@@ -2806,7 +2924,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
|
|
|
/* Need to check if the array will still be
|
|
|
* degraded
|
|
|
*/
|
|
|
- for (j=0; j<conf->raid_disks; j++)
|
|
|
+ for (j = 0; j < conf->geo.raid_disks; j++)
|
|
|
if (conf->mirrors[j].rdev == NULL ||
|
|
|
test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
|
|
|
still_degraded = 1;
|
|
@@ -2984,9 +3102,9 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
|
|
|
r10_bio->sector = sector_nr;
|
|
|
set_bit(R10BIO_IsSync, &r10_bio->state);
|
|
|
raid10_find_phys(conf, r10_bio);
|
|
|
- r10_bio->sectors = (sector_nr | conf->chunk_mask) - sector_nr +1;
|
|
|
+ r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
|
|
|
|
|
|
- for (i=0; i<conf->copies; i++) {
|
|
|
+ for (i = 0; i < conf->copies; i++) {
|
|
|
int d = r10_bio->devs[i].devnum;
|
|
|
sector_t first_bad, sector;
|
|
|
int bad_sectors;
|
|
@@ -3152,16 +3270,17 @@ raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
|
|
|
struct r10conf *conf = mddev->private;
|
|
|
|
|
|
if (!raid_disks)
|
|
|
- raid_disks = conf->raid_disks;
|
|
|
+ raid_disks = min(conf->geo.raid_disks,
|
|
|
+ conf->prev.raid_disks);
|
|
|
if (!sectors)
|
|
|
sectors = conf->dev_sectors;
|
|
|
|
|
|
- size = sectors >> conf->chunk_shift;
|
|
|
- sector_div(size, conf->far_copies);
|
|
|
+ size = sectors >> conf->geo.chunk_shift;
|
|
|
+ sector_div(size, conf->geo.far_copies);
|
|
|
size = size * raid_disks;
|
|
|
- sector_div(size, conf->near_copies);
|
|
|
+ sector_div(size, conf->geo.near_copies);
|
|
|
|
|
|
- return size << conf->chunk_shift;
|
|
|
+ return size << conf->geo.chunk_shift;
|
|
|
}
|
|
|
|
|
|
static void calc_sectors(struct r10conf *conf, sector_t size)
|
|
@@ -3171,10 +3290,10 @@ static void calc_sectors(struct r10conf *conf, sector_t size)
|
|
|
* conf->stride
|
|
|
*/
|
|
|
|
|
|
- size = size >> conf->chunk_shift;
|
|
|
- sector_div(size, conf->far_copies);
|
|
|
- size = size * conf->raid_disks;
|
|
|
- sector_div(size, conf->near_copies);
|
|
|
+ size = size >> conf->geo.chunk_shift;
|
|
|
+ sector_div(size, conf->geo.far_copies);
|
|
|
+ size = size * conf->geo.raid_disks;
|
|
|
+ sector_div(size, conf->geo.near_copies);
|
|
|
/* 'size' is now the number of chunks in the array */
|
|
|
/* calculate "used chunks per device" */
|
|
|
size = size * conf->copies;
|
|
@@ -3182,38 +3301,76 @@ static void calc_sectors(struct r10conf *conf, sector_t size)
|
|
|
/* We need to round up when dividing by raid_disks to
|
|
|
* get the stride size.
|
|
|
*/
|
|
|
- size = DIV_ROUND_UP_SECTOR_T(size, conf->raid_disks);
|
|
|
+ size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
|
|
|
|
|
|
- conf->dev_sectors = size << conf->chunk_shift;
|
|
|
+ conf->dev_sectors = size << conf->geo.chunk_shift;
|
|
|
|
|
|
- if (conf->far_offset)
|
|
|
- conf->stride = 1 << conf->chunk_shift;
|
|
|
+ if (conf->geo.far_offset)
|
|
|
+ conf->geo.stride = 1 << conf->geo.chunk_shift;
|
|
|
else {
|
|
|
- sector_div(size, conf->far_copies);
|
|
|
- conf->stride = size << conf->chunk_shift;
|
|
|
+ sector_div(size, conf->geo.far_copies);
|
|
|
+ conf->geo.stride = size << conf->geo.chunk_shift;
|
|
|
}
|
|
|
}

+enum geo_type {geo_new, geo_old, geo_start};
+static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
+{
+ int nc, fc, fo;
+ int layout, chunk, disks;
+ switch (new) {
+ case geo_old:
+ layout = mddev->layout;
+ chunk = mddev->chunk_sectors;
+ disks = mddev->raid_disks - mddev->delta_disks;
+ break;
+ case geo_new:
+ layout = mddev->new_layout;
+ chunk = mddev->new_chunk_sectors;
+ disks = mddev->raid_disks;
+ break;
+ default: /* avoid 'may be unused' warnings */
+ case geo_start: /* new when starting reshape - raid_disks not
+ * updated yet. */
+ layout = mddev->new_layout;
+ chunk = mddev->new_chunk_sectors;
+ disks = mddev->raid_disks + mddev->delta_disks;
+ break;
+ }
+ if (layout >> 17)
+ return -1;
+ if (chunk < (PAGE_SIZE >> 9) ||
+ !is_power_of_2(chunk))
+ return -2;
+ nc = layout & 255;
+ fc = (layout >> 8) & 255;
+ fo = layout & (1<<16);
+ geo->raid_disks = disks;
+ geo->near_copies = nc;
+ geo->far_copies = fc;
+ geo->far_offset = fo;
+ geo->chunk_mask = chunk - 1;
+ geo->chunk_shift = ffz(~chunk);
+ return nc*fc;
+}
+
static struct r10conf *setup_conf(struct mddev *mddev)
|
|
|
{
|
|
|
struct r10conf *conf = NULL;
|
|
|
- int nc, fc, fo;
|
|
|
int err = -EINVAL;
|
|
|
+ struct geom geo;
|
|
|
+ int copies;
|
|
|
+
|
|
|
+ copies = setup_geo(&geo, mddev, geo_new);
|
|
|
|
|
|
- if (mddev->new_chunk_sectors < (PAGE_SIZE >> 9) ||
|
|
|
- !is_power_of_2(mddev->new_chunk_sectors)) {
|
|
|
+ if (copies == -2) {
|
|
|
printk(KERN_ERR "md/raid10:%s: chunk size must be "
|
|
|
"at least PAGE_SIZE(%ld) and be a power of 2.\n",
|
|
|
mdname(mddev), PAGE_SIZE);
|
|
|
goto out;
|
|
|
}
|
|
|
|
|
|
- nc = mddev->new_layout & 255;
|
|
|
- fc = (mddev->new_layout >> 8) & 255;
|
|
|
- fo = mddev->new_layout & (1<<16);
|
|
|
-
|
|
|
- if ((nc*fc) <2 || (nc*fc) > mddev->raid_disks ||
|
|
|
- (mddev->new_layout >> 17)) {
|
|
|
+ if (copies < 2 || copies > mddev->raid_disks) {
|
|
|
printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
|
|
|
mdname(mddev), mddev->new_layout);
|
|
|
goto out;
|
|
@@ -3224,7 +3381,9 @@ static struct r10conf *setup_conf(struct mddev *mddev)
|
|
|
if (!conf)
|
|
|
goto out;
|
|
|
|
|
|
- conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
|
|
|
+ /* FIXME calc properly */
|
|
|
+ conf->mirrors = kzalloc(sizeof(struct mirror_info)*(mddev->raid_disks +
|
|
|
+ max(0,mddev->delta_disks)),
|
|
|
GFP_KERNEL);
|
|
|
if (!conf->mirrors)
|
|
|
goto out;
|
|
@@ -3233,22 +3392,29 @@ static struct r10conf *setup_conf(struct mddev *mddev)
|
|
|
if (!conf->tmppage)
|
|
|
goto out;
|
|
|
|
|
|
-
|
|
|
- conf->raid_disks = mddev->raid_disks;
|
|
|
- conf->near_copies = nc;
|
|
|
- conf->far_copies = fc;
|
|
|
- conf->copies = nc*fc;
|
|
|
- conf->far_offset = fo;
|
|
|
- conf->chunk_mask = mddev->new_chunk_sectors - 1;
|
|
|
- conf->chunk_shift = ffz(~mddev->new_chunk_sectors);
|
|
|
-
|
|
|
+ conf->geo = geo;
|
|
|
+ conf->copies = copies;
|
|
|
conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
|
|
|
r10bio_pool_free, conf);
|
|
|
if (!conf->r10bio_pool)
|
|
|
goto out;
|
|
|
|
|
|
calc_sectors(conf, mddev->dev_sectors);
|
|
|
-
|
|
|
+ if (mddev->reshape_position == MaxSector) {
|
|
|
+ conf->prev = conf->geo;
|
|
|
+ conf->reshape_progress = MaxSector;
|
|
|
+ } else {
|
|
|
+ if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
|
|
|
+ err = -EINVAL;
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
+ conf->reshape_progress = mddev->reshape_position;
|
|
|
+ if (conf->prev.far_offset)
|
|
|
+ conf->prev.stride = 1 << conf->prev.chunk_shift;
|
|
|
+ else
|
|
|
+ /* far_copies must be 1 */
|
|
|
+ conf->prev.stride = conf->dev_sectors;
|
|
|
+ }
|
|
|
spin_lock_init(&conf->device_lock);
|
|
|
INIT_LIST_HEAD(&conf->retry_list);
|
|
|
|
|
@@ -3263,8 +3429,9 @@ static struct r10conf *setup_conf(struct mddev *mddev)
|
|
|
return conf;
|
|
|
|
|
|
out:
|
|
|
- printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
|
|
|
- mdname(mddev));
|
|
|
+ if (err == -ENOMEM)
|
|
|
+ printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
|
|
|
+ mdname(mddev));
|
|
|
if (conf) {
|
|
|
if (conf->r10bio_pool)
|
|
|
mempool_destroy(conf->r10bio_pool);
|
|
@@ -3282,12 +3449,8 @@ static int run(struct mddev *mddev)
|
|
|
struct mirror_info *disk;
|
|
|
struct md_rdev *rdev;
|
|
|
sector_t size;
|
|
|
-
|
|
|
- /*
|
|
|
- * copy the already verified devices into our private RAID10
|
|
|
- * bookkeeping area. [whatever we allocate in run(),
|
|
|
- * should be freed in stop()]
|
|
|
- */
|
|
|
+ sector_t min_offset_diff = 0;
|
|
|
+ int first = 1;
|
|
|
|
|
|
if (mddev->private == NULL) {
|
|
|
conf = setup_conf(mddev);
|
|
@@ -3304,17 +3467,20 @@ static int run(struct mddev *mddev)
|
|
|
|
|
|
chunk_size = mddev->chunk_sectors << 9;
|
|
|
blk_queue_io_min(mddev->queue, chunk_size);
|
|
|
- if (conf->raid_disks % conf->near_copies)
|
|
|
- blk_queue_io_opt(mddev->queue, chunk_size * conf->raid_disks);
|
|
|
+ if (conf->geo.raid_disks % conf->geo.near_copies)
|
|
|
+ blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
|
|
|
else
|
|
|
blk_queue_io_opt(mddev->queue, chunk_size *
|
|
|
- (conf->raid_disks / conf->near_copies));
|
|
|
+ (conf->geo.raid_disks / conf->geo.near_copies));
|
|
|
|
|
|
rdev_for_each(rdev, mddev) {
|
|
|
+ long long diff;
|
|
|
|
|
|
disk_idx = rdev->raid_disk;
|
|
|
- if (disk_idx >= conf->raid_disks
|
|
|
- || disk_idx < 0)
|
|
|
+ if (disk_idx < 0)
|
|
|
+ continue;
|
|
|
+ if (disk_idx >= conf->geo.raid_disks &&
|
|
|
+ disk_idx >= conf->prev.raid_disks)
|
|
|
continue;
|
|
|
disk = conf->mirrors + disk_idx;
|
|
|
|
|
@@ -3327,12 +3493,20 @@ static int run(struct mddev *mddev)
|
|
|
goto out_free_conf;
|
|
|
disk->rdev = rdev;
|
|
|
}
|
|
|
+ diff = (rdev->new_data_offset - rdev->data_offset);
|
|
|
+ if (!mddev->reshape_backwards)
|
|
|
+ diff = -diff;
|
|
|
+ if (diff < 0)
|
|
|
+ diff = 0;
|
|
|
+ if (first || diff < min_offset_diff)
|
|
|
+ min_offset_diff = diff;
|
|
|
|
|
|
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
|
|
rdev->data_offset << 9);
|
|
|
|
|
|
disk->head_position = 0;
|
|
|
}
|
|
|
+
|
|
|
/* need to check that every block has at least one working mirror */
|
|
|
if (!enough(conf, -1)) {
|
|
|
printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
|
|
@@ -3340,8 +3514,21 @@ static int run(struct mddev *mddev)
|
|
|
goto out_free_conf;
|
|
|
}
|
|
|
|
|
|
+ if (conf->reshape_progress != MaxSector) {
|
|
|
+ /* must ensure that shape change is supported */
|
|
|
+ if (conf->geo.far_copies != 1 &&
|
|
|
+ conf->geo.far_offset == 0)
|
|
|
+ goto out_free_conf;
|
|
|
+ if (conf->prev.far_copies != 1 &&
|
|
|
+ conf->geo.far_offset == 0)
|
|
|
+ goto out_free_conf;
|
|
|
+ }
|
|
|
+
|
|
|
mddev->degraded = 0;
|
|
|
- for (i = 0; i < conf->raid_disks; i++) {
|
|
|
+ for (i = 0;
|
|
|
+ i < conf->geo.raid_disks
|
|
|
+ || i < conf->prev.raid_disks;
|
|
|
+ i++) {
|
|
|
|
|
|
disk = conf->mirrors + i;
|
|
|
|
|
@@ -3368,8 +3555,8 @@ static int run(struct mddev *mddev)
|
|
|
mdname(mddev));
|
|
|
printk(KERN_INFO
|
|
|
"md/raid10:%s: active with %d out of %d devices\n",
|
|
|
- mdname(mddev), conf->raid_disks - mddev->degraded,
|
|
|
- conf->raid_disks);
|
|
|
+ mdname(mddev), conf->geo.raid_disks - mddev->degraded,
|
|
|
+ conf->geo.raid_disks);
|
|
|
/*
|
|
|
* Ok, everything is just fine now
|
|
|
*/
|
|
@@ -3386,11 +3573,11 @@ static int run(struct mddev *mddev)
|
|
|
* maybe...
|
|
|
*/
|
|
|
{
|
|
|
- int stripe = conf->raid_disks *
|
|
|
+ int stripe = conf->geo.raid_disks *
|
|
|
((mddev->chunk_sectors << 9) / PAGE_SIZE);
|
|
|
- stripe /= conf->near_copies;
|
|
|
- if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
|
|
|
- mddev->queue->backing_dev_info.ra_pages = 2* stripe;
|
|
|
+ stripe /= conf->geo.near_copies;
|
|
|
+ if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
|
|
|
+ mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
|
|
|
}
|
|
|
|
|
|
blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
|
|
@@ -3398,6 +3585,30 @@ static int run(struct mddev *mddev)
|
|
|
if (md_integrity_register(mddev))
|
|
|
goto out_free_conf;
|
|
|
|
|
|
+ if (conf->reshape_progress != MaxSector) {
|
|
|
+ unsigned long before_length, after_length;
|
|
|
+
|
|
|
+ before_length = ((1 << conf->prev.chunk_shift) *
|
|
|
+ conf->prev.far_copies);
|
|
|
+ after_length = ((1 << conf->geo.chunk_shift) *
|
|
|
+ conf->geo.far_copies);
|
|
|
+
|
|
|
+ if (max(before_length, after_length) > min_offset_diff) {
|
|
|
+ /* This cannot work */
|
|
|
+ printk("md/raid10: offset difference not enough to continue reshape\n");
|
|
|
+ goto out_free_conf;
|
|
|
+ }
|
|
|
+ conf->offset_diff = min_offset_diff;
|
|
|
+
|
|
|
+ conf->reshape_safe = conf->reshape_progress;
|
|
|
+ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
|
|
|
+ clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
|
|
|
+ set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
|
|
|
+ set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
|
|
|
+ mddev->sync_thread = md_register_thread(md_do_sync, mddev,
|
|
|
+ "reshape");
|
|
|
+ }
|
|
|
+
|
|
|
return 0;
|
|
|
|
|
|
out_free_conf:
|
|
@@ -3460,14 +3671,23 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
|
|
|
struct r10conf *conf = mddev->private;
|
|
|
sector_t oldsize, size;
|
|
|
|
|
|
- if (conf->far_copies > 1 && !conf->far_offset)
|
|
|
+ if (mddev->reshape_position != MaxSector)
|
|
|
+ return -EBUSY;
|
|
|
+
|
|
|
+ if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
|
|
|
return -EINVAL;
|
|
|
|
|
|
oldsize = raid10_size(mddev, 0, 0);
|
|
|
size = raid10_size(mddev, sectors, 0);
|
|
|
- md_set_array_sectors(mddev, size);
|
|
|
- if (mddev->array_sectors > size)
|
|
|
+ if (mddev->external_size &&
|
|
|
+ mddev->array_sectors > size)
|
|
|
return -EINVAL;
|
|
|
+ if (mddev->bitmap) {
|
|
|
+ int ret = bitmap_resize(mddev->bitmap, size, 0, 0);
|
|
|
+ if (ret)
|
|
|
+ return ret;
|
|
|
+ }
|
|
|
+ md_set_array_sectors(mddev, size);
|
|
|
set_capacity(mddev->gendisk, mddev->array_sectors);
|
|
|
revalidate_disk(mddev->gendisk);
|
|
|
if (sectors > mddev->dev_sectors &&
|
|
@@ -3534,6 +3754,758 @@ static void *raid10_takeover(struct mddev *mddev)
|
|
|
return ERR_PTR(-EINVAL);
|
|
|
}
|
|
|
|
|
|
+static int raid10_check_reshape(struct mddev *mddev)
|
|
|
+{
|
|
|
+ /* Called when there is a request to change
|
|
|
+ * - layout (to ->new_layout)
|
|
|
+ * - chunk size (to ->new_chunk_sectors)
|
|
|
+ * - raid_disks (by delta_disks)
|
|
|
+ * or when trying to restart a reshape that was ongoing.
|
|
|
+ *
|
|
|
+ * We need to validate the request and possibly allocate
|
|
|
+ * space if that might be an issue later.
|
|
|
+ *
|
|
|
+ * Currently we reject any reshape of a 'far' mode array,
|
|
|
+ * allow chunk size to change if new is generally acceptable,
|
|
|
+ * allow raid_disks to increase, and allow
|
|
|
+ * a switch between 'near' mode and 'offset' mode.
|
|
|
+ */
|
|
|
+ struct r10conf *conf = mddev->private;
|
|
|
+ struct geom geo;
|
|
|
+
|
|
|
+ if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ if (setup_geo(&geo, mddev, geo_start) != conf->copies)
|
|
|
+ /* mustn't change number of copies */
|
|
|
+ return -EINVAL;
|
|
|
+ if (geo.far_copies > 1 && !geo.far_offset)
|
|
|
+ /* Cannot switch to 'far' mode */
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ if (mddev->array_sectors & geo.chunk_mask)
|
|
|
+ /* not factor of array size */
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ if (!enough(conf, -1))
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ kfree(conf->mirrors_new);
|
|
|
+ conf->mirrors_new = NULL;
|
|
|
+ if (mddev->delta_disks > 0) {
|
|
|
+ /* allocate new 'mirrors' list */
|
|
|
+ conf->mirrors_new = kzalloc(
|
|
|
+ sizeof(struct mirror_info)
|
|
|
+ *(mddev->raid_disks +
|
|
|
+ mddev->delta_disks),
|
|
|
+ GFP_KERNEL);
|
|
|
+ if (!conf->mirrors_new)
|
|
|
+ return -ENOMEM;
|
|
|
+ }
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Need to check if array has failed when deciding whether to:
|
|
|
+ * - start an array
|
|
|
+ * - remove non-faulty devices
|
|
|
+ * - add a spare
|
|
|
+ * - allow a reshape
|
|
|
+ * This determination is simple when no reshape is happening.
|
|
|
+ * However if there is a reshape, we need to carefully check
|
|
|
+ * both the before and after sections.
|
|
|
+ * This is because some failed devices may only affect one
|
|
|
+ * of the two sections, and some non-in_sync devices may
|
|
|
+ * be insync in the section most affected by failed devices.
|
|
|
+ */
|
|
|
+static int calc_degraded(struct r10conf *conf)
|
|
|
+{
|
|
|
+ int degraded, degraded2;
|
|
|
+ int i;
|
|
|
+
|
|
|
+ rcu_read_lock();
|
|
|
+ degraded = 0;
|
|
|
+ /* 'prev' section first */
|
|
|
+ for (i = 0; i < conf->prev.raid_disks; i++) {
|
|
|
+ struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
|
|
|
+ if (!rdev || test_bit(Faulty, &rdev->flags))
|
|
|
+ degraded++;
|
|
|
+ else if (!test_bit(In_sync, &rdev->flags))
|
|
|
+ /* When we can reduce the number of devices in
|
|
|
+ * an array, this might not contribute to
|
|
|
+ * 'degraded'. It does now.
|
|
|
+ */
|
|
|
+ degraded++;
|
|
|
+ }
|
|
|
+ rcu_read_unlock();
|
|
|
+ if (conf->geo.raid_disks == conf->prev.raid_disks)
|
|
|
+ return degraded;
|
|
|
+ rcu_read_lock();
|
|
|
+ degraded2 = 0;
|
|
|
+ for (i = 0; i < conf->geo.raid_disks; i++) {
|
|
|
+ struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
|
|
|
+ if (!rdev || test_bit(Faulty, &rdev->flags))
|
|
|
+ degraded2++;
|
|
|
+ else if (!test_bit(In_sync, &rdev->flags)) {
|
|
|
+ /* If reshape is increasing the number of devices,
|
|
|
+ * this section has already been recovered, so
|
|
|
+ * it doesn't contribute to degraded.
|
|
|
+ * else it does.
|
|
|
+ */
|
|
|
+ if (conf->geo.raid_disks <= conf->prev.raid_disks)
|
|
|
+ degraded2++;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ rcu_read_unlock();
|
|
|
+ if (degraded2 > degraded)
|
|
|
+ return degraded2;
|
|
|
+ return degraded;
|
|
|
+}
|
|
|
+
|
|
|
+static int raid10_start_reshape(struct mddev *mddev)
|
|
|
+{
|
|
|
+ /* A 'reshape' has been requested. This commits
|
|
|
+ * the various 'new' fields and sets MD_RECOVERY_RESHAPE
|
|
|
+ * This also checks if there are enough spares and adds them
|
|
|
+ * to the array.
|
|
|
+ * We currently require enough spares to make the final
|
|
|
+ * array non-degraded. We also require that the difference
|
|
|
+ * between old and new data_offset - on each device - is
|
|
|
+ * enough that we never risk over-writing.
|
|
|
+ */
|
|
|
+
|
|
|
+ unsigned long before_length, after_length;
|
|
|
+ sector_t min_offset_diff = 0;
|
|
|
+ int first = 1;
|
|
|
+ struct geom new;
|
|
|
+ struct r10conf *conf = mddev->private;
|
|
|
+ struct md_rdev *rdev;
|
|
|
+ int spares = 0;
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
|
|
|
+ return -EBUSY;
|
|
|
+
|
|
|
+ if (setup_geo(&new, mddev, geo_start) != conf->copies)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ before_length = ((1 << conf->prev.chunk_shift) *
|
|
|
+ conf->prev.far_copies);
|
|
|
+ after_length = ((1 << conf->geo.chunk_shift) *
|
|
|
+ conf->geo.far_copies);
|
|
|
+
|
|
|
+ rdev_for_each(rdev, mddev) {
|
|
|
+ if (!test_bit(In_sync, &rdev->flags)
|
|
|
+ && !test_bit(Faulty, &rdev->flags))
|
|
|
+ spares++;
|
|
|
+ if (rdev->raid_disk >= 0) {
|
|
|
+ long long diff = (rdev->new_data_offset
|
|
|
+ - rdev->data_offset);
|
|
|
+ if (!mddev->reshape_backwards)
|
|
|
+ diff = -diff;
|
|
|
+ if (diff < 0)
|
|
|
+ diff = 0;
|
|
|
+ if (first || diff < min_offset_diff)
|
|
|
+ min_offset_diff = diff;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ if (max(before_length, after_length) > min_offset_diff)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ if (spares < mddev->delta_disks)
|
|
|
+ return -EINVAL;
|
|
|
+
|
|
|
+ conf->offset_diff = min_offset_diff;
|
|
|
+ spin_lock_irq(&conf->device_lock);
|
|
|
+ if (conf->mirrors_new) {
|
|
|
+ memcpy(conf->mirrors_new, conf->mirrors,
|
|
|
+ sizeof(struct mirror_info)*conf->prev.raid_disks);
|
|
|
+ smp_mb();
|
|
|
+ kfree(conf->mirrors_old); /* FIXME and elsewhere */
|
|
|
+ conf->mirrors_old = conf->mirrors;
|
|
|
+ conf->mirrors = conf->mirrors_new;
|
|
|
+ conf->mirrors_new = NULL;
|
|
|
+ }
|
|
|
+ setup_geo(&conf->geo, mddev, geo_start);
|
|
|
+ smp_mb();
|
|
|
+ if (mddev->reshape_backwards) {
|
|
|
+ sector_t size = raid10_size(mddev, 0, 0);
|
|
|
+ if (size < mddev->array_sectors) {
|
|
|
+ spin_unlock_irq(&conf->device_lock);
|
|
|
+ printk(KERN_ERR "md/raid10:%s: array size must be reduce before number of disks\n",
|
|
|
+ mdname(mddev));
|
|
|
+ return -EINVAL;
|
|
|
+ }
|
|
|
+ mddev->resync_max_sectors = size;
|
|
|
+ conf->reshape_progress = size;
|
|
|
+ } else
|
|
|
+ conf->reshape_progress = 0;
|
|
|
+ spin_unlock_irq(&conf->device_lock);
|
|
|
+
|
|
|
+ if (mddev->delta_disks && mddev->bitmap) {
|
|
|
+ ret = bitmap_resize(mddev->bitmap,
|
|
|
+ raid10_size(mddev, 0,
|
|
|
+ conf->geo.raid_disks),
|
|
|
+ 0, 0);
|
|
|
+ if (ret)
|
|
|
+ goto abort;
|
|
|
+ }
|
|
|
+ if (mddev->delta_disks > 0) {
|
|
|
+ rdev_for_each(rdev, mddev)
|
|
|
+ if (rdev->raid_disk < 0 &&
|
|
|
+ !test_bit(Faulty, &rdev->flags)) {
|
|
|
+ if (raid10_add_disk(mddev, rdev) == 0) {
|
|
|
+ if (rdev->raid_disk >=
|
|
|
+ conf->prev.raid_disks)
|
|
|
+ set_bit(In_sync, &rdev->flags);
|
|
|
+ else
|
|
|
+ rdev->recovery_offset = 0;
|
|
|
+
|
|
|
+ if (sysfs_link_rdev(mddev, rdev))
|
|
|
+ /* Failure here is OK */;
|
|
|
+ }
|
|
|
+ } else if (rdev->raid_disk >= conf->prev.raid_disks
|
|
|
+ && !test_bit(Faulty, &rdev->flags)) {
|
|
|
+ /* This is a spare that was manually added */
|
|
|
+ set_bit(In_sync, &rdev->flags);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ /* When a reshape changes the number of devices,
|
|
|
+ * ->degraded is measured against the larger of the
|
|
|
+ * pre and post numbers.
|
|
|
+ */
|
|
|
+ spin_lock_irq(&conf->device_lock);
|
|
|
+ mddev->degraded = calc_degraded(conf);
|
|
|
+ spin_unlock_irq(&conf->device_lock);
|
|
|
+ mddev->raid_disks = conf->geo.raid_disks;
|
|
|
+ mddev->reshape_position = conf->reshape_progress;
|
|
|
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
|
|
|
+
|
|
|
+ clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
|
|
|
+ clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
|
|
|
+ set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
|
|
|
+ set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
|
|
|
+
|
|
|
+ mddev->sync_thread = md_register_thread(md_do_sync, mddev,
|
|
|
+ "reshape");
|
|
|
+ if (!mddev->sync_thread) {
|
|
|
+ ret = -EAGAIN;
|
|
|
+ goto abort;
|
|
|
+ }
|
|
|
+ conf->reshape_checkpoint = jiffies;
|
|
|
+ md_wakeup_thread(mddev->sync_thread);
|
|
|
+ md_new_event(mddev);
|
|
|
+ return 0;
|
|
|
+
|
|
|
+abort:
|
|
|
+ mddev->recovery = 0;
|
|
|
+ spin_lock_irq(&conf->device_lock);
|
|
|
+ conf->geo = conf->prev;
|
|
|
+ mddev->raid_disks = conf->geo.raid_disks;
|
|
|
+ rdev_for_each(rdev, mddev)
|
|
|
+ rdev->new_data_offset = rdev->data_offset;
|
|
|
+ smp_wmb();
|
|
|
+ conf->reshape_progress = MaxSector;
|
|
|
+ mddev->reshape_position = MaxSector;
|
|
|
+ spin_unlock_irq(&conf->device_lock);
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
+/* Calculate the last device-address that could contain
|
|
|
+ * any block from the chunk that includes the array-address 's'
|
|
|
+ * and report the next address.
|
|
|
+ * i.e. the address returned will be chunk-aligned and after
|
|
|
+ * any data that is in the chunk containing 's'.
|
|
|
+ */
|
|
|
+static sector_t last_dev_address(sector_t s, struct geom *geo)
|
|
|
+{
|
|
|
+ s = (s | geo->chunk_mask) + 1;
|
|
|
+ s >>= geo->chunk_shift;
|
|
|
+ s *= geo->near_copies;
|
|
|
+ s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
|
|
|
+ s *= geo->far_copies;
|
|
|
+ s <<= geo->chunk_shift;
|
|
|
+ return s;
|
|
|
+}
|
|
|
+
|
|
|
+/* Calculate the first device-address that could contain
|
|
|
+ * any block from the chunk that includes the array-address 's'.
|
|
|
+ * This too will be the start of a chunk
|
|
|
+ */
|
|
|
+static sector_t first_dev_address(sector_t s, struct geom *geo)
|
|
|
+{
|
|
|
+ s >>= geo->chunk_shift;
|
|
|
+ s *= geo->near_copies;
|
|
|
+ sector_div(s, geo->raid_disks);
|
|
|
+ s *= geo->far_copies;
|
|
|
+ s <<= geo->chunk_shift;
|
|
|
+ return s;
|
|
|
+}
|
|
|
+
|
|
|
+static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
|
|
|
+ int *skipped)
|
|
|
+{
|
|
|
+ /* We simply copy at most one chunk (smallest of old and new)
|
|
|
+ * at a time, possibly less if that exceeds RESYNC_PAGES,
|
|
|
+ * or we hit a bad block or something.
|
|
|
+ * This might mean we pause for normal IO in the middle of
|
|
|
+ * a chunk, but that is not a problem was mddev->reshape_position
|
|
|
+ * can record any location.
|
|
|
+ *
|
|
|
+ * If we will want to write to a location that isn't
|
|
|
+ * yet recorded as 'safe' (i.e. in metadata on disk) then
|
|
|
+ * we need to flush all reshape requests and update the metadata.
|
|
|
+ *
|
|
|
+ * When reshaping forwards (e.g. to more devices), we interpret
|
|
|
+ * 'safe' as the earliest block which might not have been copied
|
|
|
+ * down yet. We divide this by previous stripe size and multiply
|
|
|
+ * by previous stripe length to get lowest device offset that we
|
|
|
+ * cannot write to yet.
|
|
|
+ * We interpret 'sector_nr' as an address that we want to write to.
|
|
|
+ * From this we use last_device_address() to find where we might
|
|
|
+ * write to, and first_device_address on the 'safe' position.
|
|
|
+ * If this 'next' write position is after the 'safe' position,
|
|
|
+ * we must update the metadata to increase the 'safe' position.
|
|
|
+ *
|
|
|
+ * When reshaping backwards, we round in the opposite direction
|
|
|
+ * and perform the reverse test: next write position must not be
|
|
|
+ * less than current safe position.
|
|
|
+ *
|
|
|
+ * In all this the minimum difference in data offsets
|
|
|
+ * (conf->offset_diff - always positive) allows a bit of slack,
|
|
|
+ * so next can be after 'safe', but not by more than offset_disk
|
|
|
+ *
|
|
|
+ * We need to prepare all the bios here before we start any IO
|
|
|
+ * to ensure the size we choose is acceptable to all devices.
|
|
|
+ * The means one for each copy for write-out and an extra one for
|
|
|
+ * read-in.
|
|
|
+ * We store the read-in bio in ->master_bio and the others in
|
|
|
+ * ->devs[x].bio and ->devs[x].repl_bio.
|
|
|
+ */
|
|
|
+ struct r10conf *conf = mddev->private;
|
|
|
+ struct r10bio *r10_bio;
|
|
|
+ sector_t next, safe, last;
|
|
|
+ int max_sectors;
|
|
|
+ int nr_sectors;
|
|
|
+ int s;
|
|
|
+ struct md_rdev *rdev;
|
|
|
+ int need_flush = 0;
|
|
|
+ struct bio *blist;
|
|
|
+ struct bio *bio, *read_bio;
|
|
|
+ int sectors_done = 0;
|
|
|
+
|
|
|
+ if (sector_nr == 0) {
|
|
|
+ /* If restarting in the middle, skip the initial sectors */
|
|
|
+ if (mddev->reshape_backwards &&
|
|
|
+ conf->reshape_progress < raid10_size(mddev, 0, 0)) {
|
|
|
+ sector_nr = (raid10_size(mddev, 0, 0)
|
|
|
+ - conf->reshape_progress);
|
|
|
+ } else if (!mddev->reshape_backwards &&
|
|
|
+ conf->reshape_progress > 0)
|
|
|
+ sector_nr = conf->reshape_progress;
|
|
|
+ if (sector_nr) {
|
|
|
+ mddev->curr_resync_completed = sector_nr;
|
|
|
+ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
|
|
|
+ *skipped = 1;
|
|
|
+ return sector_nr;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ /* We don't use sector_nr to track where we are up to
|
|
|
+ * as that doesn't work well for ->reshape_backwards.
|
|
|
+ * So just use ->reshape_progress.
|
|
|
+ */
|
|
|
+ if (mddev->reshape_backwards) {
|
|
|
+ /* 'next' is the earliest device address that we might
|
|
|
+ * write to for this chunk in the new layout
|
|
|
+ */
|
|
|
+ next = first_dev_address(conf->reshape_progress - 1,
|
|
|
+ &conf->geo);
|
|
|
+
|
|
|
+ /* 'safe' is the last device address that we might read from
|
|
|
+ * in the old layout after a restart
|
|
|
+ */
|
|
|
+ safe = last_dev_address(conf->reshape_safe - 1,
|
|
|
+ &conf->prev);
|
|
|
+
|
|
|
+ if (next + conf->offset_diff < safe)
|
|
|
+ need_flush = 1;
|
|
|
+
|
|
|
+ last = conf->reshape_progress - 1;
|
|
|
+ sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
|
|
|
+ & conf->prev.chunk_mask);
|
|
|
+ if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
|
|
|
+ sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
|
|
|
+ } else {
|
|
|
+ /* 'next' is after the last device address that we
|
|
|
+ * might write to for this chunk in the new layout
|
|
|
+ */
|
|
|
+ next = last_dev_address(conf->reshape_progress, &conf->geo);
|
|
|
+
|
|
|
+ /* 'safe' is the earliest device address that we might
|
|
|
+ * read from in the old layout after a restart
|
|
|
+ */
|
|
|
+ safe = first_dev_address(conf->reshape_safe, &conf->prev);
|
|
|
+
|
|
|
+ /* Need to update metadata if 'next' might be beyond 'safe'
|
|
|
+ * as that would possibly corrupt data
|
|
|
+ */
|
|
|
+ if (next > safe + conf->offset_diff)
|
|
|
+ need_flush = 1;
|
|
|
+
|
|
|
+ sector_nr = conf->reshape_progress;
|
|
|
+ last = sector_nr | (conf->geo.chunk_mask
|
|
|
+ & conf->prev.chunk_mask);
|
|
|
+
|
|
|
+ if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
|
|
|
+ last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
|
|
|
+ }
|
|
|
+
+	if (need_flush ||
+	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
+		/* Need to update reshape_position in metadata */
+		wait_barrier(conf);
+		mddev->reshape_position = conf->reshape_progress;
+		if (mddev->reshape_backwards)
+			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
+				- conf->reshape_progress;
+		else
+			mddev->curr_resync_completed = conf->reshape_progress;
+		conf->reshape_checkpoint = jiffies;
+		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		md_wakeup_thread(mddev->thread);
+		wait_event(mddev->sb_wait, mddev->flags == 0 ||
+			   kthread_should_stop());
+		conf->reshape_safe = mddev->reshape_position;
+		allow_barrier(conf);
+	}
+
+read_more:
+	/* Now schedule reads for blocks from sector_nr to last */
+	r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+	raise_barrier(conf, sectors_done != 0);
+	atomic_set(&r10_bio->remaining, 0);
+	r10_bio->mddev = mddev;
+	r10_bio->sector = sector_nr;
+	set_bit(R10BIO_IsReshape, &r10_bio->state);
+	r10_bio->sectors = last - sector_nr + 1;
+	rdev = read_balance(conf, r10_bio, &max_sectors);
+	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
+
+	if (!rdev) {
+		/* Cannot read from here, so need to record bad blocks
+		 * on all the target devices.
+		 */
+		// FIXME
+		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+		return sectors_done;
+	}
+
+	read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
+
+	read_bio->bi_bdev = rdev->bdev;
+	read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+			       + rdev->data_offset);
+	read_bio->bi_private = r10_bio;
+	read_bio->bi_end_io = end_sync_read;
+	read_bio->bi_rw = READ;
+	read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
+	read_bio->bi_flags |= 1 << BIO_UPTODATE;
+	read_bio->bi_vcnt = 0;
+	read_bio->bi_idx = 0;
+	read_bio->bi_size = 0;
+	r10_bio->master_bio = read_bio;
+	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
+
+	/* Now find the locations in the new layout */
+	__raid10_find_phys(&conf->geo, r10_bio);
+
+	blist = read_bio;
+	read_bio->bi_next = NULL;
+
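+	/* Set up a write bio to each copy (and any replacement device) at
+	 * its address in the new layout, chained behind the read bio on
+	 * 'blist'.
+	 */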
+	for (s = 0; s < conf->copies*2; s++) {
+		struct bio *b;
+		int d = r10_bio->devs[s/2].devnum;
+		struct md_rdev *rdev2;
+		if (s&1) {
+			rdev2 = conf->mirrors[d].replacement;
+			b = r10_bio->devs[s/2].repl_bio;
+		} else {
+			rdev2 = conf->mirrors[d].rdev;
+			b = r10_bio->devs[s/2].bio;
+		}
+		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
+			continue;
+		b->bi_bdev = rdev2->bdev;
+		b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
+		b->bi_private = r10_bio;
+		b->bi_end_io = end_reshape_write;
+		b->bi_rw = WRITE;
+		b->bi_flags &= ~(BIO_POOL_MASK - 1);
+		b->bi_flags |= 1 << BIO_UPTODATE;
+		b->bi_next = blist;
+		b->bi_vcnt = 0;
+		b->bi_idx = 0;
+		b->bi_size = 0;
+		blist = b;
+	}
+
+	/* Now add as many pages as possible to all of these bios. */
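+	/* The read bio and every write bio share the same pages, so data
+	 * read from the old layout is written unmodified to the new one.
+	 */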
+
+	nr_sectors = 0;
+	for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
+		struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page;
+		int len = (max_sectors - s) << 9;
+		if (len > PAGE_SIZE)
+			len = PAGE_SIZE;
+		for (bio = blist; bio ; bio = bio->bi_next) {
+			struct bio *bio2;
+			if (bio_add_page(bio, page, len, 0))
+				continue;
+
+			/* Didn't fit, must stop */
+			for (bio2 = blist;
+			     bio2 && bio2 != bio;
+			     bio2 = bio2->bi_next) {
+				/* Remove last page from this bio */
+				bio2->bi_vcnt--;
+				bio2->bi_size -= len;
+				bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
+			}
+			goto bio_full;
+		}
+		sector_nr += len >> 9;
+		nr_sectors += len >> 9;
+	}
+bio_full:
+	r10_bio->sectors = nr_sectors;
+
+	/* Now submit the read */
+	md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
+	atomic_inc(&r10_bio->remaining);
+	read_bio->bi_next = NULL;
+	generic_make_request(read_bio);
+	sector_nr += nr_sectors;
+	sectors_done += nr_sectors;
+	if (sector_nr <= last)
+		goto read_more;
+
+	/* Now that we have done the whole section we can
+	 * update reshape_progress
+	 */
+	if (mddev->reshape_backwards)
+		conf->reshape_progress -= sectors_done;
+	else
+		conf->reshape_progress += sectors_done;
+
+	return sectors_done;
+}
+
+static void end_reshape_request(struct r10bio *r10_bio);
+static int handle_reshape_read_error(struct mddev *mddev,
+				     struct r10bio *r10_bio);
+static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
+{
+	/* Reshape read completed. Hopefully we have a block
+	 * to write out.
+	 * If we got a read error then we do sync 1-page reads from
+	 * elsewhere until we find the data - or give up.
+	 */
+	struct r10conf *conf = mddev->private;
+	int s;
+
+	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
+		if (handle_reshape_read_error(mddev, r10_bio) < 0) {
+			/* Reshape has been aborted */
+			md_done_sync(mddev, r10_bio->sectors, 0);
+			return;
+		}
+
+	/* We definitely have the data in the pages, schedule the
+	 * writes.
+	 */
+	atomic_set(&r10_bio->remaining, 1);
+	for (s = 0; s < conf->copies*2; s++) {
+		struct bio *b;
+		int d = r10_bio->devs[s/2].devnum;
+		struct md_rdev *rdev;
+		if (s&1) {
+			rdev = conf->mirrors[d].replacement;
+			b = r10_bio->devs[s/2].repl_bio;
+		} else {
+			rdev = conf->mirrors[d].rdev;
+			b = r10_bio->devs[s/2].bio;
+		}
+		if (!rdev || test_bit(Faulty, &rdev->flags))
+			continue;
+		atomic_inc(&rdev->nr_pending);
+		md_sync_acct(b->bi_bdev, r10_bio->sectors);
+		atomic_inc(&r10_bio->remaining);
+		b->bi_next = NULL;
+		generic_make_request(b);
+	}
+	end_reshape_request(r10_bio);
+}
+
+static void end_reshape(struct r10conf *conf)
+{
+	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
+		return;
+
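+	/* The reshape completed: the new geometry becomes the only
+	 * geometry and the reshape is marked as finished.
+	 */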
+	spin_lock_irq(&conf->device_lock);
+	conf->prev = conf->geo;
+	md_finish_reshape(conf->mddev);
+	smp_wmb();
+	conf->reshape_progress = MaxSector;
+	spin_unlock_irq(&conf->device_lock);
+
+	/* read-ahead size must cover two whole stripes, which is
+	 * 2 * (data disks) * chunk size, where data disks = raid_disks / near_copies
+	 */
+	if (conf->mddev->queue) {
+		int stripe = conf->geo.raid_disks *
+			((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
+		stripe /= conf->geo.near_copies;
+		if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
+			conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+	}
+	conf->fullsync = 0;
+}
+
+
+static int handle_reshape_read_error(struct mddev *mddev,
+				     struct r10bio *r10_bio)
+{
+	/* Use sync reads to get the blocks from somewhere else */
+	int sectors = r10_bio->sectors;
+	struct r10bio r10b;
+	struct r10conf *conf = mddev->private;
+	int slot = 0;
+	int idx = 0;
+	struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec;
+
+	r10b.sector = r10_bio->sector;
+	__raid10_find_phys(&conf->prev, &r10b);
+
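+	/* Retry one page at a time, trying each in-sync copy in the old
+	 * layout until the read succeeds or every copy has failed.
+	 */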
+	while (sectors) {
+		int s = sectors;
+		int success = 0;
+		int first_slot = slot;
+
+		if (s > (PAGE_SIZE >> 9))
+			s = PAGE_SIZE >> 9;
+
+		while (!success) {
+			int d = r10b.devs[slot].devnum;
+			struct md_rdev *rdev = conf->mirrors[d].rdev;
+			sector_t addr;
+			if (rdev == NULL ||
+			    test_bit(Faulty, &rdev->flags) ||
+			    !test_bit(In_sync, &rdev->flags))
+				goto failed;
+
+			addr = r10b.devs[slot].addr + idx * PAGE_SIZE;
+			success = sync_page_io(rdev,
+					       addr,
+					       s << 9,
+					       bvec[idx].bv_page,
+					       READ, false);
+			if (success)
+				break;
+		failed:
+			slot++;
+			if (slot >= conf->copies)
+				slot = 0;
+			if (slot == first_slot)
+				break;
+		}
+		if (!success) {
+			/* couldn't read this block, must give up */
+			set_bit(MD_RECOVERY_INTR,
+				&mddev->recovery);
+			return -EIO;
+		}
+		sectors -= s;
+		idx++;
+	}
+	return 0;
+}
+
+static void end_reshape_write(struct bio *bio, int error)
+{
+	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	struct r10bio *r10_bio = bio->bi_private;
+	struct mddev *mddev = r10_bio->mddev;
+	struct r10conf *conf = mddev->private;
+	int d;
+	int slot;
+	int repl;
+	struct md_rdev *rdev = NULL;
+
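+	/* The write went either to the main device or to its replacement;
+	 * attribute the completion (and any error) to that rdev.
+	 */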
+	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+	if (repl)
+		rdev = conf->mirrors[d].replacement;
+	if (!rdev) {
+		smp_mb();
+		rdev = conf->mirrors[d].rdev;
+	}
+
+	if (!uptodate) {
+		/* FIXME should record badblock */
+		md_error(mddev, rdev);
+	}
+
+	rdev_dec_pending(rdev, mddev);
+	end_reshape_request(r10_bio);
+}
+
+static void end_reshape_request(struct r10bio *r10_bio)
+{
+	if (!atomic_dec_and_test(&r10_bio->remaining))
+		return;
+	md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
+	bio_put(r10_bio->master_bio);
+	put_buf(r10_bio);
+}
+
+static void raid10_finish_reshape(struct mddev *mddev)
+{
+	struct r10conf *conf = mddev->private;
+
+	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+		return;
+
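+	/* When disks were added, expose the larger array size; when disks
+	 * were removed, mark the devices beyond the new layout as not
+	 * in-sync.
+	 */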
+	if (mddev->delta_disks > 0) {
+		sector_t size = raid10_size(mddev, 0, 0);
+		md_set_array_sectors(mddev, size);
+		if (mddev->recovery_cp > mddev->resync_max_sectors) {
+			mddev->recovery_cp = mddev->resync_max_sectors;
+			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+		}
+		mddev->resync_max_sectors = size;
+		set_capacity(mddev->gendisk, mddev->array_sectors);
+		revalidate_disk(mddev->gendisk);
+	} else {
+		int d;
+		for (d = conf->geo.raid_disks ;
+		     d < conf->geo.raid_disks - mddev->delta_disks;
+		     d++) {
+			struct md_rdev *rdev = conf->mirrors[d].rdev;
+			if (rdev)
+				clear_bit(In_sync, &rdev->flags);
+			rdev = conf->mirrors[d].replacement;
+			if (rdev)
+				clear_bit(In_sync, &rdev->flags);
+		}
+	}
+	mddev->layout = mddev->new_layout;
+	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
+	mddev->reshape_position = MaxSector;
+	mddev->delta_disks = 0;
+	mddev->reshape_backwards = 0;
+}
+
 static struct md_personality raid10_personality =
 {
 	.name		= "raid10",
@@ -3552,6 +4524,9 @@ static struct md_personality raid10_personality =
 	.size		= raid10_size,
 	.resize		= raid10_resize,
 	.takeover	= raid10_takeover,
+	.check_reshape	= raid10_check_reshape,
+	.start_reshape	= raid10_start_reshape,
+	.finish_reshape	= raid10_finish_reshape,
 };
 
 static int __init raid_init(void)