
md/raid5: switch to use conf->chunk_sectors in place of mddev->chunk_sectors where possible

The chunk_sectors and new_chunk_sectors fields of mddev can be changed
at any time (via sysfs) that the reconfig mutex can be taken.  So raid5
keeps internal copies in 'conf', which are stable except for a short
locked moment when a reshape starts or stops.

So any access that does not hold reconfig_mutex should use the 'conf'
values, not the 'mddev' values.  Several accesses currently don't.

This could result in corruption if new values were written at awkward
times.
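
For illustration (a sketch of the hazard, not code from this patch), an
I/O-path check that reads the mddev fields without reconfig_mutex, as
raid5_mergeable_bvec() did, can mix values from before and after a sysfs
write:

	/* Unsafe sketch: each load of an mddev field may observe a different
	 * sysfs update, so the result can combine old and new geometry.
	 */
	unsigned int chunk_sectors = mddev->chunk_sectors;

	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
		chunk_sectors = mddev->new_chunk_sectors;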

Also use min() or max() rather than open-coding the comparisons.
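
Roughly, the pattern applied throughout the diff below (a summary of the
change, not additional code) is:

	struct r5conf *conf = mddev->private;
	unsigned int chunk_sectors;
	sector_t reshape_sectors;

	/* Use the smaller of the old and new chunk size for merge checks... */
	chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);

	/* ...and the larger of the two when stepping through a reshape. */
	reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors);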

Signed-off-by: NeilBrown <neilb@suse.com>
NeilBrown, 10 years ago
commit 3cb5edf454
1 changed file with 14 additions and 14 deletions

drivers/md/raid5.c (+14 -14)

@@ -4676,9 +4676,10 @@ static int raid5_mergeable_bvec(struct mddev *mddev,
 				struct bvec_merge_data *bvm,
 				struct bio_vec *biovec)
 {
+	struct r5conf *conf = mddev->private;
 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
 	int max;
-	unsigned int chunk_sectors = mddev->chunk_sectors;
+	unsigned int chunk_sectors;
 	unsigned int bio_sectors = bvm->bi_size >> 9;
 
 	/*
@@ -4688,8 +4689,7 @@ static int raid5_mergeable_bvec(struct mddev *mddev,
 	if ((bvm->bi_rw & 1) == WRITE || mddev->degraded)
 		return biovec->bv_len;
 
-	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
-		chunk_sectors = mddev->new_chunk_sectors;
+	chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
 	max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
 	if (max < 0) max = 0;
 	if (max <= biovec->bv_len && bio_sectors == 0)
@@ -4700,12 +4700,12 @@ static int raid5_mergeable_bvec(struct mddev *mddev,
 
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
+	struct r5conf *conf = mddev->private;
 	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
-	unsigned int chunk_sectors = mddev->chunk_sectors;
+	unsigned int chunk_sectors;
 	unsigned int bio_sectors = bio_sectors(bio);
 
-	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
-		chunk_sectors = mddev->new_chunk_sectors;
+	chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
 	return  chunk_sectors >=
 		((sector & (chunk_sectors - 1)) + bio_sectors);
 }
@@ -5372,10 +5372,8 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk
 	 * If old and new chunk sizes differ, we need to process the
 	 * largest of these
 	 */
-	if (mddev->new_chunk_sectors > mddev->chunk_sectors)
-		reshape_sectors = mddev->new_chunk_sectors;
-	else
-		reshape_sectors = mddev->chunk_sectors;
+
+	reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors);
 
 	/* We update the metadata at least every 10 seconds, or when
 	 * the data about to be copied would over-write the source of
@@ -6260,8 +6258,8 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 		/* size is defined by the smallest of previous and new size */
 		raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
 
-	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
-	sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
+	sectors &= ~((sector_t)conf->chunk_sectors - 1);
+	sectors &= ~((sector_t)conf->prev_chunk_sectors - 1);
 	return sectors * (raid_disks - conf->max_degraded);
 }
 
@@ -6996,7 +6994,7 @@ static void status(struct seq_file *seq, struct mddev *mddev)
 	int i;
 
 	seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
-		mddev->chunk_sectors / 2, mddev->layout);
+		conf->chunk_sectors / 2, mddev->layout);
 	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
 	for (i = 0; i < conf->raid_disks; i++)
 		seq_printf (seq, "%s",
@@ -7202,7 +7200,9 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
 	 * worth it.
 	 */
 	sector_t newsize;
-	sectors &= ~((sector_t)mddev->chunk_sectors - 1);
+	struct r5conf *conf = mddev->private;
+
+	sectors &= ~((sector_t)conf->chunk_sectors - 1);
 	newsize = raid5_size(mddev, sectors, mddev->raid_disks);
 	if (mddev->external_size &&
 	    mddev->array_sectors > newsize)