@@ -29,9 +29,6 @@
  */
 #define MIN_RAID456_JOURNAL_SPACE (4*2048)
 
-/* Global list of all raid sets */
-static LIST_HEAD(raid_sets);
-
 static bool devices_handle_discard_safely = false;
 
 /*
@@ -227,7 +224,6 @@ struct rs_layout {
 
 struct raid_set {
 	struct dm_target *ti;
-	struct list_head list;
 
 	uint32_t stripe_cache_entries;
 	unsigned long ctr_flags;
@@ -273,19 +269,6 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
 	mddev->new_chunk_sectors = l->new_chunk_sectors;
 }
 
-/* Find any raid_set in active slot for @rs on global list */
-static struct raid_set *rs_find_active(struct raid_set *rs)
-{
-	struct raid_set *r;
-	struct mapped_device *md = dm_table_get_md(rs->ti->table);
-
-	list_for_each_entry(r, &raid_sets, list)
-		if (r != rs && dm_table_get_md(r->ti->table) == md)
-			return r;
-
-	return NULL;
-}
-
 /* raid10 algorithms (i.e. formats) */
 #define ALGORITHM_RAID10_DEFAULT	0
 #define ALGORITHM_RAID10_NEAR		1
@@ -764,7 +747,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
 
 	mddev_init(&rs->md);
 
-	INIT_LIST_HEAD(&rs->list);
 	rs->raid_disks = raid_devs;
 	rs->delta_disks = 0;
 
@@ -782,9 +764,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
 	for (i = 0; i < raid_devs; i++)
 		md_rdev_init(&rs->dev[i].rdev);
 
-	/* Add @rs to global list. */
-	list_add(&rs->list, &raid_sets);
-
 	/*
 	 * Remaining items to be initialized by further RAID params:
 	 *  rs->md.persistent
@@ -797,7 +776,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
 	return rs;
 }
 
-/* Free all @rs allocations and remove it from global list. */
+/* Free all @rs allocations */
 static void raid_set_free(struct raid_set *rs)
 {
 	int i;
@@ -815,8 +794,6 @@ static void raid_set_free(struct raid_set *rs)
 		dm_put_device(rs->ti, rs->dev[i].data_dev);
 	}
 
-	list_del(&rs->list);
-
 	kfree(rs);
 }
 
@@ -3947,29 +3924,6 @@ static int raid_preresume(struct dm_target *ti)
 	if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
 		return 0;
 
-	if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
-		struct raid_set *rs_active = rs_find_active(rs);
-
-		if (rs_active) {
-			/*
-			 * In case no rebuilds have been requested
-			 * and an active table slot exists, copy
-			 * current resynchonization completed and
-			 * reshape position pointers across from
-			 * suspended raid set in the active slot.
-			 *
-			 * This resumes the new mapping at current
-			 * offsets to continue recover/reshape without
-			 * necessarily redoing a raid set partially or
-			 * causing data corruption in case of a reshape.
-			 */
-			if (rs_active->md.curr_resync_completed != MaxSector)
-				mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
-			if (rs_active->md.reshape_position != MaxSector)
-				mddev->reshape_position = rs_active->md.reshape_position;
-		}
-	}
-
 	/*
 	 * The superblocks need to be updated on disk if the
 	 * array is new or new devices got added (thus zeroed
|