@@ -5879,6 +5879,49 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
 					raid5_show_stripe_cache_size,
 					raid5_store_stripe_cache_size);
 
+static ssize_t
+raid5_show_rmw_level(struct mddev *mddev, char *page)
+{
+	struct r5conf *conf = mddev->private;
+	if (conf)
+		return sprintf(page, "%d\n", conf->rmw_level);
+	else
+		return 0;
+}
+
+static ssize_t
+raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
+{
+	struct r5conf *conf = mddev->private;
+	unsigned long new;
+
+	if (!conf)
+		return -ENODEV;
+
+	if (len >= PAGE_SIZE)
+		return -EINVAL;
+
+	if (kstrtoul(page, 10, &new))
+		return -EINVAL;
+
+	if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
+		return -EINVAL;
+
+	if (new != PARITY_DISABLE_RMW &&
+	    new != PARITY_ENABLE_RMW &&
+	    new != PARITY_PREFER_RMW)
+		return -EINVAL;
+
+	conf->rmw_level = new;
+	return len;
+}
+
+static struct md_sysfs_entry
+raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
+			 raid5_show_rmw_level,
+			 raid5_store_rmw_level);
+
+
 static ssize_t
 raid5_show_preread_threshold(struct mddev *mddev, char *page)
 {
@@ -6065,6 +6108,7 @@ static struct attribute *raid5_attrs[] = {
 	&raid5_preread_bypass_threshold.attr,
 	&raid5_group_thread_cnt.attr,
 	&raid5_skip_copy.attr,
+	&raid5_rmw_level.attr,
 	NULL,
 };
 static struct attribute_group raid5_attrs_group = {
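
As a quick illustration (not part of the patch), a minimal user-space sketch of how the new attribute could be exercised once the patch is applied: it reads the current rmw_level of an md array and optionally writes a new policy value. The device name "md0" in the path is only an assumption for this example; the accepted numeric values are the PARITY_*_RMW constants validated in raid5_store_rmw_level().

/*
 * Hypothetical example, not part of the patch: query and set the
 * rmw_level sysfs attribute of an md array.  "md0" is a placeholder
 * device name.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *path = "/sys/block/md0/md/rmw_level"; /* example device */
	char buf[16];
	FILE *f;

	/* Read path: handled in the kernel by raid5_show_rmw_level(). */
	f = fopen(path, "r");
	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("current rmw_level: %s", buf);

	if (argc > 1) {
		/* Write path: parsed by raid5_store_rmw_level() via kstrtoul(). */
		f = fopen(path, "w");
		if (!f || fprintf(f, "%s\n", argv[1]) < 0 || fclose(f) != 0) {
			perror(path);
			return 1;
		}
	}
	return 0;
}

Per the store handler above, writes outside the accepted PARITY_*_RMW set are rejected with -EINVAL, and on arrays whose RAID6 algorithm provides no xor_syndrome implementation only PARITY_DISABLE_RMW is accepted.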