/*
 * raid0.c : Multiple Devices driver for Linux
 *           Copyright (C) 1994-96 Marc ZYNGIER
 *           <zyngier@ufr-info-p7.ibp.fr> or
 *           <maz@gloups.fdn.fr>
 *           Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
 *
 * RAID-0 management functions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

#define UNSUPPORTED_MDDEV_FLAGS                \
        ((1L << MD_HAS_JOURNAL) |        \
         (1L << MD_JOURNAL_CLEAN) |        \
         (1L << MD_FAILFAST_SUPPORTED))
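
/*
 * The array is congested as soon as any one of its component devices
 * reports congestion on its backing_dev_info.
 */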
static int raid0_congested(struct mddev *mddev, int bits)
{
        struct r0conf *conf = mddev->private;
        struct md_rdev **devlist = conf->devlist;
        int raid_disks = conf->strip_zone[0].nb_dev;
        int i, ret = 0;

        for (i = 0; i < raid_disks && !ret; i++) {
                struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

                ret |= bdi_congested(&q->backing_dev_info, bits);
        }
        return ret;
}

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
        int j, k;
        sector_t zone_size = 0;
        sector_t zone_start = 0;
        char b[BDEVNAME_SIZE];
        struct r0conf *conf = mddev->private;
        int raid_disks = conf->strip_zone[0].nb_dev;

        pr_debug("md: RAID0 configuration for %s - %d zone%s\n",
                 mdname(mddev),
                 conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
        for (j = 0; j < conf->nr_strip_zones; j++) {
                char line[200];
                int len = 0;

                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
                        len += snprintf(line + len, 200 - len, "%s%s", k ? "/" : "",
                                        bdevname(conf->devlist[j * raid_disks
                                                               + k]->bdev, b));
                pr_debug("md: zone%d=[%s]\n", j, line);

                zone_size = conf->strip_zone[j].zone_end - zone_start;
                pr_debug(" zone-offset=%10lluKB, device-offset=%10lluKB, size=%10lluKB\n",
                         (unsigned long long)zone_start >> 1,
                         (unsigned long long)conf->strip_zone[j].dev_start >> 1,
                         (unsigned long long)zone_size >> 1);
                zone_start = conf->strip_zone[j].zone_end;
        }
}
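
/*
 * Build the zone table for the array.  Devices are grouped by size:
 * zone 0 stripes across all devices up to the size of the smallest,
 * and each further zone stripes across the devices that still have
 * capacity beyond the previous zone.  On success, returns 0 and stores
 * the new configuration in *private_conf; on failure, returns a
 * negative errno and stores an ERR_PTR there instead.
 */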
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
        int i, c, err;
        sector_t curr_zone_end, sectors;
        struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
        struct strip_zone *zone;
        int cnt;
        char b[BDEVNAME_SIZE];
        char b2[BDEVNAME_SIZE];
        struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
        unsigned short blksize = 512;

        *private_conf = ERR_PTR(-ENOMEM);
        if (!conf)
                return -ENOMEM;
        rdev_for_each(rdev1, mddev) {
                pr_debug("md/raid0:%s: looking at %s\n",
                         mdname(mddev),
                         bdevname(rdev1->bdev, b));
                c = 0;

                /* round size to chunk_size */
                sectors = rdev1->sectors;
                sector_div(sectors, mddev->chunk_sectors);
                rdev1->sectors = sectors * mddev->chunk_sectors;

                blksize = max(blksize, queue_logical_block_size(
                                      rdev1->bdev->bd_disk->queue));

                rdev_for_each(rdev2, mddev) {
                        pr_debug("md/raid0:%s: comparing %s(%llu) with %s(%llu)\n",
                                 mdname(mddev),
                                 bdevname(rdev1->bdev, b),
                                 (unsigned long long)rdev1->sectors,
                                 bdevname(rdev2->bdev, b2),
                                 (unsigned long long)rdev2->sectors);
                        if (rdev2 == rdev1) {
                                pr_debug("md/raid0:%s: END\n",
                                         mdname(mddev));
                                break;
                        }
                        if (rdev2->sectors == rdev1->sectors) {
                                /*
                                 * Not unique, don't count it as a new
                                 * group
                                 */
                                pr_debug("md/raid0:%s: EQUAL\n",
                                         mdname(mddev));
                                c = 1;
                                break;
                        }
                        pr_debug("md/raid0:%s: NOT EQUAL\n",
                                 mdname(mddev));
                }
                if (!c) {
                        pr_debug("md/raid0:%s: ==> UNIQUE\n",
                                 mdname(mddev));
                        conf->nr_strip_zones++;
                        pr_debug("md/raid0:%s: %d zones\n",
                                 mdname(mddev), conf->nr_strip_zones);
                }
        }
        pr_debug("md/raid0:%s: FINAL %d zones\n",
                 mdname(mddev), conf->nr_strip_zones);
        /*
         * now since we have the hard sector sizes, we can make sure
         * chunk size is a multiple of that sector size
         */
        if ((mddev->chunk_sectors << 9) % blksize) {
                pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
                        mdname(mddev),
                        mddev->chunk_sectors << 9, blksize);
                err = -EINVAL;
                goto abort;
        }

        err = -ENOMEM;
        conf->strip_zone = kzalloc(sizeof(struct strip_zone) *
                                   conf->nr_strip_zones, GFP_KERNEL);
        if (!conf->strip_zone)
                goto abort;
        conf->devlist = kzalloc(sizeof(struct md_rdev *) *
                                conf->nr_strip_zones * mddev->raid_disks,
                                GFP_KERNEL);
        if (!conf->devlist)
                goto abort;

        /* The first zone must contain all devices, so here we check that
         * there is a proper alignment of slots to devices and find them all
         */
        zone = &conf->strip_zone[0];
        cnt = 0;
        smallest = NULL;
        dev = conf->devlist;
        err = -EINVAL;
        rdev_for_each(rdev1, mddev) {
                int j = rdev1->raid_disk;

                if (mddev->level == 10) {
                        /* taking over a raid10-n2 array */
                        j /= 2;
                        rdev1->new_raid_disk = j;
                }
                if (mddev->level == 1) {
                        /* taking over a raid1 array -
                         * we have only one active disk
                         */
                        j = 0;
                        rdev1->new_raid_disk = j;
                }
                if (j < 0) {
                        pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
                                mdname(mddev));
                        goto abort;
                }
                if (j >= mddev->raid_disks) {
                        pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
                                mdname(mddev), j);
                        goto abort;
                }
                if (dev[j]) {
                        pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
                                mdname(mddev), j);
                        goto abort;
                }
                dev[j] = rdev1;

                if (!smallest || (rdev1->sectors < smallest->sectors))
                        smallest = rdev1;
                cnt++;
        }
        if (cnt != mddev->raid_disks) {
                pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
                        mdname(mddev), cnt, mddev->raid_disks);
                goto abort;
        }
        zone->nb_dev = cnt;
        zone->zone_end = smallest->sectors * cnt;

        curr_zone_end = zone->zone_end;

        /* now do the other zones */
        for (i = 1; i < conf->nr_strip_zones; i++) {
                int j;

                zone = conf->strip_zone + i;
                dev = conf->devlist + i * mddev->raid_disks;

                pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
                zone->dev_start = smallest->sectors;
                smallest = NULL;
                c = 0;

                for (j = 0; j < cnt; j++) {
                        rdev = conf->devlist[j];
                        if (rdev->sectors <= zone->dev_start) {
                                pr_debug("md/raid0:%s: checking %s ... nope\n",
                                         mdname(mddev),
                                         bdevname(rdev->bdev, b));
                                continue;
                        }
                        pr_debug("md/raid0:%s: checking %s ... contained as device %d\n",
                                 mdname(mddev),
                                 bdevname(rdev->bdev, b), c);
                        dev[c] = rdev;
                        c++;
                        if (!smallest || rdev->sectors < smallest->sectors) {
                                smallest = rdev;
                                pr_debug("md/raid0:%s: (%llu) is smallest!\n",
                                         mdname(mddev),
                                         (unsigned long long)rdev->sectors);
                        }
                }

                zone->nb_dev = c;
                sectors = (smallest->sectors - zone->dev_start) * c;
                pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
                         mdname(mddev),
                         zone->nb_dev, (unsigned long long)sectors);

                curr_zone_end += sectors;
                zone->zone_end = curr_zone_end;

                pr_debug("md/raid0:%s: current zone start: %llu\n",
                         mdname(mddev),
                         (unsigned long long)smallest->sectors);
        }

        pr_debug("md/raid0:%s: done.\n", mdname(mddev));
        *private_conf = conf;
        return 0;
abort:
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
        *private_conf = ERR_PTR(err);
        return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
                                    sector_t *sectorp)
{
        int i;
        struct strip_zone *z = conf->strip_zone;
        sector_t sector = *sectorp;

        for (i = 0; i < conf->nr_strip_zones; i++)
                if (sector < z[i].zone_end) {
                        if (i)
                                *sectorp = sector - z[i - 1].zone_end;
                        return z + i;
                }
        BUG();
}

/*
 * Remap the bio to the target device. We separate two flows:
 * a power-of-2 flow and a general flow, for the sake of performance.
 */
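/*
 * Worked example with illustrative numbers: take a zone of 2 devices,
 * chunk_sects = 128, and a zone-relative sector of 300.  Then
 * sect_in_chunk = 300 & 127 = 44, the chunk number within the zone is
 * 300 >> 7 = 2, and the chunk number within each device is
 * 300 / (2 * 128) = 1.  *sector_offset therefore becomes
 * 1 * 128 + 44 = 172, and the device index is 2 % 2 = 0: zone sector
 * 300 lives at sector 172 of the zone's first device (the caller still
 * adds zone->dev_start and the rdev's data_offset).
 */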
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
                                  sector_t sector, sector_t *sector_offset)
{
        unsigned int sect_in_chunk;
        sector_t chunk;
        struct r0conf *conf = mddev->private;
        int raid_disks = conf->strip_zone[0].nb_dev;
        unsigned int chunk_sects = mddev->chunk_sectors;

        if (is_power_of_2(chunk_sects)) {
                int chunksect_bits = ffz(~chunk_sects);

                /* find the sector offset inside the chunk */
                sect_in_chunk = sector & (chunk_sects - 1);
                sector >>= chunksect_bits;
                /* chunk in zone */
                chunk = *sector_offset;
                /* quotient is the chunk in real device */
                sector_div(chunk, zone->nb_dev << chunksect_bits);
        } else {
                sect_in_chunk = sector_div(sector, chunk_sects);
                chunk = *sector_offset;
                sector_div(chunk, chunk_sects * zone->nb_dev);
        }
        /*
         * position the bio over the real device
         * real sector = chunk in device + starting of zone
         *      + the position in the chunk
         */
        *sector_offset = (chunk * chunk_sects) + sect_in_chunk;
        return conf->devlist[(zone - conf->strip_zone) * raid_disks
                             + sector_div(sector, zone->nb_dev)];
}
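
/*
 * The usable array size is the sum of each member's capacity, rounded
 * down to a whole number of chunks.
 */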
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
        sector_t array_sectors = 0;
        struct md_rdev *rdev;

        WARN_ONCE(sectors || raid_disks,
                  "%s does not support generic reshape\n", __func__);

        rdev_for_each(rdev, mddev)
                array_sectors += (rdev->sectors &
                                  ~(sector_t)(mddev->chunk_sectors - 1));

        return array_sectors;
}

static void raid0_free(struct mddev *mddev, void *priv);
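
/*
 * Assemble the array: build the zone table (unless a takeover has
 * already provided one), set up the queue limits and read-ahead, and
 * publish the resulting array size.
 */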
static int raid0_run(struct mddev *mddev)
{
        struct r0conf *conf;
        int ret;

        if (mddev->chunk_sectors == 0) {
                pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
                return -EINVAL;
        }
        if (md_check_no_bitmap(mddev))
                return -EINVAL;

        /* if private is not null, we are here after takeover */
        if (mddev->private == NULL) {
                ret = create_strip_zones(mddev, &conf);
                if (ret < 0)
                        return ret;
                mddev->private = conf;
        }
        conf = mddev->private;
        if (mddev->queue) {
                struct md_rdev *rdev;
                bool discard_supported = false;

                blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
                blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
                blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);

                blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
                blk_queue_io_opt(mddev->queue,
                                 (mddev->chunk_sectors << 9) * mddev->raid_disks);

                rdev_for_each(rdev, mddev) {
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
                        if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
                                discard_supported = true;
                }
                if (!discard_supported)
                        queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
                else
                        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
        }

        /* calculate array device size */
        md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

        pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
                 mdname(mddev),
                 (unsigned long long)mddev->array_sectors);

        if (mddev->queue) {
                /* calculate the max read-ahead size.
                 * For read-ahead of large files to be effective, we need to
                 * read ahead at least twice a whole stripe, i.e. the number
                 * of devices multiplied by the chunk size, times 2.
                 * If an individual device has an ra_pages greater than the
                 * chunk size, then we will not drive that device as hard as it
                 * wants. We consider this a configuration error: a larger
                 * chunksize should be used in that case.
                 */
                int stripe = mddev->raid_disks *
                        (mddev->chunk_sectors << 9) / PAGE_SIZE;
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
        }

        dump_zones(mddev);

        ret = md_integrity_register(mddev);

        return ret;
}
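
/*
 * Free the zone table, the device list and the configuration itself.
 */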
static void raid0_free(struct mddev *mddev, void *priv)
{
        struct r0conf *conf = priv;

        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
}

/*
 * Is the IO contained within a single chunk, or does it span more
 * than one?
 */
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
                                          unsigned int chunk_sects, struct bio *bio)
{
        if (likely(is_power_of_2(chunk_sects))) {
                return chunk_sects >=
                        ((bio->bi_iter.bi_sector & (chunk_sects - 1))
                         + bio_sectors(bio));
        } else {
                sector_t sector = bio->bi_iter.bi_sector;
                return chunk_sects >= (sector_div(sector, chunk_sects)
                                       + bio_sectors(bio));
        }
}
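
/*
 * Main IO path: split the bio wherever it crosses a chunk boundary,
 * then map each piece to its member device with find_zone() and
 * map_sector() and resubmit it.
 */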
static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
        struct strip_zone *zone;
        struct md_rdev *tmp_dev;
        struct bio *split;

        if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }

        do {
                sector_t bio_sector = bio->bi_iter.bi_sector;
                sector_t sector = bio_sector;
                unsigned chunk_sects = mddev->chunk_sectors;

                unsigned sectors = chunk_sects -
                        (likely(is_power_of_2(chunk_sects))
                         ? (sector & (chunk_sects - 1))
                         : sector_div(sector, chunk_sects));

                /* Restore due to sector_div */
                sector = bio_sector;

                if (sectors < bio_sectors(bio)) {
                        split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
                        bio_chain(split, bio);
                } else {
                        split = bio;
                }

                zone = find_zone(mddev->private, &sector);
                tmp_dev = map_sector(mddev, zone, sector, &sector);
                split->bi_bdev = tmp_dev->bdev;
                split->bi_iter.bi_sector = sector + zone->dev_start +
                        tmp_dev->data_offset;

                if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
                             !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
                        /* Just ignore it */
                        bio_endio(split);
                } else {
                        if (mddev->gendisk)
                                trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
                                                      split, disk_devt(mddev->gendisk),
                                                      bio_sector);
                        generic_make_request(split);
                }
        } while (split != bio);
}
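
/*
 * Report the chunk size, in KiB, for /proc/mdstat.
 */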
static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
        seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
}
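
/*
 * Take over a raid4, or a raid5 using the parity-last layout.  The
 * parity disk must already be missing, so that the remaining disks
 * carry a plain raid0 data layout.
 */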
static void *raid0_takeover_raid45(struct mddev *mddev)
{
        struct md_rdev *rdev;
        struct r0conf *priv_conf;

        if (mddev->degraded != 1) {
                pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
                        mdname(mddev),
                        mddev->degraded);
                return ERR_PTR(-EINVAL);
        }

        rdev_for_each(rdev, mddev) {
                /* check slot number for a disk */
                if (rdev->raid_disk == mddev->raid_disks - 1) {
                        pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
                                mdname(mddev));
                        return ERR_PTR(-EINVAL);
                }
                rdev->sectors = mddev->dev_sectors;
        }

        /* Set new parameters */
        mddev->new_level = 0;
        mddev->new_layout = 0;
        mddev->new_chunk_sectors = mddev->chunk_sectors;
        mddev->raid_disks--;
        mddev->delta_disks = -1;
        /* make sure it will not be marked as dirty */
        mddev->recovery_cp = MaxSector;
        mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

        create_strip_zones(mddev, &priv_conf);

        return priv_conf;
}
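
/*
 * Take over a raid10 array in the near-2, far-1 layout.  Once every
 * mirror pair is degraded down to a single copy, the surviving disks
 * hold exactly one striped copy of the data, which is a raid0 layout
 * over half the original slots.
 */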
static void *raid0_takeover_raid10(struct mddev *mddev)
{
        struct r0conf *priv_conf;

        /* Check layout:
         *  - far_copies must be 1
         *  - near_copies must be 2
         *  - disks number must be even
         *  - all mirrors must be already degraded
         */
        if (mddev->layout != ((1 << 8) + 2)) {
                pr_warn("md/raid0:%s: Raid0 cannot takeover layout: 0x%x\n",
                        mdname(mddev),
                        mddev->layout);
                return ERR_PTR(-EINVAL);
        }
        if (mddev->raid_disks & 1) {
                pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
                        mdname(mddev));
                return ERR_PTR(-EINVAL);
        }
        if (mddev->degraded != (mddev->raid_disks >> 1)) {
                pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
                        mdname(mddev));
                return ERR_PTR(-EINVAL);
        }

        /* Set new parameters */
        mddev->new_level = 0;
        mddev->new_layout = 0;
        mddev->new_chunk_sectors = mddev->chunk_sectors;
        mddev->delta_disks = -mddev->raid_disks / 2;
        mddev->raid_disks += mddev->delta_disks;
        mddev->degraded = 0;
        /* make sure it will not be marked as dirty */
        mddev->recovery_cp = MaxSector;
        mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
}
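
/*
 * Take over a raid1 array whose mirrors have all failed but one,
 * turning it into a single-disk raid0.  The chunk size is chosen as
 * the largest power of two (up to 64K) that divides the array size.
 */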
static void *raid0_takeover_raid1(struct mddev *mddev)
{
        struct r0conf *priv_conf;
        int chunksect;

        /* Check layout:
         *  - (N - 1) mirror drives must be already faulty
         */
        if ((mddev->raid_disks - 1) != mddev->degraded) {
                pr_err("md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
                       mdname(mddev));
                return ERR_PTR(-EINVAL);
        }

        /*
         * a raid1 doesn't have the notion of chunk size, so
         * figure out the largest suitable size we can use.
         */
        chunksect = 64 * 2; /* 64K by default */

        /* The array must be an exact multiple of chunksize */
        while (chunksect && (mddev->array_sectors & (chunksect - 1)))
                chunksect >>= 1;

        if ((chunksect << 9) < PAGE_SIZE)
                /* array size does not allow a suitable chunk size */
                return ERR_PTR(-EINVAL);

        /* Set new parameters */
        mddev->new_level = 0;
        mddev->new_layout = 0;
        mddev->new_chunk_sectors = chunksect;
        mddev->chunk_sectors = chunksect;
        mddev->delta_disks = 1 - mddev->raid_disks;
        mddev->raid_disks = 1;
        /* make sure it will not be marked as dirty */
        mddev->recovery_cp = MaxSector;
        mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);

        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
        /* raid0 can take over:
         *  raid4 - if all data disks are active.
         *  raid5 - provided it uses the raid4-style layout (ALGORITHM_PARITY_N)
         *          and one disk is faulty
         *  raid10 - assuming we have all necessary active disks
         *  raid1 - with (N - 1) mirror drives faulty
         */

        if (mddev->bitmap) {
                pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
                        mdname(mddev));
                return ERR_PTR(-EBUSY);
        }
        if (mddev->level == 4)
                return raid0_takeover_raid45(mddev);

        if (mddev->level == 5) {
                if (mddev->layout == ALGORITHM_PARITY_N)
                        return raid0_takeover_raid45(mddev);

                pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
                        mdname(mddev), ALGORITHM_PARITY_N);
        }

        if (mddev->level == 10)
                return raid0_takeover_raid10(mddev);

        if (mddev->level == 1)
                return raid0_takeover_raid1(mddev);

        pr_warn("Takeover from raid%i to raid0 not supported\n",
                mddev->level);

        return ERR_PTR(-EINVAL);
}
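
/*
 * raid0 keeps no internal state and never resyncs or recovers, so
 * quiesce has nothing to do.
 */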
static void raid0_quiesce(struct mddev *mddev, int state)
{
}

static struct md_personality raid0_personality =
{
        .name           = "raid0",
        .level          = 0,
        .owner          = THIS_MODULE,
        .make_request   = raid0_make_request,
        .run            = raid0_run,
        .free           = raid0_free,
        .status         = raid0_status,
        .size           = raid0_size,
        .takeover       = raid0_takeover,
        .quiesce        = raid0_quiesce,
        .congested      = raid0_congested,
};

static int __init raid0_init(void)
{
        return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
        unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");