/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define NR_RAID1_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)

/* When there are this many requests queued to be written by
 * the raid1 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
			  sector_t bi_sector);
static void lower_barrier(struct r1conf *conf);

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	int size = offsetof(struct r1bio, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	return kzalloc(size, gfp_flags);
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
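
/* Allocate the buffers used by resync/recovery: an r1bio with one bio per
 * raid disk, plus RESYNC_PAGES data pages (shared across the bios except
 * for a user-requested check/repair, where each bio gets its own pages).
 */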
static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct r1bio *r1_bio;
	struct bio *bio;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio)
		return NULL;

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		j = pi->raid_disks;
	else
		j = 1;
	while (j--) {
		bio = r1_bio->bios[j];
		bio->bi_vcnt = RESYNC_PAGES;

		if (bio_alloc_pages(bio, gfp_flags))
			goto out_free_bio;
	}
	/* If not user-requested, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i = 0; i < RESYNC_PAGES; i++)
			for (j = 1; j < pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_bio:
	while (++j < pi->raid_disks)
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}
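
/* Free a resync buffer: release the data pages (shared pages only once,
 * via the first bio), then the bios, then the r1bio itself.
 */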
static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i, j;
	struct r1bio *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i = 0; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}
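
/* Drop the reference on every real bio attached to an r1bio, skipping the
 * special marker values (NULL, IO_BLOCKED, IO_MADE_GOOD).
 */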
static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}
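
/* Return a normal-IO r1bio to its mempool once all attached bios are dropped. */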
static void free_r1bio(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}
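
/* Return a resync/recovery r1bio to its pool, dropping the per-device
 * pending counts and lowering the resync barrier.
 */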
static void put_buf(struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;
	int i;

	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}
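
/* Queue an r1bio on the retry list and wake the raid1 thread so the
 * request is handled again from process context.
 */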
static void reschedule_retry(struct r1bio *r1_bio)
{
	unsigned long flags;
	struct mddev *mddev = r1_bio->mddev;
	struct r1conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;
	int done;
	struct r1conf *conf = r1_bio->mddev->private;
	sector_t start_next_window = r1_bio->start_next_window;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	if (bio->bi_phys_segments) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
		/*
		 * make_request() might be waiting for
		 * bi_phys_segments to decrease
		 */
		wake_up(&conf->wait_barrier);
	} else
		done = 1;

	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (done) {
		bio_endio(bio, 0);
		/*
		 * Wake up any possible resync thread that waits for the device
		 * to go idle.
		 */
		allow_barrier(conf, start_next_window, bi_sector);
	}
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
			 (unsigned long long) bio->bi_iter.bi_sector,
			 (unsigned long long) bio_end_sector(bio) - 1);

		call_bio_endio(r1_bio);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
	struct r1conf *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;
	int raid_disks = conf->raid_disks;

	for (mirror = 0; mirror < raid_disks * 2; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	BUG_ON(mirror == raid_disks * 2);
	update_head_pos(mirror, r1_bio);

	return mirror;
}

static void raid1_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	int mirror;
	struct r1conf *conf = r1_bio->mddev->private;

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate) {
		raid_end_bio_io(r1_bio);
		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	} else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		printk_ratelimited(
			KERN_ERR "md/raid1:%s: %s: "
			"rescheduling sector %llu\n",
			mdname(conf->mddev),
			bdevname(conf->mirrors[mirror].rdev->bdev,
				 b),
			(unsigned long long)r1_bio->sector);
		set_bit(R1BIO_ReadError, &r1_bio->state);
		reschedule_retry(r1_bio);
		/* don't drop the reference on read_disk yet */
	}
}

static void close_write(struct r1bio *r1_bio)
{
	/* it really is the end of this request */
	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
		/* free extra copy of the data pages */
		int i = r1_bio->behind_page_count;
		while (i--)
			safe_put_page(r1_bio->behind_bvecs[i].bv_page);
		kfree(r1_bio->behind_bvecs);
		r1_bio->behind_bvecs = NULL;
	}
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
			r1_bio->sectors,
			!test_bit(R1BIO_Degraded, &r1_bio->state),
			test_bit(R1BIO_BehindIO, &r1_bio->state));
	md_write_end(r1_bio->mddev);
}
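
/* Called as each mirrored write completes; once the last write for the
 * r1bio has finished, either finish the request or hand it to the raid1
 * thread for write-error / bad-block handling.
 */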
static void r1_bio_write_done(struct r1bio *r1_bio)
{
	if (!atomic_dec_and_test(&r1_bio->remaining))
		return;

	if (test_bit(R1BIO_WriteError, &r1_bio->state))
		reschedule_retry(r1_bio);
	else {
		close_write(r1_bio);
		if (test_bit(R1BIO_MadeGood, &r1_bio->state))
			reschedule_retry(r1_bio);
		else
			raid_end_bio_io(r1_bio);
	}
}

static void raid1_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r1bio *r1_bio = bio->bi_private;
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	struct r1conf *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;

	mirror = find_bio_disk(r1_bio, bio);

	/*
	 * 'one mirror IO has finished' event handler:
	 */
	if (!uptodate) {
		set_bit(WriteErrorSeen,
			&conf->mirrors[mirror].rdev->flags);
		if (!test_and_set_bit(WantReplacement,
				      &conf->mirrors[mirror].rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
				conf->mddev->recovery);

		set_bit(R1BIO_WriteError, &r1_bio->state);
	} else {
		/*
		 * Set R1BIO_Uptodate in our master bio, so that we
		 * will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer
		 * fails.
		 *
		 * The 'master' represents the composite IO operation
		 * to user-side. So if something waits for IO, then it
		 * will wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		/*
		 * Do not set R1BIO_Uptodate if the current device is
		 * rebuilding or Faulty. This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write would have fallen
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here.
		 */
		if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
		    !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(conf->mirrors[mirror].rdev,
				r1_bio->sector, r1_bio->sectors,
				&first_bad, &bad_sectors)) {
			r1_bio->bios[mirror] = IO_MADE_GOOD;
			set_bit(R1BIO_MadeGood, &r1_bio->state);
		}
	}

	if (behind) {
		if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
			atomic_dec(&r1_bio->behind_remaining);

		/*
		 * In behind mode, we ACK the master bio once the I/O
		 * has safely reached all non-writemostly
		 * disks. Setting the Returned bit ensures that this
		 * gets done only once -- we don't ever want to return
		 * -EIO here, instead we'll wait
		 */
		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
			/* Maybe we can return now */
			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
				struct bio *mbio = r1_bio->master_bio;
				pr_debug("raid1: behind end write sectors"
					 " %llu-%llu\n",
					 (unsigned long long) mbio->bi_iter.bi_sector,
					 (unsigned long long) bio_end_sector(mbio) - 1);
				call_bio_endio(r1_bio);
			}
		}
	}
	if (r1_bio->bios[mirror] == NULL)
		rdev_dec_pending(conf->mirrors[mirror].rdev,
				 conf->mddev);

	/*
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	r1_bio_write_done(r1_bio);

	if (to_put)
		bio_put(to_put);
}
/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
	const sector_t this_sector = r1_bio->sector;
	int sectors;
	int best_good_sectors;
	int best_disk, best_dist_disk, best_pending_disk;
	int has_nonrot_disk;
	int disk;
	sector_t best_dist;
	unsigned int min_pending;
	struct md_rdev *rdev;
	int choose_first;
	int choose_next_idle;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	sectors = r1_bio->sectors;
	best_disk = -1;
	best_dist_disk = -1;
	best_dist = MaxSector;
	best_pending_disk = -1;
	min_pending = UINT_MAX;
	best_good_sectors = 0;
	has_nonrot_disk = 0;
	choose_next_idle = 0;

	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync))
		choose_first = 1;
	else
		choose_first = 0;

	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
		sector_t dist;
		sector_t first_bad;
		int bad_sectors;
		unsigned int pending;
		bool nonrot;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (r1_bio->bios[disk] == IO_BLOCKED
		    || rdev == NULL
		    || test_bit(Unmerged, &rdev->flags)
		    || test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    rdev->recovery_offset < this_sector + sectors)
			continue;
		if (test_bit(WriteMostly, &rdev->flags)) {
			/* Don't balance among write-mostly, just
			 * use the first as a last resort */
			if (best_disk < 0) {
				if (is_badblock(rdev, this_sector, sectors,
						&first_bad, &bad_sectors)) {
					if (first_bad < this_sector)
						/* Cannot use this */
						continue;
					best_good_sectors = first_bad - this_sector;
				} else
					best_good_sectors = sectors;
				best_disk = disk;
			}
			continue;
		}
		/* This is a reasonable device to use.  It might
		 * even be best.
		 */
		if (is_badblock(rdev, this_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* already have a better device */
				continue;
			if (first_bad <= this_sector) {
				/* cannot read here. If this is the 'primary'
				 * device, then we must not read beyond
				 * bad_sectors from another device..
				 */
				bad_sectors -= (this_sector - first_bad);
				if (choose_first && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;

			} else {
				sector_t good_sectors = first_bad - this_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_disk = disk;
				}
				if (choose_first)
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
		has_nonrot_disk |= nonrot;
		pending = atomic_read(&rdev->nr_pending);
		dist = abs(this_sector - conf->mirrors[disk].head_position);
		if (choose_first) {
			best_disk = disk;
			break;
		}
		/* Don't change to another disk for sequential reads */
		if (conf->mirrors[disk].next_seq_sect == this_sector
		    || dist == 0) {
			int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
			struct raid1_info *mirror = &conf->mirrors[disk];

			best_disk = disk;
			/*
			 * If buffered sequential IO size exceeds the optimal
			 * iosize, check whether there is an idle disk. If so,
			 * choose the idle disk. read_balance could already
			 * have chosen an idle disk before noticing this is a
			 * sequential IO on this disk. That doesn't matter,
			 * because this disk will then idle and be used again
			 * once the first disk's IO size exceeds the optimal
			 * iosize. This way the first disk's iosize is at
			 * least the optimal iosize. The second disk's iosize
			 * might be small, but that is not a big deal since
			 * when the second disk starts IO, the first disk is
			 * likely still busy.
			 */
			if (nonrot && opt_iosize > 0 &&
			    mirror->seq_start != MaxSector &&
			    mirror->next_seq_sect > opt_iosize &&
			    mirror->next_seq_sect - opt_iosize >=
			    mirror->seq_start) {
				choose_next_idle = 1;
				continue;
			}
			break;
		}
		/* If device is idle, use it */
		if (pending == 0) {
			best_disk = disk;
			break;
		}

		if (choose_next_idle)
			continue;

		if (min_pending > pending) {
			min_pending = pending;
			best_pending_disk = disk;
		}

		if (dist < best_dist) {
			best_dist = dist;
			best_dist_disk = disk;
		}
	}

	/*
	 * If all disks are rotational, choose the closest disk. If any disk
	 * is non-rotational, choose the disk with the fewest pending requests
	 * even if that disk is rotational, which might or might not be optimal
	 * for arrays with mixed rotational/non-rotational disks depending on
	 * the workload.
	 */
	if (best_disk == -1) {
		if (has_nonrot_disk)
			best_disk = best_pending_disk;
		else
			best_disk = best_dist_disk;
	}

	if (best_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		sectors = best_good_sectors;

		if (conf->mirrors[best_disk].next_seq_sect != this_sector)
			conf->mirrors[best_disk].seq_start = this_sector;

		conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
	}
	rcu_read_unlock();
	*max_sectors = sectors;

	return best_disk;
}
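
/* Decide how many bytes of a new bio_vec may be merged into a bio,
 * honouring the merge_bvec_fn of every underlying device that has one.
 */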
static int raid1_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r1conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max = biovec->bv_len;

	if (mddev->merge_check_needed) {
		int disk;
		rcu_read_lock();
		for (disk = 0; disk < conf->raid_disks * 2; disk++) {
			struct md_rdev *rdev = rcu_dereference(
				conf->mirrors[disk].rdev);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = sector +
						rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
		}
		rcu_read_unlock();
	}
	return max;
}
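
/* Report whether the array should be treated as congested: either too many
 * writes are queued for the raid1 thread, or a component device's queue
 * reports congestion.
 */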
int md_raid1_congested(struct mddev *mddev, int bits)
{
	struct r1conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << BDI_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0; i < conf->raid_disks * 2; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			BUG_ON(!q);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1<<BDI_async_congested)) || 1)
				ret |= bdi_congested(&q->backing_dev_info, bits);
			else
				ret &= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(md_raid1_congested);

static int raid1_congested(void *data, int bits)
{
	struct mddev *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid1_congested(mddev, bits);
}
static void flush_pending_writes(struct r1conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to
		 * disk before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
				/* Just ignore it */
				bio_endio(bio, 0);
			else
				generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}
/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * Background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 */
static void raise_barrier(struct r1conf *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock);

	/* block any new IO from starting */
	conf->barrier++;

	/* For these conditions we must wait:
	 * A: while the array is in frozen state
	 * B: while barrier >= RESYNC_DEPTH, meaning resync has reached
	 *    the maximum count allowed.
	 * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
	 *    the next resync will reach the window which normal bios are
	 *    handling.
	 */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->array_frozen &&
			    conf->barrier < RESYNC_DEPTH &&
			    (conf->start_next_window >=
			     conf->next_resync + RESYNC_SECTORS),
			    conf->resync_lock);

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r1conf *conf)
{
	unsigned long flags;

	BUG_ON(conf->barrier <= 0);
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}
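
/* Decide whether a bio must wait for the resync barrier: it must if the
 * array is frozen, or if a resync is active and the write falls near the
 * current resync point (neither safely behind it nor far enough ahead).
 */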
static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
{
	bool wait = false;

	if (conf->array_frozen || !bio)
		wait = true;
	else if (conf->barrier && bio_data_dir(bio) == WRITE) {
		if (conf->next_resync < RESYNC_WINDOW_SECTORS)
			wait = true;
		else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
				>= bio_end_sector(bio)) ||
			 (conf->next_resync + NEXT_NORMALIO_DISTANCE
				<= bio->bi_iter.bi_sector))
			wait = false;
		else
			wait = true;
	}

	return wait;
}
static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
{
	sector_t sector = 0;

	spin_lock_irq(&conf->resync_lock);
	if (need_to_wait_for_sync(conf, bio)) {
		conf->nr_waiting++;
		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * pre-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to get the nr_pending
		 * count down.
		 */
		wait_event_lock_irq(conf->wait_barrier,
				    !conf->array_frozen &&
				    (!conf->barrier ||
				     ((conf->start_next_window <
				       conf->next_resync + RESYNC_SECTORS) &&
				      current->bio_list &&
				      !bio_list_empty(current->bio_list))),
				    conf->resync_lock);
		conf->nr_waiting--;
	}

	if (bio && bio_data_dir(bio) == WRITE) {
		if (conf->next_resync + NEXT_NORMALIO_DISTANCE
		    <= bio->bi_iter.bi_sector) {
			if (conf->start_next_window == MaxSector)
				conf->start_next_window =
					conf->next_resync +
					NEXT_NORMALIO_DISTANCE;

			if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
			    <= bio->bi_iter.bi_sector)
				conf->next_window_requests++;
			else
				conf->current_window_requests++;

			sector = conf->start_next_window;
		}
	}

	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
	return sector;
}
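
/* Drop the hold taken by wait_barrier() and update the normal-IO window
 * accounting; wakes anyone waiting to raise the barrier.
 */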
static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
			  sector_t bi_sector)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	if (start_next_window) {
		if (start_next_window == conf->start_next_window) {
			if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
			    <= bi_sector)
				conf->next_window_requests--;
			else
				conf->current_window_requests--;
		} else
			conf->current_window_requests--;

		if (!conf->current_window_requests) {
			if (conf->next_window_requests) {
				conf->current_window_requests =
					conf->next_window_requests;
				conf->next_window_requests = 0;
				conf->start_next_window +=
					NEXT_NORMALIO_DISTANCE;
			} else
				conf->start_next_window = MaxSector;
		}
	}
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}
static void freeze_array(struct r1conf *conf, int extra)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We wait until nr_pending matches nr_queued+extra.
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (extra)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 1;
	wait_event_lock_irq_cmd(conf->wait_barrier,
				conf->nr_pending == conf->nr_queued+extra,
				conf->resync_lock,
				flush_pending_writes(conf));
	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r1conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->array_frozen = 0;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}
/* duplicate the data pages for behind I/O
 */
static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
{
	int i;
	struct bio_vec *bvec;
	struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
					GFP_NOIO);
	if (unlikely(!bvecs))
		return;

	bio_for_each_segment_all(bvec, bio, i) {
		bvecs[i] = *bvec;
		bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (unlikely(!bvecs[i].bv_page))
			goto do_sync_io;
		memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
		       kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(bvecs[i].bv_page);
		kunmap(bvec->bv_page);
	}
	r1_bio->behind_bvecs = bvecs;
	r1_bio->behind_page_count = bio->bi_vcnt;
	set_bit(R1BIO_BehindIO, &r1_bio->state);
	return;

do_sync_io:
	for (i = 0; i < bio->bi_vcnt; i++)
		if (bvecs[i].bv_page)
			put_page(bvecs[i].bv_page);
	kfree(bvecs);
	pr_debug("%dB behind alloc failed, doing sync I/O\n",
		 bio->bi_iter.bi_size);
}

struct raid1_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};
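
/* Called when a block plug is flushed: either hand the plugged writes to
 * the raid1 thread (when unplugging from a scheduling context) or submit
 * them directly after flushing any pending bitmap updates.
 */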
static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
						  cb);
	struct mddev *mddev = plug->cb.data;
	struct r1conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	bitmap_unplug(mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		bio->bi_next = NULL;
		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
			/* Just ignore it */
			bio_endio(bio, 0);
		else
			generic_make_request(bio);
		bio = next;
	}
	kfree(plug);
}
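
/* Main entry point for normal read and write requests.  Reads are balanced
 * across the mirrors; writes are cloned to all active mirrors, splitting
 * the request where bad blocks prevent a full-width write.
 */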
static void make_request(struct mddev *mddev, struct bio * bio)
{
	struct r1conf *conf = mddev->private;
	struct raid1_info *mirror;
	struct r1bio *r1_bio;
	struct bio *read_bio;
	int i, disks;
	struct bitmap *bitmap;
	unsigned long flags;
	const int rw = bio_data_dir(bio);
	const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
	const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
	const unsigned long do_discard = (bio->bi_rw
					  & (REQ_DISCARD | REQ_SECURE));
	const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
	struct md_rdev *blocked_rdev;
	struct blk_plug_cb *cb;
	struct raid1_plug_cb *plug = NULL;
	int first_clone;
	int sectors_handled;
	int max_sectors;
	sector_t start_next_window;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 */

	md_write_start(mddev, bio); /* wait on superblock update early */

	if (bio_data_dir(bio) == WRITE &&
	    bio_end_sector(bio) > mddev->suspend_lo &&
	    bio->bi_iter.bi_sector < mddev->suspend_hi) {
		/* As the suspend_* range is controlled by
		 * userspace, we want an interruptible
		 * wait.
		 */
		DEFINE_WAIT(w);
		for (;;) {
			flush_signals(current);
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_INTERRUPTIBLE);
			if (bio_end_sector(bio) <= mddev->suspend_lo ||
			    bio->bi_iter.bi_sector >= mddev->suspend_hi)
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}

	start_next_window = wait_barrier(conf, bio);

	bitmap = mddev->bitmap;

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio);
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_iter.bi_sector;

	/* We might need to issue multiple reads to different
	 * devices if there are bad blocks around, so we keep
	 * track of the number of reads in bio->bi_phys_segments.
	 * If this is 0, there is only one r1_bio and no locking
	 * will be needed when requests complete.  If it is
	 * non-zero, then it is the number of not-completed requests.
	 */
	bio->bi_phys_segments = 0;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk;

read_again:
		rdisk = read_balance(conf, r1_bio, &max_sectors);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return;
		}
		mirror = conf->mirrors + rdisk;

		if (test_bit(WriteMostly, &mirror->rdev->flags) &&
		    bitmap) {
			/* Reading from a write-mostly device must
			 * take care not to over-take any writes
			 * that are 'behind'
			 */
			wait_event(bitmap->behind_wait,
				   atomic_read(&bitmap->behind_writes) == 0);
		}
		r1_bio->read_disk = rdisk;

		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
			 max_sectors);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_iter.bi_sector = r1_bio->sector +
			mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ | do_sync;
		read_bio->bi_private = r1_bio;

		if (max_sectors < r1_bio->sectors) {
			/* could not read all from this device, so we will
			 * need another r1_bio.
			 */

			sectors_handled = (r1_bio->sector + max_sectors
					   - bio->bi_iter.bi_sector);
			r1_bio->sectors = max_sectors;
			spin_lock_irq(&conf->device_lock);
			if (bio->bi_phys_segments == 0)
				bio->bi_phys_segments = 2;
			else
				bio->bi_phys_segments++;
			spin_unlock_irq(&conf->device_lock);
			/* Cannot call generic_make_request directly
			 * as that will be queued in __make_request
			 * and subsequent mempool_alloc might block waiting
			 * for it.  So hand bio over to raid1d.
			 */
			reschedule_retry(r1_bio);

			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

			r1_bio->master_bio = bio;
			r1_bio->sectors = bio_sectors(bio) - sectors_handled;
			r1_bio->state = 0;
			r1_bio->mddev = mddev;
			r1_bio->sector = bio->bi_iter.bi_sector +
				sectors_handled;
			goto read_again;
		} else
			generic_make_request(read_bio);
		return;
	}
  1091. /*
  1092. * WRITE:
  1093. */
  1094. if (conf->pending_count >= max_queued_requests) {
  1095. md_wakeup_thread(mddev->thread);
  1096. wait_event(conf->wait_barrier,
  1097. conf->pending_count < max_queued_requests);
  1098. }
  1099. /* first select target devices under rcu_lock and
  1100. * inc refcount on their rdev. Record them by setting
  1101. * bios[x] to bio
  1102. * If there are known/acknowledged bad blocks on any device on
  1103. * which we have seen a write error, we want to avoid writing those
  1104. * blocks.
  1105. * This potentially requires several writes to write around
  1106. * the bad blocks. Each set of writes gets it's own r1bio
  1107. * with a set of bios attached.
  1108. */
  1109. disks = conf->raid_disks * 2;
  1110. retry_write:
  1111. r1_bio->start_next_window = start_next_window;
  1112. blocked_rdev = NULL;
  1113. rcu_read_lock();
  1114. max_sectors = r1_bio->sectors;
  1115. for (i = 0; i < disks; i++) {
  1116. struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  1117. if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
  1118. atomic_inc(&rdev->nr_pending);
  1119. blocked_rdev = rdev;
  1120. break;
  1121. }
  1122. r1_bio->bios[i] = NULL;
  1123. if (!rdev || test_bit(Faulty, &rdev->flags)
  1124. || test_bit(Unmerged, &rdev->flags)) {
  1125. if (i < conf->raid_disks)
  1126. set_bit(R1BIO_Degraded, &r1_bio->state);
  1127. continue;
  1128. }
  1129. atomic_inc(&rdev->nr_pending);
  1130. if (test_bit(WriteErrorSeen, &rdev->flags)) {
  1131. sector_t first_bad;
  1132. int bad_sectors;
  1133. int is_bad;
  1134. is_bad = is_badblock(rdev, r1_bio->sector,
  1135. max_sectors,
  1136. &first_bad, &bad_sectors);
  1137. if (is_bad < 0) {
  1138. /* mustn't write here until the bad block is
  1139. * acknowledged*/
  1140. set_bit(BlockedBadBlocks, &rdev->flags);
  1141. blocked_rdev = rdev;
  1142. break;
  1143. }
  1144. if (is_bad && first_bad <= r1_bio->sector) {
  1145. /* Cannot write here at all */
  1146. bad_sectors -= (r1_bio->sector - first_bad);
  1147. if (bad_sectors < max_sectors)
  1148. /* mustn't write more than bad_sectors
  1149. * to other devices yet
  1150. */
  1151. max_sectors = bad_sectors;
  1152. rdev_dec_pending(rdev, mddev);
  1153. /* We don't set R1BIO_Degraded as that
  1154. * only applies if the disk is
  1155. * missing, so it might be re-added,
  1156. * and we want to know to recover this
  1157. * chunk.
  1158. * In this case the device is here,
  1159. * and the fact that this chunk is not
  1160. * in-sync is recorded in the bad
  1161. * block log
  1162. */
  1163. continue;
  1164. }
  1165. if (is_bad) {
  1166. int good_sectors = first_bad - r1_bio->sector;
  1167. if (good_sectors < max_sectors)
  1168. max_sectors = good_sectors;
  1169. }
  1170. }
  1171. r1_bio->bios[i] = bio;
  1172. }
  1173. rcu_read_unlock();
  1174. if (unlikely(blocked_rdev)) {
  1175. /* Wait for this device to become unblocked */
  1176. int j;
  1177. sector_t old = start_next_window;
  1178. for (j = 0; j < i; j++)
  1179. if (r1_bio->bios[j])
  1180. rdev_dec_pending(conf->mirrors[j].rdev, mddev);
  1181. r1_bio->state = 0;
  1182. allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
  1183. md_wait_for_blocked_rdev(blocked_rdev, mddev);
  1184. start_next_window = wait_barrier(conf, bio);
  1185. /*
  1186. * We must make sure the multi r1bios of bio have
  1187. * the same value of bi_phys_segments
  1188. */
  1189. if (bio->bi_phys_segments && old &&
  1190. old != start_next_window)
  1191. /* Wait for the former r1bio(s) to complete */
  1192. wait_event(conf->wait_barrier,
  1193. bio->bi_phys_segments == 1);
  1194. goto retry_write;
  1195. }
  1196. if (max_sectors < r1_bio->sectors) {
  1197. /* We are splitting this write into multiple parts, so
  1198. * we need to prepare for allocating another r1_bio.
  1199. */
  1200. r1_bio->sectors = max_sectors;
  1201. spin_lock_irq(&conf->device_lock);
  1202. if (bio->bi_phys_segments == 0)
  1203. bio->bi_phys_segments = 2;
  1204. else
  1205. bio->bi_phys_segments++;
  1206. spin_unlock_irq(&conf->device_lock);
  1207. }
  1208. sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
  1209. atomic_set(&r1_bio->remaining, 1);
  1210. atomic_set(&r1_bio->behind_remaining, 0);
  1211. first_clone = 1;
  1212. for (i = 0; i < disks; i++) {
  1213. struct bio *mbio;
  1214. if (!r1_bio->bios[i])
  1215. continue;
  1216. mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
  1217. bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
  1218. if (first_clone) {
  1219. /* do behind I/O ?
  1220. * Not if there are too many, or cannot
  1221. * allocate memory, or a reader on WriteMostly
  1222. * is waiting for behind writes to flush */
  1223. if (bitmap &&
  1224. (atomic_read(&bitmap->behind_writes)
  1225. < mddev->bitmap_info.max_write_behind) &&
  1226. !waitqueue_active(&bitmap->behind_wait))
  1227. alloc_behind_pages(mbio, r1_bio);
  1228. bitmap_startwrite(bitmap, r1_bio->sector,
  1229. r1_bio->sectors,
  1230. test_bit(R1BIO_BehindIO,
  1231. &r1_bio->state));
  1232. first_clone = 0;
  1233. }
  1234. if (r1_bio->behind_bvecs) {
  1235. struct bio_vec *bvec;
  1236. int j;
  1237. /*
  1238. * We trimmed the bio, so _all is legit
  1239. */
  1240. bio_for_each_segment_all(bvec, mbio, j)
  1241. bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
  1242. if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
  1243. atomic_inc(&r1_bio->behind_remaining);
  1244. }
  1245. r1_bio->bios[i] = mbio;
  1246. mbio->bi_iter.bi_sector = (r1_bio->sector +
  1247. conf->mirrors[i].rdev->data_offset);
  1248. mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
  1249. mbio->bi_end_io = raid1_end_write_request;
  1250. mbio->bi_rw =
  1251. WRITE | do_flush_fua | do_sync | do_discard | do_same;
  1252. mbio->bi_private = r1_bio;
  1253. atomic_inc(&r1_bio->remaining);
  1254. cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
  1255. if (cb)
  1256. plug = container_of(cb, struct raid1_plug_cb, cb);
  1257. else
  1258. plug = NULL;
  1259. spin_lock_irqsave(&conf->device_lock, flags);
  1260. if (plug) {
  1261. bio_list_add(&plug->pending, mbio);
  1262. plug->pending_cnt++;
  1263. } else {
  1264. bio_list_add(&conf->pending_bio_list, mbio);
  1265. conf->pending_count++;
  1266. }
  1267. spin_unlock_irqrestore(&conf->device_lock, flags);
  1268. if (!plug)
  1269. md_wakeup_thread(mddev->thread);
  1270. }
  1271. /* Mustn't call r1_bio_write_done before this next test,
  1272. * as it could result in the bio being freed.
  1273. */
  1274. if (sectors_handled < bio_sectors(bio)) {
  1275. r1_bio_write_done(r1_bio);
  1276. /* We need another r1_bio. It has already been counted
  1277. * in bio->bi_phys_segments
  1278. */
  1279. r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
  1280. r1_bio->master_bio = bio;
  1281. r1_bio->sectors = bio_sectors(bio) - sectors_handled;
  1282. r1_bio->state = 0;
  1283. r1_bio->mddev = mddev;
  1284. r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
  1285. goto retry_write;
  1286. }
  1287. r1_bio_write_done(r1_bio);
  1288. /* In case raid1d snuck in to freeze_array */
  1289. wake_up(&conf->wait_barrier);
  1290. }
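/*
 * Report the array shape and per-member state for /proc/mdstat, e.g.
 * " [2/1] [U_]" for a two-disk mirror with one member failed
 * ('U' = in sync, '_' = missing or not in sync).
 */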
  1291. static void status(struct seq_file *seq, struct mddev *mddev)
  1292. {
  1293. struct r1conf *conf = mddev->private;
  1294. int i;
  1295. seq_printf(seq, " [%d/%d] [", conf->raid_disks,
  1296. conf->raid_disks - mddev->degraded);
  1297. rcu_read_lock();
  1298. for (i = 0; i < conf->raid_disks; i++) {
  1299. struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  1300. seq_printf(seq, "%s",
  1301. rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
  1302. }
  1303. rcu_read_unlock();
  1304. seq_printf(seq, "]");
  1305. }
  1306. static void error(struct mddev *mddev, struct md_rdev *rdev)
  1307. {
  1308. char b[BDEVNAME_SIZE];
  1309. struct r1conf *conf = mddev->private;
  1310. /*
  1311. * If it is not operational, then we have already marked it as dead
1312. * else if it is the last working disk, ignore the error, let the
  1313. * next level up know.
  1314. * else mark the drive as failed
  1315. */
  1316. if (test_bit(In_sync, &rdev->flags)
  1317. && (conf->raid_disks - mddev->degraded) == 1) {
  1318. /*
  1319. * Don't fail the drive, act as though we were just a
  1320. * normal single drive.
  1321. * However don't try a recovery from this drive as
  1322. * it is very likely to fail.
  1323. */
  1324. conf->recovery_disabled = mddev->recovery_disabled;
  1325. return;
  1326. }
  1327. set_bit(Blocked, &rdev->flags);
  1328. if (test_and_clear_bit(In_sync, &rdev->flags)) {
  1329. unsigned long flags;
  1330. spin_lock_irqsave(&conf->device_lock, flags);
  1331. mddev->degraded++;
  1332. set_bit(Faulty, &rdev->flags);
  1333. spin_unlock_irqrestore(&conf->device_lock, flags);
  1334. /*
  1335. * if recovery is running, make sure it aborts.
  1336. */
  1337. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  1338. } else
  1339. set_bit(Faulty, &rdev->flags);
  1340. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  1341. printk(KERN_ALERT
  1342. "md/raid1:%s: Disk failure on %s, disabling device.\n"
  1343. "md/raid1:%s: Operation continuing on %d devices.\n",
  1344. mdname(mddev), bdevname(rdev->bdev, b),
  1345. mdname(mddev), conf->raid_disks - mddev->degraded);
  1346. }
  1347. static void print_conf(struct r1conf *conf)
  1348. {
  1349. int i;
  1350. printk(KERN_DEBUG "RAID1 conf printout:\n");
  1351. if (!conf) {
  1352. printk(KERN_DEBUG "(!conf)\n");
  1353. return;
  1354. }
  1355. printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
  1356. conf->raid_disks);
  1357. rcu_read_lock();
  1358. for (i = 0; i < conf->raid_disks; i++) {
  1359. char b[BDEVNAME_SIZE];
  1360. struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  1361. if (rdev)
  1362. printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
  1363. i, !test_bit(In_sync, &rdev->flags),
  1364. !test_bit(Faulty, &rdev->flags),
  1365. bdevname(rdev->bdev,b));
  1366. }
  1367. rcu_read_unlock();
  1368. }
  1369. static void close_sync(struct r1conf *conf)
  1370. {
  1371. wait_barrier(conf, NULL);
  1372. allow_barrier(conf, 0, 0);
  1373. mempool_destroy(conf->r1buf_pool);
  1374. conf->r1buf_pool = NULL;
  1375. conf->next_resync = 0;
  1376. conf->start_next_window = MaxSector;
  1377. }
  1378. static int raid1_spare_active(struct mddev *mddev)
  1379. {
  1380. int i;
  1381. struct r1conf *conf = mddev->private;
  1382. int count = 0;
  1383. unsigned long flags;
  1384. /*
  1385. * Find all failed disks within the RAID1 configuration
  1386. * and mark them readable.
  1387. * Called under mddev lock, so rcu protection not needed.
  1388. */
  1389. for (i = 0; i < conf->raid_disks; i++) {
  1390. struct md_rdev *rdev = conf->mirrors[i].rdev;
  1391. struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
  1392. if (repl
  1393. && repl->recovery_offset == MaxSector
  1394. && !test_bit(Faulty, &repl->flags)
  1395. && !test_and_set_bit(In_sync, &repl->flags)) {
  1396. /* replacement has just become active */
  1397. if (!rdev ||
  1398. !test_and_clear_bit(In_sync, &rdev->flags))
  1399. count++;
  1400. if (rdev) {
  1401. /* Replaced device not technically
  1402. * faulty, but we need to be sure
  1403. * it gets removed and never re-added
  1404. */
  1405. set_bit(Faulty, &rdev->flags);
  1406. sysfs_notify_dirent_safe(
  1407. rdev->sysfs_state);
  1408. }
  1409. }
  1410. if (rdev
  1411. && rdev->recovery_offset == MaxSector
  1412. && !test_bit(Faulty, &rdev->flags)
  1413. && !test_and_set_bit(In_sync, &rdev->flags)) {
  1414. count++;
  1415. sysfs_notify_dirent_safe(rdev->sysfs_state);
  1416. }
  1417. }
  1418. spin_lock_irqsave(&conf->device_lock, flags);
  1419. mddev->degraded -= count;
  1420. spin_unlock_irqrestore(&conf->device_lock, flags);
  1421. print_conf(conf);
  1422. return count;
  1423. }
  1424. static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
  1425. {
  1426. struct r1conf *conf = mddev->private;
  1427. int err = -EEXIST;
  1428. int mirror = 0;
  1429. struct raid1_info *p;
  1430. int first = 0;
  1431. int last = conf->raid_disks - 1;
  1432. struct request_queue *q = bdev_get_queue(rdev->bdev);
  1433. if (mddev->recovery_disabled == conf->recovery_disabled)
  1434. return -EBUSY;
  1435. if (rdev->raid_disk >= 0)
  1436. first = last = rdev->raid_disk;
  1437. if (q->merge_bvec_fn) {
  1438. set_bit(Unmerged, &rdev->flags);
  1439. mddev->merge_check_needed = 1;
  1440. }
  1441. for (mirror = first; mirror <= last; mirror++) {
  1442. p = conf->mirrors+mirror;
  1443. if (!p->rdev) {
  1444. if (mddev->gendisk)
  1445. disk_stack_limits(mddev->gendisk, rdev->bdev,
  1446. rdev->data_offset << 9);
  1447. p->head_position = 0;
  1448. rdev->raid_disk = mirror;
  1449. err = 0;
  1450. /* As all devices are equivalent, we don't need a full recovery
1451. * if this drive was recently part of the array
  1452. */
  1453. if (rdev->saved_raid_disk < 0)
  1454. conf->fullsync = 1;
  1455. rcu_assign_pointer(p->rdev, rdev);
  1456. break;
  1457. }
  1458. if (test_bit(WantReplacement, &p->rdev->flags) &&
  1459. p[conf->raid_disks].rdev == NULL) {
  1460. /* Add this device as a replacement */
  1461. clear_bit(In_sync, &rdev->flags);
  1462. set_bit(Replacement, &rdev->flags);
  1463. rdev->raid_disk = mirror;
  1464. err = 0;
  1465. conf->fullsync = 1;
  1466. rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
  1467. break;
  1468. }
  1469. }
  1470. if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
  1471. /* Some requests might not have seen this new
  1472. * merge_bvec_fn. We must wait for them to complete
  1473. * before merging the device fully.
  1474. * First we make sure any code which has tested
  1475. * our function has submitted the request, then
  1476. * we wait for all outstanding requests to complete.
  1477. */
  1478. synchronize_sched();
  1479. freeze_array(conf, 0);
  1480. unfreeze_array(conf);
  1481. clear_bit(Unmerged, &rdev->flags);
  1482. }
  1483. md_integrity_add_rdev(rdev, mddev);
  1484. if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
  1485. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
  1486. print_conf(conf);
  1487. return err;
  1488. }
  1489. static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
  1490. {
  1491. struct r1conf *conf = mddev->private;
  1492. int err = 0;
  1493. int number = rdev->raid_disk;
  1494. struct raid1_info *p = conf->mirrors + number;
  1495. if (rdev != p->rdev)
  1496. p = conf->mirrors + conf->raid_disks + number;
  1497. print_conf(conf);
  1498. if (rdev == p->rdev) {
  1499. if (test_bit(In_sync, &rdev->flags) ||
  1500. atomic_read(&rdev->nr_pending)) {
  1501. err = -EBUSY;
  1502. goto abort;
  1503. }
  1504. /* Only remove non-faulty devices if recovery
  1505. * is not possible.
  1506. */
  1507. if (!test_bit(Faulty, &rdev->flags) &&
  1508. mddev->recovery_disabled != conf->recovery_disabled &&
  1509. mddev->degraded < conf->raid_disks) {
  1510. err = -EBUSY;
  1511. goto abort;
  1512. }
  1513. p->rdev = NULL;
  1514. synchronize_rcu();
  1515. if (atomic_read(&rdev->nr_pending)) {
  1516. /* lost the race, try later */
  1517. err = -EBUSY;
  1518. p->rdev = rdev;
  1519. goto abort;
  1520. } else if (conf->mirrors[conf->raid_disks + number].rdev) {
  1521. /* We just removed a device that is being replaced.
  1522. * Move down the replacement. We drain all IO before
  1523. * doing this to avoid confusion.
  1524. */
  1525. struct md_rdev *repl =
  1526. conf->mirrors[conf->raid_disks + number].rdev;
  1527. freeze_array(conf, 0);
  1528. clear_bit(Replacement, &repl->flags);
  1529. p->rdev = repl;
  1530. conf->mirrors[conf->raid_disks + number].rdev = NULL;
  1531. unfreeze_array(conf);
  1532. clear_bit(WantReplacement, &rdev->flags);
  1533. } else
  1534. clear_bit(WantReplacement, &rdev->flags);
  1535. err = md_integrity_register(mddev);
  1536. }
  1537. abort:
  1538. print_conf(conf);
  1539. return err;
  1540. }
  1541. static void end_sync_read(struct bio *bio, int error)
  1542. {
  1543. struct r1bio *r1_bio = bio->bi_private;
  1544. update_head_pos(r1_bio->read_disk, r1_bio);
  1545. /*
  1546. * we have read a block, now it needs to be re-written,
  1547. * or re-read if the read failed.
  1548. * We don't do much here, just schedule handling by raid1d
  1549. */
  1550. if (test_bit(BIO_UPTODATE, &bio->bi_flags))
  1551. set_bit(R1BIO_Uptodate, &r1_bio->state);
  1552. if (atomic_dec_and_test(&r1_bio->remaining))
  1553. reschedule_retry(r1_bio);
  1554. }
  1555. static void end_sync_write(struct bio *bio, int error)
  1556. {
  1557. int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  1558. struct r1bio *r1_bio = bio->bi_private;
  1559. struct mddev *mddev = r1_bio->mddev;
  1560. struct r1conf *conf = mddev->private;
  1561. int mirror=0;
  1562. sector_t first_bad;
  1563. int bad_sectors;
  1564. mirror = find_bio_disk(r1_bio, bio);
  1565. if (!uptodate) {
  1566. sector_t sync_blocks = 0;
  1567. sector_t s = r1_bio->sector;
  1568. long sectors_to_go = r1_bio->sectors;
1569. /* make sure these bits don't get cleared. */
  1570. do {
  1571. bitmap_end_sync(mddev->bitmap, s,
  1572. &sync_blocks, 1);
  1573. s += sync_blocks;
  1574. sectors_to_go -= sync_blocks;
  1575. } while (sectors_to_go > 0);
  1576. set_bit(WriteErrorSeen,
  1577. &conf->mirrors[mirror].rdev->flags);
  1578. if (!test_and_set_bit(WantReplacement,
  1579. &conf->mirrors[mirror].rdev->flags))
  1580. set_bit(MD_RECOVERY_NEEDED, &
  1581. mddev->recovery);
  1582. set_bit(R1BIO_WriteError, &r1_bio->state);
  1583. } else if (is_badblock(conf->mirrors[mirror].rdev,
  1584. r1_bio->sector,
  1585. r1_bio->sectors,
  1586. &first_bad, &bad_sectors) &&
  1587. !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
  1588. r1_bio->sector,
  1589. r1_bio->sectors,
  1590. &first_bad, &bad_sectors)
  1591. )
  1592. set_bit(R1BIO_MadeGood, &r1_bio->state);
  1593. if (atomic_dec_and_test(&r1_bio->remaining)) {
  1594. int s = r1_bio->sectors;
  1595. if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
  1596. test_bit(R1BIO_WriteError, &r1_bio->state))
  1597. reschedule_retry(r1_bio);
  1598. else {
  1599. put_buf(r1_bio);
  1600. md_done_sync(mddev, s, uptodate);
  1601. }
  1602. }
  1603. }
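/*
 * r1_sync_page_io() below returns 1 on success; on failure it records a
 * bad block (or, failing that, marks the whole device faulty) and
 * returns 0.
 */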
  1604. static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
  1605. int sectors, struct page *page, int rw)
  1606. {
  1607. if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
  1608. /* success */
  1609. return 1;
  1610. if (rw == WRITE) {
  1611. set_bit(WriteErrorSeen, &rdev->flags);
  1612. if (!test_and_set_bit(WantReplacement,
  1613. &rdev->flags))
  1614. set_bit(MD_RECOVERY_NEEDED, &
  1615. rdev->mddev->recovery);
  1616. }
  1617. /* need to record an error - either for the block or the device */
  1618. if (!rdev_set_badblocks(rdev, sector, sectors, 0))
  1619. md_error(rdev->mddev, rdev);
  1620. return 0;
  1621. }
  1622. static int fix_sync_read_error(struct r1bio *r1_bio)
  1623. {
  1624. /* Try some synchronous reads of other devices to get
  1625. * good data, much like with normal read errors. Only
  1626. * read into the pages we already have so we don't
  1627. * need to re-issue the read request.
  1628. * We don't need to freeze the array, because being in an
  1629. * active sync request, there is no normal IO, and
  1630. * no overlapping syncs.
  1631. * We don't need to check is_badblock() again as we
  1632. * made sure that anything with a bad block in range
  1633. * will have bi_end_io clear.
  1634. */
  1635. struct mddev *mddev = r1_bio->mddev;
  1636. struct r1conf *conf = mddev->private;
  1637. struct bio *bio = r1_bio->bios[r1_bio->read_disk];
  1638. sector_t sect = r1_bio->sector;
  1639. int sectors = r1_bio->sectors;
  1640. int idx = 0;
  1641. while(sectors) {
  1642. int s = sectors;
  1643. int d = r1_bio->read_disk;
  1644. int success = 0;
  1645. struct md_rdev *rdev;
  1646. int start;
  1647. if (s > (PAGE_SIZE>>9))
  1648. s = PAGE_SIZE >> 9;
  1649. do {
  1650. if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1651. /* No rcu protection needed here; devices
  1652. * can only be removed when no resync is
  1653. * active, and resync is currently active
  1654. */
  1655. rdev = conf->mirrors[d].rdev;
  1656. if (sync_page_io(rdev, sect, s<<9,
  1657. bio->bi_io_vec[idx].bv_page,
  1658. READ, false)) {
  1659. success = 1;
  1660. break;
  1661. }
  1662. }
  1663. d++;
  1664. if (d == conf->raid_disks * 2)
  1665. d = 0;
  1666. } while (!success && d != r1_bio->read_disk);
  1667. if (!success) {
  1668. char b[BDEVNAME_SIZE];
  1669. int abort = 0;
  1670. /* Cannot read from anywhere, this block is lost.
  1671. * Record a bad block on each device. If that doesn't
  1672. * work just disable and interrupt the recovery.
  1673. * Don't fail devices as that won't really help.
  1674. */
  1675. printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
  1676. " for block %llu\n",
  1677. mdname(mddev),
  1678. bdevname(bio->bi_bdev, b),
  1679. (unsigned long long)r1_bio->sector);
  1680. for (d = 0; d < conf->raid_disks * 2; d++) {
  1681. rdev = conf->mirrors[d].rdev;
  1682. if (!rdev || test_bit(Faulty, &rdev->flags))
  1683. continue;
  1684. if (!rdev_set_badblocks(rdev, sect, s, 0))
  1685. abort = 1;
  1686. }
  1687. if (abort) {
  1688. conf->recovery_disabled =
  1689. mddev->recovery_disabled;
  1690. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  1691. md_done_sync(mddev, r1_bio->sectors, 0);
  1692. put_buf(r1_bio);
  1693. return 0;
  1694. }
  1695. /* Try next page */
  1696. sectors -= s;
  1697. sect += s;
  1698. idx++;
  1699. continue;
  1700. }
  1701. start = d;
  1702. /* write it back and re-read */
  1703. while (d != r1_bio->read_disk) {
  1704. if (d == 0)
  1705. d = conf->raid_disks * 2;
  1706. d--;
  1707. if (r1_bio->bios[d]->bi_end_io != end_sync_read)
  1708. continue;
  1709. rdev = conf->mirrors[d].rdev;
  1710. if (r1_sync_page_io(rdev, sect, s,
  1711. bio->bi_io_vec[idx].bv_page,
  1712. WRITE) == 0) {
  1713. r1_bio->bios[d]->bi_end_io = NULL;
  1714. rdev_dec_pending(rdev, mddev);
  1715. }
  1716. }
  1717. d = start;
  1718. while (d != r1_bio->read_disk) {
  1719. if (d == 0)
  1720. d = conf->raid_disks * 2;
  1721. d--;
  1722. if (r1_bio->bios[d]->bi_end_io != end_sync_read)
  1723. continue;
  1724. rdev = conf->mirrors[d].rdev;
  1725. if (r1_sync_page_io(rdev, sect, s,
  1726. bio->bi_io_vec[idx].bv_page,
  1727. READ) != 0)
  1728. atomic_add(s, &rdev->corrected_errors);
  1729. }
  1730. sectors -= s;
  1731. sect += s;
  1732. idx ++;
  1733. }
  1734. set_bit(R1BIO_Uptodate, &r1_bio->state);
  1735. set_bit(BIO_UPTODATE, &bio->bi_flags);
  1736. return 1;
  1737. }
  1738. static int process_checks(struct r1bio *r1_bio)
  1739. {
  1740. /* We have read all readable devices. If we haven't
  1741. * got the block, then there is no hope left.
  1742. * If we have, then we want to do a comparison
  1743. * and skip the write if everything is the same.
  1744. * If any blocks failed to read, then we need to
  1745. * attempt an over-write
  1746. */
  1747. struct mddev *mddev = r1_bio->mddev;
  1748. struct r1conf *conf = mddev->private;
  1749. int primary;
  1750. int i;
  1751. int vcnt;
  1752. /* Fix variable parts of all bios */
  1753. vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
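/* vcnt is the number of pages needed for r1_bio->sectors; e.g. with 4KiB
 * pages, 16 sectors (8KiB) gives (16 + 8 - 1) >> 3 = 2.
 */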
  1754. for (i = 0; i < conf->raid_disks * 2; i++) {
  1755. int j;
  1756. int size;
  1757. int uptodate;
  1758. struct bio *b = r1_bio->bios[i];
  1759. if (b->bi_end_io != end_sync_read)
  1760. continue;
  1761. /* fixup the bio for reuse, but preserve BIO_UPTODATE */
  1762. uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
  1763. bio_reset(b);
  1764. if (!uptodate)
  1765. clear_bit(BIO_UPTODATE, &b->bi_flags);
  1766. b->bi_vcnt = vcnt;
  1767. b->bi_iter.bi_size = r1_bio->sectors << 9;
  1768. b->bi_iter.bi_sector = r1_bio->sector +
  1769. conf->mirrors[i].rdev->data_offset;
  1770. b->bi_bdev = conf->mirrors[i].rdev->bdev;
  1771. b->bi_end_io = end_sync_read;
  1772. b->bi_private = r1_bio;
  1773. size = b->bi_iter.bi_size;
  1774. for (j = 0; j < vcnt ; j++) {
  1775. struct bio_vec *bi;
  1776. bi = &b->bi_io_vec[j];
  1777. bi->bv_offset = 0;
  1778. if (size > PAGE_SIZE)
  1779. bi->bv_len = PAGE_SIZE;
  1780. else
  1781. bi->bv_len = size;
  1782. size -= PAGE_SIZE;
  1783. }
  1784. }
  1785. for (primary = 0; primary < conf->raid_disks * 2; primary++)
  1786. if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
  1787. test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
  1788. r1_bio->bios[primary]->bi_end_io = NULL;
  1789. rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
  1790. break;
  1791. }
  1792. r1_bio->read_disk = primary;
  1793. for (i = 0; i < conf->raid_disks * 2; i++) {
  1794. int j;
  1795. struct bio *pbio = r1_bio->bios[primary];
  1796. struct bio *sbio = r1_bio->bios[i];
  1797. int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
  1798. if (sbio->bi_end_io != end_sync_read)
  1799. continue;
  1800. /* Now we can 'fixup' the BIO_UPTODATE flag */
  1801. set_bit(BIO_UPTODATE, &sbio->bi_flags);
  1802. if (uptodate) {
  1803. for (j = vcnt; j-- ; ) {
  1804. struct page *p, *s;
  1805. p = pbio->bi_io_vec[j].bv_page;
  1806. s = sbio->bi_io_vec[j].bv_page;
  1807. if (memcmp(page_address(p),
  1808. page_address(s),
  1809. sbio->bi_io_vec[j].bv_len))
  1810. break;
  1811. }
  1812. } else
  1813. j = 0;
  1814. if (j >= 0)
  1815. atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
  1816. if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
  1817. && uptodate)) {
  1818. /* No need to write to this device. */
  1819. sbio->bi_end_io = NULL;
  1820. rdev_dec_pending(conf->mirrors[i].rdev, mddev);
  1821. continue;
  1822. }
  1823. bio_copy_data(sbio, pbio);
  1824. }
  1825. return 0;
  1826. }
  1827. static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
  1828. {
  1829. struct r1conf *conf = mddev->private;
  1830. int i;
  1831. int disks = conf->raid_disks * 2;
  1832. struct bio *bio, *wbio;
  1833. bio = r1_bio->bios[r1_bio->read_disk];
  1834. if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
  1835. /* ouch - failed to read all of that. */
  1836. if (!fix_sync_read_error(r1_bio))
  1837. return;
  1838. if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
  1839. if (process_checks(r1_bio) < 0)
  1840. return;
  1841. /*
  1842. * schedule writes
  1843. */
  1844. atomic_set(&r1_bio->remaining, 1);
  1845. for (i = 0; i < disks ; i++) {
  1846. wbio = r1_bio->bios[i];
  1847. if (wbio->bi_end_io == NULL ||
  1848. (wbio->bi_end_io == end_sync_read &&
  1849. (i == r1_bio->read_disk ||
  1850. !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
  1851. continue;
  1852. wbio->bi_rw = WRITE;
  1853. wbio->bi_end_io = end_sync_write;
  1854. atomic_inc(&r1_bio->remaining);
  1855. md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
  1856. generic_make_request(wbio);
  1857. }
  1858. if (atomic_dec_and_test(&r1_bio->remaining)) {
  1859. /* if we're here, all write(s) have completed, so clean up */
  1860. int s = r1_bio->sectors;
  1861. if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
  1862. test_bit(R1BIO_WriteError, &r1_bio->state))
  1863. reschedule_retry(r1_bio);
  1864. else {
  1865. put_buf(r1_bio);
  1866. md_done_sync(mddev, s, 1);
  1867. }
  1868. }
  1869. }
  1870. /*
  1871. * This is a kernel thread which:
  1872. *
  1873. * 1. Retries failed read operations on working mirrors.
1874. * 2. Updates the raid superblock when problems are encountered.
  1875. * 3. Performs writes following reads for array synchronising.
  1876. */
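/*
 * fix_read_error() works on at most page-sized chunks: find any in-sync
 * device that can read the range, write the good data back to the other
 * in-sync devices, then re-read to verify and count corrected errors.
 */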
  1877. static void fix_read_error(struct r1conf *conf, int read_disk,
  1878. sector_t sect, int sectors)
  1879. {
  1880. struct mddev *mddev = conf->mddev;
  1881. while(sectors) {
  1882. int s = sectors;
  1883. int d = read_disk;
  1884. int success = 0;
  1885. int start;
  1886. struct md_rdev *rdev;
  1887. if (s > (PAGE_SIZE>>9))
  1888. s = PAGE_SIZE >> 9;
  1889. do {
  1890. /* Note: no rcu protection needed here
  1891. * as this is synchronous in the raid1d thread
  1892. * which is the thread that might remove
  1893. * a device. If raid1d ever becomes multi-threaded....
  1894. */
  1895. sector_t first_bad;
  1896. int bad_sectors;
  1897. rdev = conf->mirrors[d].rdev;
  1898. if (rdev &&
  1899. (test_bit(In_sync, &rdev->flags) ||
  1900. (!test_bit(Faulty, &rdev->flags) &&
  1901. rdev->recovery_offset >= sect + s)) &&
  1902. is_badblock(rdev, sect, s,
  1903. &first_bad, &bad_sectors) == 0 &&
  1904. sync_page_io(rdev, sect, s<<9,
  1905. conf->tmppage, READ, false))
  1906. success = 1;
  1907. else {
  1908. d++;
  1909. if (d == conf->raid_disks * 2)
  1910. d = 0;
  1911. }
  1912. } while (!success && d != read_disk);
  1913. if (!success) {
  1914. /* Cannot read from anywhere - mark it bad */
  1915. struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
  1916. if (!rdev_set_badblocks(rdev, sect, s, 0))
  1917. md_error(mddev, rdev);
  1918. break;
  1919. }
  1920. /* write it back and re-read */
  1921. start = d;
  1922. while (d != read_disk) {
  1923. if (d==0)
  1924. d = conf->raid_disks * 2;
  1925. d--;
  1926. rdev = conf->mirrors[d].rdev;
  1927. if (rdev &&
  1928. test_bit(In_sync, &rdev->flags))
  1929. r1_sync_page_io(rdev, sect, s,
  1930. conf->tmppage, WRITE);
  1931. }
  1932. d = start;
  1933. while (d != read_disk) {
  1934. char b[BDEVNAME_SIZE];
  1935. if (d==0)
  1936. d = conf->raid_disks * 2;
  1937. d--;
  1938. rdev = conf->mirrors[d].rdev;
  1939. if (rdev &&
  1940. test_bit(In_sync, &rdev->flags)) {
  1941. if (r1_sync_page_io(rdev, sect, s,
  1942. conf->tmppage, READ)) {
  1943. atomic_add(s, &rdev->corrected_errors);
  1944. printk(KERN_INFO
  1945. "md/raid1:%s: read error corrected "
  1946. "(%d sectors at %llu on %s)\n",
  1947. mdname(mddev), s,
  1948. (unsigned long long)(sect +
  1949. rdev->data_offset),
  1950. bdevname(rdev->bdev, b));
  1951. }
  1952. }
  1953. }
  1954. sectors -= s;
  1955. sect += s;
  1956. }
  1957. }
  1958. static int narrow_write_error(struct r1bio *r1_bio, int i)
  1959. {
  1960. struct mddev *mddev = r1_bio->mddev;
  1961. struct r1conf *conf = mddev->private;
  1962. struct md_rdev *rdev = conf->mirrors[i].rdev;
  1963. /* bio has the data to be written to device 'i' where
  1964. * we just recently had a write error.
  1965. * We repeatedly clone the bio and trim down to one block,
  1966. * then try the write. Where the write fails we record
  1967. * a bad block.
  1968. * It is conceivable that the bio doesn't exactly align with
  1969. * blocks. We must handle this somehow.
  1970. *
  1971. * We currently own a reference on the rdev.
  1972. */
  1973. int block_sectors;
  1974. sector_t sector;
  1975. int sectors;
  1976. int sect_to_write = r1_bio->sectors;
  1977. int ok = 1;
  1978. if (rdev->badblocks.shift < 0)
  1979. return 0;
  1980. block_sectors = 1 << rdev->badblocks.shift;
  1981. sector = r1_bio->sector;
  1982. sectors = ((sector + block_sectors)
  1983. & ~(sector_t)(block_sectors - 1))
  1984. - sector;
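/*
 * Illustrative example: with block_sectors == 8 (a 4KiB bad-block
 * granularity on 512-byte sectors) and r1_bio->sector == 1005 this gives
 * ((1005 + 8) & ~7) - 1005 = 3 sectors for the first, unaligned chunk;
 * later chunks are whole blocks.
 */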
  1985. while (sect_to_write) {
  1986. struct bio *wbio;
  1987. if (sectors > sect_to_write)
  1988. sectors = sect_to_write;
1989. /* Write at 'sector' for 'sectors' */
  1990. if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
  1991. unsigned vcnt = r1_bio->behind_page_count;
  1992. struct bio_vec *vec = r1_bio->behind_bvecs;
  1993. while (!vec->bv_page) {
  1994. vec++;
  1995. vcnt--;
  1996. }
  1997. wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
  1998. memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
  1999. wbio->bi_vcnt = vcnt;
  2000. } else {
  2001. wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
  2002. }
  2003. wbio->bi_rw = WRITE;
  2004. wbio->bi_iter.bi_sector = r1_bio->sector;
  2005. wbio->bi_iter.bi_size = r1_bio->sectors << 9;
  2006. bio_trim(wbio, sector - r1_bio->sector, sectors);
  2007. wbio->bi_iter.bi_sector += rdev->data_offset;
  2008. wbio->bi_bdev = rdev->bdev;
2009. if (submit_bio_wait(WRITE, wbio) < 0)
2010. /* failure! submit_bio_wait() returns 0 on success */
  2011. ok = rdev_set_badblocks(rdev, sector,
  2012. sectors, 0)
  2013. && ok;
  2014. bio_put(wbio);
  2015. sect_to_write -= sectors;
  2016. sector += sectors;
  2017. sectors = block_sectors;
  2018. }
  2019. return ok;
  2020. }
  2021. static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
  2022. {
  2023. int m;
  2024. int s = r1_bio->sectors;
  2025. for (m = 0; m < conf->raid_disks * 2 ; m++) {
  2026. struct md_rdev *rdev = conf->mirrors[m].rdev;
  2027. struct bio *bio = r1_bio->bios[m];
  2028. if (bio->bi_end_io == NULL)
  2029. continue;
  2030. if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
  2031. test_bit(R1BIO_MadeGood, &r1_bio->state)) {
  2032. rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
  2033. }
  2034. if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
  2035. test_bit(R1BIO_WriteError, &r1_bio->state)) {
  2036. if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
  2037. md_error(conf->mddev, rdev);
  2038. }
  2039. }
  2040. put_buf(r1_bio);
  2041. md_done_sync(conf->mddev, s, 1);
  2042. }
  2043. static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
  2044. {
  2045. int m;
  2046. for (m = 0; m < conf->raid_disks * 2 ; m++)
  2047. if (r1_bio->bios[m] == IO_MADE_GOOD) {
  2048. struct md_rdev *rdev = conf->mirrors[m].rdev;
  2049. rdev_clear_badblocks(rdev,
  2050. r1_bio->sector,
  2051. r1_bio->sectors, 0);
  2052. rdev_dec_pending(rdev, conf->mddev);
  2053. } else if (r1_bio->bios[m] != NULL) {
  2054. /* This drive got a write error. We need to
  2055. * narrow down and record precise write
  2056. * errors.
  2057. */
  2058. if (!narrow_write_error(r1_bio, m)) {
  2059. md_error(conf->mddev,
  2060. conf->mirrors[m].rdev);
  2061. /* an I/O failed, we can't clear the bitmap */
  2062. set_bit(R1BIO_Degraded, &r1_bio->state);
  2063. }
  2064. rdev_dec_pending(conf->mirrors[m].rdev,
  2065. conf->mddev);
  2066. }
  2067. if (test_bit(R1BIO_WriteError, &r1_bio->state))
  2068. close_write(r1_bio);
  2069. raid_end_bio_io(r1_bio);
  2070. }
  2071. static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
  2072. {
  2073. int disk;
  2074. int max_sectors;
  2075. struct mddev *mddev = conf->mddev;
  2076. struct bio *bio;
  2077. char b[BDEVNAME_SIZE];
  2078. struct md_rdev *rdev;
  2079. clear_bit(R1BIO_ReadError, &r1_bio->state);
  2080. /* we got a read error. Maybe the drive is bad. Maybe just
  2081. * the block and we can fix it.
  2082. * We freeze all other IO, and try reading the block from
  2083. * other devices. When we find one, we re-write
2084. * and check whether that fixes the read error.
  2085. * This is all done synchronously while the array is
  2086. * frozen
  2087. */
  2088. if (mddev->ro == 0) {
  2089. freeze_array(conf, 1);
  2090. fix_read_error(conf, r1_bio->read_disk,
  2091. r1_bio->sector, r1_bio->sectors);
  2092. unfreeze_array(conf);
  2093. } else
  2094. md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
  2095. rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
  2096. bio = r1_bio->bios[r1_bio->read_disk];
  2097. bdevname(bio->bi_bdev, b);
  2098. read_more:
  2099. disk = read_balance(conf, r1_bio, &max_sectors);
  2100. if (disk == -1) {
  2101. printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
  2102. " read error for block %llu\n",
  2103. mdname(mddev), b, (unsigned long long)r1_bio->sector);
  2104. raid_end_bio_io(r1_bio);
  2105. } else {
  2106. const unsigned long do_sync
  2107. = r1_bio->master_bio->bi_rw & REQ_SYNC;
  2108. if (bio) {
  2109. r1_bio->bios[r1_bio->read_disk] =
  2110. mddev->ro ? IO_BLOCKED : NULL;
  2111. bio_put(bio);
  2112. }
  2113. r1_bio->read_disk = disk;
  2114. bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
  2115. bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
  2116. max_sectors);
  2117. r1_bio->bios[r1_bio->read_disk] = bio;
  2118. rdev = conf->mirrors[disk].rdev;
  2119. printk_ratelimited(KERN_ERR
  2120. "md/raid1:%s: redirecting sector %llu"
  2121. " to other mirror: %s\n",
  2122. mdname(mddev),
  2123. (unsigned long long)r1_bio->sector,
  2124. bdevname(rdev->bdev, b));
  2125. bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
  2126. bio->bi_bdev = rdev->bdev;
  2127. bio->bi_end_io = raid1_end_read_request;
  2128. bio->bi_rw = READ | do_sync;
  2129. bio->bi_private = r1_bio;
  2130. if (max_sectors < r1_bio->sectors) {
  2131. /* Drat - have to split this up more */
  2132. struct bio *mbio = r1_bio->master_bio;
  2133. int sectors_handled = (r1_bio->sector + max_sectors
  2134. - mbio->bi_iter.bi_sector);
  2135. r1_bio->sectors = max_sectors;
  2136. spin_lock_irq(&conf->device_lock);
  2137. if (mbio->bi_phys_segments == 0)
  2138. mbio->bi_phys_segments = 2;
  2139. else
  2140. mbio->bi_phys_segments++;
  2141. spin_unlock_irq(&conf->device_lock);
  2142. generic_make_request(bio);
  2143. bio = NULL;
  2144. r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
  2145. r1_bio->master_bio = mbio;
  2146. r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
  2147. r1_bio->state = 0;
  2148. set_bit(R1BIO_ReadError, &r1_bio->state);
  2149. r1_bio->mddev = mddev;
  2150. r1_bio->sector = mbio->bi_iter.bi_sector +
  2151. sectors_handled;
  2152. goto read_more;
  2153. } else
  2154. generic_make_request(bio);
  2155. }
  2156. }
  2157. static void raid1d(struct md_thread *thread)
  2158. {
  2159. struct mddev *mddev = thread->mddev;
  2160. struct r1bio *r1_bio;
  2161. unsigned long flags;
  2162. struct r1conf *conf = mddev->private;
  2163. struct list_head *head = &conf->retry_list;
  2164. struct blk_plug plug;
  2165. md_check_recovery(mddev);
  2166. blk_start_plug(&plug);
  2167. for (;;) {
  2168. flush_pending_writes(conf);
  2169. spin_lock_irqsave(&conf->device_lock, flags);
  2170. if (list_empty(head)) {
  2171. spin_unlock_irqrestore(&conf->device_lock, flags);
  2172. break;
  2173. }
  2174. r1_bio = list_entry(head->prev, struct r1bio, retry_list);
  2175. list_del(head->prev);
  2176. conf->nr_queued--;
  2177. spin_unlock_irqrestore(&conf->device_lock, flags);
  2178. mddev = r1_bio->mddev;
  2179. conf = mddev->private;
  2180. if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
  2181. if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
  2182. test_bit(R1BIO_WriteError, &r1_bio->state))
  2183. handle_sync_write_finished(conf, r1_bio);
  2184. else
  2185. sync_request_write(mddev, r1_bio);
  2186. } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
  2187. test_bit(R1BIO_WriteError, &r1_bio->state))
  2188. handle_write_finished(conf, r1_bio);
  2189. else if (test_bit(R1BIO_ReadError, &r1_bio->state))
  2190. handle_read_error(conf, r1_bio);
  2191. else
  2192. /* just a partial read to be scheduled from separate
  2193. * context
  2194. */
  2195. generic_make_request(r1_bio->bios[r1_bio->read_disk]);
  2196. cond_resched();
  2197. if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
  2198. md_check_recovery(mddev);
  2199. }
  2200. blk_finish_plug(&plug);
  2201. }
  2202. static int init_resync(struct r1conf *conf)
  2203. {
  2204. int buffs;
  2205. buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
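/*
 * With the defaults assumed earlier in this file (64KiB resync blocks and
 * a 2MiB resync window) this pre-allocates 32 buffers.
 */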
  2206. BUG_ON(conf->r1buf_pool);
  2207. conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
  2208. conf->poolinfo);
  2209. if (!conf->r1buf_pool)
  2210. return -ENOMEM;
  2211. conf->next_resync = 0;
  2212. return 0;
  2213. }
  2214. /*
  2215. * perform a "sync" on one "block"
  2216. *
  2217. * We need to make sure that no normal I/O request - particularly write
  2218. * requests - conflict with active sync requests.
  2219. *
  2220. * This is achieved by tracking pending requests and a 'barrier' concept
  2221. * that can be installed to exclude normal IO requests.
  2222. */
  2223. static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
  2224. {
  2225. struct r1conf *conf = mddev->private;
  2226. struct r1bio *r1_bio;
  2227. struct bio *bio;
  2228. sector_t max_sector, nr_sectors;
  2229. int disk = -1;
  2230. int i;
  2231. int wonly = -1;
  2232. int write_targets = 0, read_targets = 0;
  2233. sector_t sync_blocks;
  2234. int still_degraded = 0;
  2235. int good_sectors = RESYNC_SECTORS;
  2236. int min_bad = 0; /* number of sectors that are bad in all devices */
  2237. if (!conf->r1buf_pool)
  2238. if (init_resync(conf))
  2239. return 0;
  2240. max_sector = mddev->dev_sectors;
  2241. if (sector_nr >= max_sector) {
  2242. /* If we aborted, we need to abort the
  2243. * sync on the 'current' bitmap chunk (there will
2244. * only be one in raid1 resync).
2245. * We can find the current address in mddev->curr_resync
  2246. */
  2247. if (mddev->curr_resync < max_sector) /* aborted */
  2248. bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
  2249. &sync_blocks, 1);
  2250. else /* completed sync */
  2251. conf->fullsync = 0;
  2252. bitmap_close_sync(mddev->bitmap);
  2253. close_sync(conf);
  2254. return 0;
  2255. }
  2256. if (mddev->bitmap == NULL &&
  2257. mddev->recovery_cp == MaxSector &&
  2258. !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
  2259. conf->fullsync == 0) {
  2260. *skipped = 1;
  2261. return max_sector - sector_nr;
  2262. }
2263. /* before building a request, check if we can skip these blocks.
2264. * This call to bitmap_start_sync doesn't actually record anything
  2265. */
  2266. if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
  2267. !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
  2268. /* We can skip this block, and probably several more */
  2269. *skipped = 1;
  2270. return sync_blocks;
  2271. }
  2272. /*
  2273. * If there is non-resync activity waiting for a turn,
  2274. * and resync is going fast enough,
2275. * then let it through before starting on this new sync request.
  2276. */
  2277. if (!go_faster && conf->nr_waiting)
  2278. msleep_interruptible(1000);
  2279. bitmap_cond_end_sync(mddev->bitmap, sector_nr);
  2280. r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
  2281. raise_barrier(conf);
  2282. conf->next_resync = sector_nr;
  2283. rcu_read_lock();
  2284. /*
  2285. * If we get a correctably read error during resync or recovery,
  2286. * we might want to read from a different device. So we
  2287. * flag all drives that could conceivably be read from for READ,
  2288. * and any others (which will be non-In_sync devices) for WRITE.
  2289. * If a read fails, we try reading from something else for which READ
  2290. * is OK.
  2291. */
  2292. r1_bio->mddev = mddev;
  2293. r1_bio->sector = sector_nr;
  2294. r1_bio->state = 0;
  2295. set_bit(R1BIO_IsSync, &r1_bio->state);
  2296. for (i = 0; i < conf->raid_disks * 2; i++) {
  2297. struct md_rdev *rdev;
  2298. bio = r1_bio->bios[i];
  2299. bio_reset(bio);
  2300. rdev = rcu_dereference(conf->mirrors[i].rdev);
  2301. if (rdev == NULL ||
  2302. test_bit(Faulty, &rdev->flags)) {
  2303. if (i < conf->raid_disks)
  2304. still_degraded = 1;
  2305. } else if (!test_bit(In_sync, &rdev->flags)) {
  2306. bio->bi_rw = WRITE;
  2307. bio->bi_end_io = end_sync_write;
  2308. write_targets ++;
  2309. } else {
  2310. /* may need to read from here */
  2311. sector_t first_bad = MaxSector;
  2312. int bad_sectors;
  2313. if (is_badblock(rdev, sector_nr, good_sectors,
  2314. &first_bad, &bad_sectors)) {
  2315. if (first_bad > sector_nr)
  2316. good_sectors = first_bad - sector_nr;
  2317. else {
  2318. bad_sectors -= (sector_nr - first_bad);
  2319. if (min_bad == 0 ||
  2320. min_bad > bad_sectors)
  2321. min_bad = bad_sectors;
  2322. }
  2323. }
  2324. if (sector_nr < first_bad) {
  2325. if (test_bit(WriteMostly, &rdev->flags)) {
  2326. if (wonly < 0)
  2327. wonly = i;
  2328. } else {
  2329. if (disk < 0)
  2330. disk = i;
  2331. }
  2332. bio->bi_rw = READ;
  2333. bio->bi_end_io = end_sync_read;
  2334. read_targets++;
  2335. } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
  2336. test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
  2337. !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
  2338. /*
  2339. * The device is suitable for reading (InSync),
  2340. * but has bad block(s) here. Let's try to correct them,
  2341. * if we are doing resync or repair. Otherwise, leave
  2342. * this device alone for this sync request.
  2343. */
  2344. bio->bi_rw = WRITE;
  2345. bio->bi_end_io = end_sync_write;
  2346. write_targets++;
  2347. }
  2348. }
  2349. if (bio->bi_end_io) {
  2350. atomic_inc(&rdev->nr_pending);
  2351. bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
  2352. bio->bi_bdev = rdev->bdev;
  2353. bio->bi_private = r1_bio;
  2354. }
  2355. }
  2356. rcu_read_unlock();
  2357. if (disk < 0)
  2358. disk = wonly;
  2359. r1_bio->read_disk = disk;
  2360. if (read_targets == 0 && min_bad > 0) {
  2361. /* These sectors are bad on all InSync devices, so we
  2362. * need to mark them bad on all write targets
  2363. */
  2364. int ok = 1;
  2365. for (i = 0 ; i < conf->raid_disks * 2 ; i++)
  2366. if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
  2367. struct md_rdev *rdev = conf->mirrors[i].rdev;
  2368. ok = rdev_set_badblocks(rdev, sector_nr,
  2369. min_bad, 0
  2370. ) && ok;
  2371. }
  2372. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  2373. *skipped = 1;
  2374. put_buf(r1_bio);
  2375. if (!ok) {
  2376. /* Cannot record the badblocks, so need to
  2377. * abort the resync.
  2378. * If there are multiple read targets, could just
  2379. * fail the really bad ones ???
  2380. */
  2381. conf->recovery_disabled = mddev->recovery_disabled;
  2382. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  2383. return 0;
  2384. } else
  2385. return min_bad;
  2386. }
  2387. if (min_bad > 0 && min_bad < good_sectors) {
  2388. /* only resync enough to reach the next bad->good
  2389. * transition */
  2390. good_sectors = min_bad;
  2391. }
  2392. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
  2393. /* extra read targets are also write targets */
  2394. write_targets += read_targets-1;
  2395. if (write_targets == 0 || read_targets == 0) {
  2396. /* There is nowhere to write, so all non-sync
  2397. * drives must be failed - so we are finished
  2398. */
  2399. sector_t rv;
  2400. if (min_bad > 0)
  2401. max_sector = sector_nr + min_bad;
  2402. rv = max_sector - sector_nr;
  2403. *skipped = 1;
  2404. put_buf(r1_bio);
  2405. return rv;
  2406. }
  2407. if (max_sector > mddev->resync_max)
  2408. max_sector = mddev->resync_max; /* Don't do IO beyond here */
  2409. if (max_sector > sector_nr + good_sectors)
  2410. max_sector = sector_nr + good_sectors;
  2411. nr_sectors = 0;
  2412. sync_blocks = 0;
  2413. do {
  2414. struct page *page;
  2415. int len = PAGE_SIZE;
  2416. if (sector_nr + (len>>9) > max_sector)
  2417. len = (max_sector - sector_nr) << 9;
  2418. if (len == 0)
  2419. break;
  2420. if (sync_blocks == 0) {
  2421. if (!bitmap_start_sync(mddev->bitmap, sector_nr,
  2422. &sync_blocks, still_degraded) &&
  2423. !conf->fullsync &&
  2424. !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
  2425. break;
  2426. BUG_ON(sync_blocks < (PAGE_SIZE>>9));
  2427. if ((len >> 9) > sync_blocks)
  2428. len = sync_blocks<<9;
  2429. }
  2430. for (i = 0 ; i < conf->raid_disks * 2; i++) {
  2431. bio = r1_bio->bios[i];
  2432. if (bio->bi_end_io) {
  2433. page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
  2434. if (bio_add_page(bio, page, len, 0) == 0) {
  2435. /* stop here */
  2436. bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
  2437. while (i > 0) {
  2438. i--;
  2439. bio = r1_bio->bios[i];
  2440. if (bio->bi_end_io==NULL)
  2441. continue;
  2442. /* remove last page from this bio */
  2443. bio->bi_vcnt--;
  2444. bio->bi_iter.bi_size -= len;
  2445. bio->bi_flags &= ~(1<< BIO_SEG_VALID);
  2446. }
  2447. goto bio_full;
  2448. }
  2449. }
  2450. }
  2451. nr_sectors += len>>9;
  2452. sector_nr += len>>9;
  2453. sync_blocks -= (len>>9);
  2454. } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
  2455. bio_full:
  2456. r1_bio->sectors = nr_sectors;
  2457. /* For a user-requested sync, we read all readable devices and do a
  2458. * compare
  2459. */
  2460. if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
  2461. atomic_set(&r1_bio->remaining, read_targets);
  2462. for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
  2463. bio = r1_bio->bios[i];
  2464. if (bio->bi_end_io == end_sync_read) {
  2465. read_targets--;
  2466. md_sync_acct(bio->bi_bdev, nr_sectors);
  2467. generic_make_request(bio);
  2468. }
  2469. }
  2470. } else {
  2471. atomic_set(&r1_bio->remaining, 1);
  2472. bio = r1_bio->bios[r1_bio->read_disk];
  2473. md_sync_acct(bio->bi_bdev, nr_sectors);
  2474. generic_make_request(bio);
  2475. }
  2476. return nr_sectors;
  2477. }
  2478. static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
  2479. {
  2480. if (sectors)
  2481. return sectors;
  2482. return mddev->dev_sectors;
  2483. }
  2484. static struct r1conf *setup_conf(struct mddev *mddev)
  2485. {
  2486. struct r1conf *conf;
  2487. int i;
  2488. struct raid1_info *disk;
  2489. struct md_rdev *rdev;
  2490. int err = -ENOMEM;
  2491. conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
  2492. if (!conf)
  2493. goto abort;
  2494. conf->mirrors = kzalloc(sizeof(struct raid1_info)
  2495. * mddev->raid_disks * 2,
  2496. GFP_KERNEL);
  2497. if (!conf->mirrors)
  2498. goto abort;
  2499. conf->tmppage = alloc_page(GFP_KERNEL);
  2500. if (!conf->tmppage)
  2501. goto abort;
  2502. conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
  2503. if (!conf->poolinfo)
  2504. goto abort;
  2505. conf->poolinfo->raid_disks = mddev->raid_disks * 2;
  2506. conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
  2507. r1bio_pool_free,
  2508. conf->poolinfo);
  2509. if (!conf->r1bio_pool)
  2510. goto abort;
  2511. conf->poolinfo->mddev = mddev;
  2512. err = -EINVAL;
  2513. spin_lock_init(&conf->device_lock);
  2514. rdev_for_each(rdev, mddev) {
  2515. struct request_queue *q;
  2516. int disk_idx = rdev->raid_disk;
  2517. if (disk_idx >= mddev->raid_disks
  2518. || disk_idx < 0)
  2519. continue;
  2520. if (test_bit(Replacement, &rdev->flags))
  2521. disk = conf->mirrors + mddev->raid_disks + disk_idx;
  2522. else
  2523. disk = conf->mirrors + disk_idx;
  2524. if (disk->rdev)
  2525. goto abort;
  2526. disk->rdev = rdev;
  2527. q = bdev_get_queue(rdev->bdev);
  2528. if (q->merge_bvec_fn)
  2529. mddev->merge_check_needed = 1;
  2530. disk->head_position = 0;
  2531. disk->seq_start = MaxSector;
  2532. }
  2533. conf->raid_disks = mddev->raid_disks;
  2534. conf->mddev = mddev;
  2535. INIT_LIST_HEAD(&conf->retry_list);
  2536. spin_lock_init(&conf->resync_lock);
  2537. init_waitqueue_head(&conf->wait_barrier);
  2538. bio_list_init(&conf->pending_bio_list);
  2539. conf->pending_count = 0;
  2540. conf->recovery_disabled = mddev->recovery_disabled - 1;
  2541. conf->start_next_window = MaxSector;
  2542. conf->current_window_requests = conf->next_window_requests = 0;
  2543. err = -EIO;
  2544. for (i = 0; i < conf->raid_disks * 2; i++) {
  2545. disk = conf->mirrors + i;
  2546. if (i < conf->raid_disks &&
  2547. disk[conf->raid_disks].rdev) {
  2548. /* This slot has a replacement. */
  2549. if (!disk->rdev) {
  2550. /* No original, just make the replacement
  2551. * a recovering spare
  2552. */
  2553. disk->rdev =
  2554. disk[conf->raid_disks].rdev;
  2555. disk[conf->raid_disks].rdev = NULL;
  2556. } else if (!test_bit(In_sync, &disk->rdev->flags))
  2557. /* Original is not in_sync - bad */
  2558. goto abort;
  2559. }
  2560. if (!disk->rdev ||
  2561. !test_bit(In_sync, &disk->rdev->flags)) {
  2562. disk->head_position = 0;
  2563. if (disk->rdev &&
  2564. (disk->rdev->saved_raid_disk < 0))
  2565. conf->fullsync = 1;
  2566. }
  2567. }
  2568. err = -ENOMEM;
  2569. conf->thread = md_register_thread(raid1d, mddev, "raid1");
  2570. if (!conf->thread) {
  2571. printk(KERN_ERR
  2572. "md/raid1:%s: couldn't allocate thread\n",
  2573. mdname(mddev));
  2574. goto abort;
  2575. }
  2576. return conf;
  2577. abort:
  2578. if (conf) {
  2579. if (conf->r1bio_pool)
  2580. mempool_destroy(conf->r1bio_pool);
  2581. kfree(conf->mirrors);
  2582. safe_put_page(conf->tmppage);
  2583. kfree(conf->poolinfo);
  2584. kfree(conf);
  2585. }
  2586. return ERR_PTR(err);
  2587. }
  2588. static int stop(struct mddev *mddev);
  2589. static int run(struct mddev *mddev)
  2590. {
  2591. struct r1conf *conf;
  2592. int i;
  2593. struct md_rdev *rdev;
  2594. int ret;
  2595. bool discard_supported = false;
  2596. if (mddev->level != 1) {
  2597. printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
  2598. mdname(mddev), mddev->level);
  2599. return -EIO;
  2600. }
  2601. if (mddev->reshape_position != MaxSector) {
  2602. printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
  2603. mdname(mddev));
  2604. return -EIO;
  2605. }
  2606. /*
  2607. * copy the already verified devices into our private RAID1
  2608. * bookkeeping area. [whatever we allocate in run(),
  2609. * should be freed in stop()]
  2610. */
  2611. if (mddev->private == NULL)
  2612. conf = setup_conf(mddev);
  2613. else
  2614. conf = mddev->private;
  2615. if (IS_ERR(conf))
  2616. return PTR_ERR(conf);
  2617. if (mddev->queue)
  2618. blk_queue_max_write_same_sectors(mddev->queue, 0);
  2619. rdev_for_each(rdev, mddev) {
  2620. if (!mddev->gendisk)
  2621. continue;
  2622. disk_stack_limits(mddev->gendisk, rdev->bdev,
  2623. rdev->data_offset << 9);
  2624. if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
  2625. discard_supported = true;
  2626. }
  2627. mddev->degraded = 0;
  2628. for (i=0; i < conf->raid_disks; i++)
  2629. if (conf->mirrors[i].rdev == NULL ||
  2630. !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
  2631. test_bit(Faulty, &conf->mirrors[i].rdev->flags))
  2632. mddev->degraded++;
  2633. if (conf->raid_disks - mddev->degraded == 1)
  2634. mddev->recovery_cp = MaxSector;
  2635. if (mddev->recovery_cp != MaxSector)
  2636. printk(KERN_NOTICE "md/raid1:%s: not clean"
  2637. " -- starting background reconstruction\n",
  2638. mdname(mddev));
  2639. printk(KERN_INFO
  2640. "md/raid1:%s: active with %d out of %d mirrors\n",
  2641. mdname(mddev), mddev->raid_disks - mddev->degraded,
  2642. mddev->raid_disks);
  2643. /*
  2644. * Ok, everything is just fine now
  2645. */
  2646. mddev->thread = conf->thread;
  2647. conf->thread = NULL;
  2648. mddev->private = conf;
  2649. md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
  2650. if (mddev->queue) {
  2651. mddev->queue->backing_dev_info.congested_fn = raid1_congested;
  2652. mddev->queue->backing_dev_info.congested_data = mddev;
  2653. blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
  2654. if (discard_supported)
  2655. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
  2656. mddev->queue);
  2657. else
  2658. queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
  2659. mddev->queue);
  2660. }
  2661. ret = md_integrity_register(mddev);
  2662. if (ret)
  2663. stop(mddev);
  2664. return ret;
  2665. }
  2666. static int stop(struct mddev *mddev)
  2667. {
  2668. struct r1conf *conf = mddev->private;
  2669. struct bitmap *bitmap = mddev->bitmap;
  2670. /* wait for behind writes to complete */
  2671. if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
  2672. printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
  2673. mdname(mddev));
  2674. /* need to kick something here to make sure I/O goes? */
  2675. wait_event(bitmap->behind_wait,
  2676. atomic_read(&bitmap->behind_writes) == 0);
  2677. }
  2678. freeze_array(conf, 0);
  2679. unfreeze_array(conf);
  2680. md_unregister_thread(&mddev->thread);
  2681. if (conf->r1bio_pool)
  2682. mempool_destroy(conf->r1bio_pool);
  2683. kfree(conf->mirrors);
  2684. safe_put_page(conf->tmppage);
  2685. kfree(conf->poolinfo);
  2686. kfree(conf);
  2687. mddev->private = NULL;
  2688. return 0;
  2689. }
  2690. static int raid1_resize(struct mddev *mddev, sector_t sectors)
  2691. {
  2692. /* no resync is happening, and there is enough space
  2693. * on all devices, so we can resize.
  2694. * We need to make sure resync covers any new space.
  2695. * If the array is shrinking we should possibly wait until
  2696. * any io in the removed space completes, but it hardly seems
  2697. * worth it.
  2698. */
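/*
 * Typically reached when the member devices have grown, e.g. (illustrative)
 * via "mdadm --grow /dev/mdX --size=max", with 'sectors' being the new
 * per-device size.
 */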
  2699. sector_t newsize = raid1_size(mddev, sectors, 0);
  2700. if (mddev->external_size &&
  2701. mddev->array_sectors > newsize)
  2702. return -EINVAL;
  2703. if (mddev->bitmap) {
  2704. int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
  2705. if (ret)
  2706. return ret;
  2707. }
  2708. md_set_array_sectors(mddev, newsize);
  2709. set_capacity(mddev->gendisk, mddev->array_sectors);
  2710. revalidate_disk(mddev->gendisk);
  2711. if (sectors > mddev->dev_sectors &&
  2712. mddev->recovery_cp > mddev->dev_sectors) {
  2713. mddev->recovery_cp = mddev->dev_sectors;
  2714. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  2715. }
  2716. mddev->dev_sectors = sectors;
  2717. mddev->resync_max_sectors = sectors;
  2718. return 0;
  2719. }
  2720. static int raid1_reshape(struct mddev *mddev)
  2721. {
  2722. /* We need to:
  2723. * 1/ resize the r1bio_pool
  2724. * 2/ resize conf->mirrors
  2725. *
  2726. * We allocate a new r1bio_pool if we can.
  2727. * Then raise a device barrier and wait until all IO stops.
  2728. * Then resize conf->mirrors and swap in the new r1bio pool.
  2729. *
  2730. * At the same time, we "pack" the devices so that all the missing
  2731. * devices have the higher raid_disk numbers.
  2732. */
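/*
 * Packing example (illustrative): shrinking a 3-way mirror whose slot 1 is
 * empty moves the rdev in slot 2 down to slot 1, so the active devices end
 * up in raid_disk numbers 0..cnt-1.
 */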
  2733. mempool_t *newpool, *oldpool;
  2734. struct pool_info *newpoolinfo;
  2735. struct raid1_info *newmirrors;
  2736. struct r1conf *conf = mddev->private;
  2737. int cnt, raid_disks;
  2738. unsigned long flags;
  2739. int d, d2, err;
  2740. /* Cannot change chunk_size, layout, or level */
  2741. if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
  2742. mddev->layout != mddev->new_layout ||
  2743. mddev->level != mddev->new_level) {
  2744. mddev->new_chunk_sectors = mddev->chunk_sectors;
  2745. mddev->new_layout = mddev->layout;
  2746. mddev->new_level = mddev->level;
  2747. return -EINVAL;
  2748. }
  2749. err = md_allow_write(mddev);
  2750. if (err)
  2751. return err;
  2752. raid_disks = mddev->raid_disks + mddev->delta_disks;
  2753. if (raid_disks < conf->raid_disks) {
2754. cnt = 0;
2755. for (d = 0; d < conf->raid_disks; d++)
  2756. if (conf->mirrors[d].rdev)
  2757. cnt++;
  2758. if (cnt > raid_disks)
  2759. return -EBUSY;
  2760. }
  2761. newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
  2762. if (!newpoolinfo)
  2763. return -ENOMEM;
  2764. newpoolinfo->mddev = mddev;
  2765. newpoolinfo->raid_disks = raid_disks * 2;
  2766. newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
  2767. r1bio_pool_free, newpoolinfo);
  2768. if (!newpool) {
  2769. kfree(newpoolinfo);
  2770. return -ENOMEM;
  2771. }
  2772. newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
  2773. GFP_KERNEL);
  2774. if (!newmirrors) {
  2775. kfree(newpoolinfo);
  2776. mempool_destroy(newpool);
  2777. return -ENOMEM;
  2778. }
  2779. freeze_array(conf, 0);
  2780. /* ok, everything is stopped */
  2781. oldpool = conf->r1bio_pool;
  2782. conf->r1bio_pool = newpool;
  2783. for (d = d2 = 0; d < conf->raid_disks; d++) {
  2784. struct md_rdev *rdev = conf->mirrors[d].rdev;
  2785. if (rdev && rdev->raid_disk != d2) {
  2786. sysfs_unlink_rdev(mddev, rdev);
  2787. rdev->raid_disk = d2;
  2788. sysfs_unlink_rdev(mddev, rdev);
  2789. if (sysfs_link_rdev(mddev, rdev))
  2790. printk(KERN_WARNING
  2791. "md/raid1:%s: cannot register rd%d\n",
  2792. mdname(mddev), rdev->raid_disk);
  2793. }
  2794. if (rdev)
  2795. newmirrors[d2++].rdev = rdev;
  2796. }
  2797. kfree(conf->mirrors);
  2798. conf->mirrors = newmirrors;
  2799. kfree(conf->poolinfo);
  2800. conf->poolinfo = newpoolinfo;
  2801. spin_lock_irqsave(&conf->device_lock, flags);
  2802. mddev->degraded += (raid_disks - conf->raid_disks);
  2803. spin_unlock_irqrestore(&conf->device_lock, flags);
  2804. conf->raid_disks = mddev->raid_disks = raid_disks;
  2805. mddev->delta_disks = 0;
  2806. unfreeze_array(conf);
  2807. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  2808. md_wakeup_thread(mddev->thread);
  2809. mempool_destroy(oldpool);
  2810. return 0;
  2811. }
  2812. static void raid1_quiesce(struct mddev *mddev, int state)
  2813. {
  2814. struct r1conf *conf = mddev->private;
  2815. switch(state) {
  2816. case 2: /* wake for suspend */
  2817. wake_up(&conf->wait_barrier);
  2818. break;
  2819. case 1:
  2820. freeze_array(conf, 0);
  2821. break;
  2822. case 0:
  2823. unfreeze_array(conf);
  2824. break;
  2825. }
  2826. }
  2827. static void *raid1_takeover(struct mddev *mddev)
  2828. {
  2829. /* raid1 can take over:
  2830. * raid5 with 2 devices, any layout or chunk size
  2831. */
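/*
 * For example (illustrative), "mdadm --grow /dev/mdX --level=1" on a
 * two-device raid5 lands here.
 */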
  2832. if (mddev->level == 5 && mddev->raid_disks == 2) {
  2833. struct r1conf *conf;
  2834. mddev->new_level = 1;
  2835. mddev->new_layout = 0;
  2836. mddev->new_chunk_sectors = 0;
  2837. conf = setup_conf(mddev);
  2838. if (!IS_ERR(conf))
  2839. /* Array must appear to be quiesced */
  2840. conf->array_frozen = 1;
  2841. return conf;
  2842. }
  2843. return ERR_PTR(-EINVAL);
  2844. }
  2845. static struct md_personality raid1_personality =
  2846. {
  2847. .name = "raid1",
  2848. .level = 1,
  2849. .owner = THIS_MODULE,
  2850. .make_request = make_request,
  2851. .run = run,
  2852. .stop = stop,
  2853. .status = status,
  2854. .error_handler = error,
  2855. .hot_add_disk = raid1_add_disk,
  2856. .hot_remove_disk= raid1_remove_disk,
  2857. .spare_active = raid1_spare_active,
  2858. .sync_request = sync_request,
  2859. .resize = raid1_resize,
  2860. .size = raid1_size,
  2861. .check_reshape = raid1_reshape,
  2862. .quiesce = raid1_quiesce,
  2863. .takeover = raid1_takeover,
  2864. };
  2865. static int __init raid_init(void)
  2866. {
  2867. return register_md_personality(&raid1_personality);
  2868. }
  2869. static void raid_exit(void)
  2870. {
  2871. unregister_md_personality(&raid1_personality);
  2872. }
  2873. module_init(raid_init);
  2874. module_exit(raid_exit);
  2875. MODULE_LICENSE("GPL");
  2876. MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
  2877. MODULE_ALIAS("md-personality-3"); /* RAID1 */
  2878. MODULE_ALIAS("md-raid1");
  2879. MODULE_ALIAS("md-level-1");
  2880. module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);