raid1.c
  1. /*
  2. * raid1.c : Multiple Devices driver for Linux
  3. *
  4. * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
  5. *
  6. * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
  7. *
  8. * RAID-1 management functions.
  9. *
  10. * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
  11. *
  12. * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
  13. * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
  14. *
  15. * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
  16. * bitmapped intelligence in resync:
  17. *
  18. * - bitmap marked during normal i/o
  19. * - bitmap used to skip nondirty blocks during sync
  20. *
  21. * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
  22. * - persistent bitmap code
  23. *
  24. * This program is free software; you can redistribute it and/or modify
  25. * it under the terms of the GNU General Public License as published by
  26. * the Free Software Foundation; either version 2, or (at your option)
  27. * any later version.
  28. *
  29. * You should have received a copy of the GNU General Public License
  30. * (for example /usr/src/linux/COPYING); if not, write to the Free
  31. * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  32. */
  33. #include <linux/slab.h>
  34. #include <linux/delay.h>
  35. #include <linux/blkdev.h>
  36. #include <linux/module.h>
  37. #include <linux/seq_file.h>
  38. #include <linux/ratelimit.h>
  39. #include "md.h"
  40. #include "raid1.h"
  41. #include "bitmap.h"
  42. /*
  43. * Number of guaranteed r1bios in case of extreme VM load:
  44. */
  45. #define NR_RAID1_BIOS 256
  46. /* when we get a read error on a read-only array, we redirect to another
  47. * device without failing the first device, or trying to over-write to
  48. * correct the read error. To keep track of bad blocks on a per-bio
  49. * level, we store IO_BLOCKED in the appropriate 'bios' pointer
  50. */
  51. #define IO_BLOCKED ((struct bio *)1)
  52. /* When we successfully write to a known bad-block, we need to remove the
  53. * bad-block marking which must be done from process context. So we record
  54. * the success by setting devs[n].bio to IO_MADE_GOOD
  55. */
  56. #define IO_MADE_GOOD ((struct bio *)2)
  57. #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
  58. /* When there are this many requests queued to be written by
  59. * the raid1 thread, we become 'congested' to provide back-pressure
  60. * for writeback.
  61. */
  62. static int max_queued_requests = 1024;
  63. static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
  64. sector_t bi_sector);
  65. static void lower_barrier(struct r1conf *conf);
  66. static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
  67. {
  68. struct pool_info *pi = data;
  69. int size = offsetof(struct r1bio, bios[pi->raid_disks]);
  70. /* allocate a r1bio with room for raid_disks entries in the bios array */
  71. return kzalloc(size, gfp_flags);
  72. }
  73. static void r1bio_pool_free(void *r1_bio, void *data)
  74. {
  75. kfree(r1_bio);
  76. }
  77. #define RESYNC_BLOCK_SIZE (64*1024)
  78. #define RESYNC_DEPTH 32
  79. #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
  80. #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
  81. #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
  82. #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
  83. #define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
  84. static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
  85. {
  86. struct pool_info *pi = data;
  87. struct r1bio *r1_bio;
  88. struct bio *bio;
  89. int need_pages;
  90. int i, j;
  91. r1_bio = r1bio_pool_alloc(gfp_flags, pi);
  92. if (!r1_bio)
  93. return NULL;
  94. /*
  95. * Allocate bios : 1 for reading, n-1 for writing
  96. */
  97. for (j = pi->raid_disks ; j-- ; ) {
  98. bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
  99. if (!bio)
  100. goto out_free_bio;
  101. r1_bio->bios[j] = bio;
  102. }
  103. /*
  104. * Allocate RESYNC_PAGES data pages and attach them to
  105. * the first bio.
  106. * If this is a user-requested check/repair, allocate
  107. * RESYNC_PAGES for each bio.
  108. */
  109. if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
  110. need_pages = pi->raid_disks;
  111. else
  112. need_pages = 1;
  113. for (j = 0; j < need_pages; j++) {
  114. bio = r1_bio->bios[j];
  115. bio->bi_vcnt = RESYNC_PAGES;
  116. if (bio_alloc_pages(bio, gfp_flags))
  117. goto out_free_pages;
  118. }
  119. /* If not user-requested, copy the page pointers to all bios */
  120. if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
  121. for (i=0; i<RESYNC_PAGES ; i++)
  122. for (j=1; j<pi->raid_disks; j++)
  123. r1_bio->bios[j]->bi_io_vec[i].bv_page =
  124. r1_bio->bios[0]->bi_io_vec[i].bv_page;
  125. }
  126. r1_bio->master_bio = NULL;
  127. return r1_bio;
  128. out_free_pages:
  129. while (--j >= 0) {
  130. struct bio_vec *bv;
  131. bio_for_each_segment_all(bv, r1_bio->bios[j], i)
  132. __free_page(bv->bv_page);
  133. }
  134. out_free_bio:
  135. while (++j < pi->raid_disks)
  136. bio_put(r1_bio->bios[j]);
  137. r1bio_pool_free(r1_bio, data);
  138. return NULL;
  139. }
  140. static void r1buf_pool_free(void *__r1_bio, void *data)
  141. {
  142. struct pool_info *pi = data;
  143. int i,j;
  144. struct r1bio *r1bio = __r1_bio;
  145. for (i = 0; i < RESYNC_PAGES; i++)
  146. for (j = pi->raid_disks; j-- ;) {
  147. if (j == 0 ||
  148. r1bio->bios[j]->bi_io_vec[i].bv_page !=
  149. r1bio->bios[0]->bi_io_vec[i].bv_page)
  150. safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
  151. }
  152. for (i=0 ; i < pi->raid_disks; i++)
  153. bio_put(r1bio->bios[i]);
  154. r1bio_pool_free(r1bio, data);
  155. }
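/*
 * Drop the references on the per-device bios attached to an r1bio,
 * skipping the IO_BLOCKED/IO_MADE_GOOD markers (which are not real
 * bios), and clear the pointers.
 */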
  156. static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
  157. {
  158. int i;
  159. for (i = 0; i < conf->raid_disks * 2; i++) {
  160. struct bio **bio = r1_bio->bios + i;
  161. if (!BIO_SPECIAL(*bio))
  162. bio_put(*bio);
  163. *bio = NULL;
  164. }
  165. }
  166. static void free_r1bio(struct r1bio *r1_bio)
  167. {
  168. struct r1conf *conf = r1_bio->mddev->private;
  169. put_all_bios(conf, r1_bio);
  170. mempool_free(r1_bio, conf->r1bio_pool);
  171. }
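/*
 * Return a resync/recovery r1bio to its pool: drop the rdev references
 * taken for each device and lower the barrier raised by the sync code.
 */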
  172. static void put_buf(struct r1bio *r1_bio)
  173. {
  174. struct r1conf *conf = r1_bio->mddev->private;
  175. int i;
  176. for (i = 0; i < conf->raid_disks * 2; i++) {
  177. struct bio *bio = r1_bio->bios[i];
  178. if (bio->bi_end_io)
  179. rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
  180. }
  181. mempool_free(r1_bio, conf->r1buf_pool);
  182. lower_barrier(conf);
  183. }
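/*
 * Queue an r1bio on the retry list so that raid1d can handle it in
 * process context.
 */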
  184. static void reschedule_retry(struct r1bio *r1_bio)
  185. {
  186. unsigned long flags;
  187. struct mddev *mddev = r1_bio->mddev;
  188. struct r1conf *conf = mddev->private;
  189. spin_lock_irqsave(&conf->device_lock, flags);
  190. list_add(&r1_bio->retry_list, &conf->retry_list);
  191. conf->nr_queued ++;
  192. spin_unlock_irqrestore(&conf->device_lock, flags);
  193. wake_up(&conf->wait_barrier);
  194. md_wakeup_thread(mddev->thread);
  195. }
  196. /*
  197. * raid_end_bio_io() is called when we have finished servicing a mirrored
  198. * operation and are ready to return a success/failure code to the buffer
  199. * cache layer.
  200. */
  201. static void call_bio_endio(struct r1bio *r1_bio)
  202. {
  203. struct bio *bio = r1_bio->master_bio;
  204. int done;
  205. struct r1conf *conf = r1_bio->mddev->private;
  206. sector_t start_next_window = r1_bio->start_next_window;
  207. sector_t bi_sector = bio->bi_iter.bi_sector;
  208. if (bio->bi_phys_segments) {
  209. unsigned long flags;
  210. spin_lock_irqsave(&conf->device_lock, flags);
  211. bio->bi_phys_segments--;
  212. done = (bio->bi_phys_segments == 0);
  213. spin_unlock_irqrestore(&conf->device_lock, flags);
  214. /*
  215. * make_request() might be waiting for
  216. * bi_phys_segments to decrease
  217. */
  218. wake_up(&conf->wait_barrier);
  219. } else
  220. done = 1;
  221. if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
  222. clear_bit(BIO_UPTODATE, &bio->bi_flags);
  223. if (done) {
  224. bio_endio(bio, 0);
  225. /*
  226. * Wake up any possible resync thread that waits for the device
  227. * to go idle.
  228. */
  229. allow_barrier(conf, start_next_window, bi_sector);
  230. }
  231. }
  232. static void raid_end_bio_io(struct r1bio *r1_bio)
  233. {
  234. struct bio *bio = r1_bio->master_bio;
  235. /* if nobody has done the final endio yet, do it now */
  236. if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
  237. pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
  238. (bio_data_dir(bio) == WRITE) ? "write" : "read",
  239. (unsigned long long) bio->bi_iter.bi_sector,
  240. (unsigned long long) bio_end_sector(bio) - 1);
  241. call_bio_endio(r1_bio);
  242. }
  243. free_r1bio(r1_bio);
  244. }
  245. /*
  246. * Update disk head position estimator based on IRQ completion info.
  247. */
  248. static inline void update_head_pos(int disk, struct r1bio *r1_bio)
  249. {
  250. struct r1conf *conf = r1_bio->mddev->private;
  251. conf->mirrors[disk].head_position =
  252. r1_bio->sector + (r1_bio->sectors);
  253. }
  254. /*
  255. * Find the disk number which triggered given bio
  256. */
  257. static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
  258. {
  259. int mirror;
  260. struct r1conf *conf = r1_bio->mddev->private;
  261. int raid_disks = conf->raid_disks;
  262. for (mirror = 0; mirror < raid_disks * 2; mirror++)
  263. if (r1_bio->bios[mirror] == bio)
  264. break;
  265. BUG_ON(mirror == raid_disks * 2);
  266. update_head_pos(mirror, r1_bio);
  267. return mirror;
  268. }
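/*
 * Completion handler for a normal read: on success the master bio is
 * finished; on failure the r1bio is queued so raid1d can retry the
 * read on another mirror.
 */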
  269. static void raid1_end_read_request(struct bio *bio, int error)
  270. {
  271. int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  272. struct r1bio *r1_bio = bio->bi_private;
  273. int mirror;
  274. struct r1conf *conf = r1_bio->mddev->private;
  275. mirror = r1_bio->read_disk;
  276. /*
  277. * this branch is our 'one mirror IO has finished' event handler:
  278. */
  279. update_head_pos(mirror, r1_bio);
  280. if (uptodate)
  281. set_bit(R1BIO_Uptodate, &r1_bio->state);
  282. else {
  283. /* If all other devices have failed, we want to return
  284. * the error upwards rather than fail the last device.
  285. * Here we redefine "uptodate" to mean "Don't want to retry"
  286. */
  287. unsigned long flags;
  288. spin_lock_irqsave(&conf->device_lock, flags);
  289. if (r1_bio->mddev->degraded == conf->raid_disks ||
  290. (r1_bio->mddev->degraded == conf->raid_disks-1 &&
  291. !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
  292. uptodate = 1;
  293. spin_unlock_irqrestore(&conf->device_lock, flags);
  294. }
  295. if (uptodate) {
  296. raid_end_bio_io(r1_bio);
  297. rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
  298. } else {
  299. /*
  300. * oops, read error:
  301. */
  302. char b[BDEVNAME_SIZE];
  303. printk_ratelimited(
  304. KERN_ERR "md/raid1:%s: %s: "
  305. "rescheduling sector %llu\n",
  306. mdname(conf->mddev),
  307. bdevname(conf->mirrors[mirror].rdev->bdev,
  308. b),
  309. (unsigned long long)r1_bio->sector);
  310. set_bit(R1BIO_ReadError, &r1_bio->state);
  311. reschedule_retry(r1_bio);
  312. /* don't drop the reference on read_disk yet */
  313. }
  314. }
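/*
 * Final accounting for a completed write: free any write-behind page
 * copies, clear the bitmap bits for the range and tell md the write
 * has ended.
 */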
  315. static void close_write(struct r1bio *r1_bio)
  316. {
  317. /* it really is the end of this request */
  318. if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
  319. /* free extra copy of the data pages */
  320. int i = r1_bio->behind_page_count;
  321. while (i--)
  322. safe_put_page(r1_bio->behind_bvecs[i].bv_page);
  323. kfree(r1_bio->behind_bvecs);
  324. r1_bio->behind_bvecs = NULL;
  325. }
  326. /* clear the bitmap if all writes complete successfully */
  327. bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
  328. r1_bio->sectors,
  329. !test_bit(R1BIO_Degraded, &r1_bio->state),
  330. test_bit(R1BIO_BehindIO, &r1_bio->state));
  331. md_write_end(r1_bio->mddev);
  332. }
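/*
 * Called for each completed mirror write. When the last one finishes,
 * either hand the r1bio to raid1d (write error or made-good blocks to
 * process) or complete the master bio.
 */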
  333. static void r1_bio_write_done(struct r1bio *r1_bio)
  334. {
  335. if (!atomic_dec_and_test(&r1_bio->remaining))
  336. return;
  337. if (test_bit(R1BIO_WriteError, &r1_bio->state))
  338. reschedule_retry(r1_bio);
  339. else {
  340. close_write(r1_bio);
  341. if (test_bit(R1BIO_MadeGood, &r1_bio->state))
  342. reschedule_retry(r1_bio);
  343. else
  344. raid_end_bio_io(r1_bio);
  345. }
  346. }
  347. static void raid1_end_write_request(struct bio *bio, int error)
  348. {
  349. int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  350. struct r1bio *r1_bio = bio->bi_private;
  351. int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
  352. struct r1conf *conf = r1_bio->mddev->private;
  353. struct bio *to_put = NULL;
  354. mirror = find_bio_disk(r1_bio, bio);
  355. /*
  356. * 'one mirror IO has finished' event handler:
  357. */
  358. if (!uptodate) {
  359. set_bit(WriteErrorSeen,
  360. &conf->mirrors[mirror].rdev->flags);
  361. if (!test_and_set_bit(WantReplacement,
  362. &conf->mirrors[mirror].rdev->flags))
  363. set_bit(MD_RECOVERY_NEEDED, &
  364. conf->mddev->recovery);
  365. set_bit(R1BIO_WriteError, &r1_bio->state);
  366. } else {
  367. /*
  368. * Set R1BIO_Uptodate in our master bio, so that we
  369. * will return a good error code to the higher
  370. * levels even if IO on some other mirrored buffer
  371. * fails.
  372. *
  373. * The 'master' represents the composite IO operation
  374. * to user-side. So if something waits for IO, then it
  375. * will wait for the 'master' bio.
  376. */
  377. sector_t first_bad;
  378. int bad_sectors;
  379. r1_bio->bios[mirror] = NULL;
  380. to_put = bio;
  381. /*
  382. * Do not set R1BIO_Uptodate if the current device is
  383. * rebuilding or Faulty. This is because we cannot use
  384. * such a device for properly reading the data back (we could
  385. * potentially use it, if the current write fell
  386. * before rdev->recovery_offset, but for simplicity we don't
  387. * check this here).
  388. */
  389. if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
  390. !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
  391. set_bit(R1BIO_Uptodate, &r1_bio->state);
  392. /* Maybe we can clear some bad blocks. */
  393. if (is_badblock(conf->mirrors[mirror].rdev,
  394. r1_bio->sector, r1_bio->sectors,
  395. &first_bad, &bad_sectors)) {
  396. r1_bio->bios[mirror] = IO_MADE_GOOD;
  397. set_bit(R1BIO_MadeGood, &r1_bio->state);
  398. }
  399. }
  400. if (behind) {
  401. if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
  402. atomic_dec(&r1_bio->behind_remaining);
  403. /*
  404. * In behind mode, we ACK the master bio once the I/O
  405. * has safely reached all non-writemostly
  406. * disks. Setting the Returned bit ensures that this
  407. * gets done only once -- we don't ever want to return
  408. * -EIO here, instead we'll wait
  409. */
  410. if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
  411. test_bit(R1BIO_Uptodate, &r1_bio->state)) {
  412. /* Maybe we can return now */
  413. if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
  414. struct bio *mbio = r1_bio->master_bio;
  415. pr_debug("raid1: behind end write sectors"
  416. " %llu-%llu\n",
  417. (unsigned long long) mbio->bi_iter.bi_sector,
  418. (unsigned long long) bio_end_sector(mbio) - 1);
  419. call_bio_endio(r1_bio);
  420. }
  421. }
  422. }
  423. if (r1_bio->bios[mirror] == NULL)
  424. rdev_dec_pending(conf->mirrors[mirror].rdev,
  425. conf->mddev);
  426. /*
  427. * Let's see if all mirrored write operations have finished
  428. * already.
  429. */
  430. r1_bio_write_done(r1_bio);
  431. if (to_put)
  432. bio_put(to_put);
  433. }
  434. /*
  435. * This routine returns the disk from which the requested read should
  436. * be done. There is a per-array 'next expected sequential IO' sector
  437. * number - if this matches on the next IO then we use the last disk.
  438. * There is also a per-disk 'last known head position' sector that is
  439. * maintained from IRQ contexts, both the normal and the resync IO
  440. * completion handlers update this position correctly. If there is no
  441. * perfect sequential match then we pick the disk whose head is closest.
  442. *
  443. * If there are 2 mirrors in the same 2 devices, performance degrades
  444. * because position is mirror, not device based.
  445. *
  446. * The rdev for the device selected will have nr_pending incremented.
  447. */
  448. static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
  449. {
  450. const sector_t this_sector = r1_bio->sector;
  451. int sectors;
  452. int best_good_sectors;
  453. int best_disk, best_dist_disk, best_pending_disk;
  454. int has_nonrot_disk;
  455. int disk;
  456. sector_t best_dist;
  457. unsigned int min_pending;
  458. struct md_rdev *rdev;
  459. int choose_first;
  460. int choose_next_idle;
  461. rcu_read_lock();
  462. /*
  463. * Check if we can balance. We can balance on the whole
  464. * device if no resync is going on, or below the resync window.
  465. * We take the first readable disk when above the resync window.
  466. */
  467. retry:
  468. sectors = r1_bio->sectors;
  469. best_disk = -1;
  470. best_dist_disk = -1;
  471. best_dist = MaxSector;
  472. best_pending_disk = -1;
  473. min_pending = UINT_MAX;
  474. best_good_sectors = 0;
  475. has_nonrot_disk = 0;
  476. choose_next_idle = 0;
  477. if (conf->mddev->recovery_cp < MaxSector &&
  478. (this_sector + sectors >= conf->next_resync))
  479. choose_first = 1;
  480. else
  481. choose_first = 0;
  482. for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
  483. sector_t dist;
  484. sector_t first_bad;
  485. int bad_sectors;
  486. unsigned int pending;
  487. bool nonrot;
  488. rdev = rcu_dereference(conf->mirrors[disk].rdev);
  489. if (r1_bio->bios[disk] == IO_BLOCKED
  490. || rdev == NULL
  491. || test_bit(Unmerged, &rdev->flags)
  492. || test_bit(Faulty, &rdev->flags))
  493. continue;
  494. if (!test_bit(In_sync, &rdev->flags) &&
  495. rdev->recovery_offset < this_sector + sectors)
  496. continue;
  497. if (test_bit(WriteMostly, &rdev->flags)) {
  498. /* Don't balance among write-mostly, just
  499. * use the first as a last resort */
  500. if (best_disk < 0) {
  501. if (is_badblock(rdev, this_sector, sectors,
  502. &first_bad, &bad_sectors)) {
  503. if (first_bad < this_sector)
  504. /* Cannot use this */
  505. continue;
  506. best_good_sectors = first_bad - this_sector;
  507. } else
  508. best_good_sectors = sectors;
  509. best_disk = disk;
  510. }
  511. continue;
  512. }
  513. /* This is a reasonable device to use. It might
  514. * even be best.
  515. */
  516. if (is_badblock(rdev, this_sector, sectors,
  517. &first_bad, &bad_sectors)) {
  518. if (best_dist < MaxSector)
  519. /* already have a better device */
  520. continue;
  521. if (first_bad <= this_sector) {
  522. /* cannot read here. If this is the 'primary'
  523. * device, then we must not read beyond
  524. * bad_sectors from another device..
  525. */
  526. bad_sectors -= (this_sector - first_bad);
  527. if (choose_first && sectors > bad_sectors)
  528. sectors = bad_sectors;
  529. if (best_good_sectors > sectors)
  530. best_good_sectors = sectors;
  531. } else {
  532. sector_t good_sectors = first_bad - this_sector;
  533. if (good_sectors > best_good_sectors) {
  534. best_good_sectors = good_sectors;
  535. best_disk = disk;
  536. }
  537. if (choose_first)
  538. break;
  539. }
  540. continue;
  541. } else
  542. best_good_sectors = sectors;
  543. nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
  544. has_nonrot_disk |= nonrot;
  545. pending = atomic_read(&rdev->nr_pending);
  546. dist = abs(this_sector - conf->mirrors[disk].head_position);
  547. if (choose_first) {
  548. best_disk = disk;
  549. break;
  550. }
  551. /* Don't change to another disk for sequential reads */
  552. if (conf->mirrors[disk].next_seq_sect == this_sector
  553. || dist == 0) {
  554. int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
  555. struct raid1_info *mirror = &conf->mirrors[disk];
  556. best_disk = disk;
  557. /*
  558. * If the buffered sequential IO size exceeds the optimal
  559. * iosize, check if there is an idle disk. If yes, choose
  560. * the idle disk. read_balance could already have chosen an
  561. * idle disk before noticing that this is sequential IO on
  562. * this disk. That doesn't matter, because this disk
  563. * will go idle and be used again once the first disk's
  564. * IO size has exceeded the optimal iosize. In
  565. * this way, the iosize of the first disk will be at least
  566. * the optimal iosize. The iosize of the second disk might be
  567. * small, but not a big deal since when the second disk
  568. * starts IO, the first disk is likely still busy.
  569. */
  570. if (nonrot && opt_iosize > 0 &&
  571. mirror->seq_start != MaxSector &&
  572. mirror->next_seq_sect > opt_iosize &&
  573. mirror->next_seq_sect - opt_iosize >=
  574. mirror->seq_start) {
  575. choose_next_idle = 1;
  576. continue;
  577. }
  578. break;
  579. }
  580. /* If device is idle, use it */
  581. if (pending == 0) {
  582. best_disk = disk;
  583. break;
  584. }
  585. if (choose_next_idle)
  586. continue;
  587. if (min_pending > pending) {
  588. min_pending = pending;
  589. best_pending_disk = disk;
  590. }
  591. if (dist < best_dist) {
  592. best_dist = dist;
  593. best_dist_disk = disk;
  594. }
  595. }
  596. /*
  597. * If all disks are rotational, choose the closest disk. If any disk is
  598. * non-rotational, choose the disk with the fewest pending requests even if
  599. * that disk is rotational, which might or might not be optimal for raids with
  600. * mixed rotational/non-rotational disks depending on workload.
  601. */
  602. if (best_disk == -1) {
  603. if (has_nonrot_disk)
  604. best_disk = best_pending_disk;
  605. else
  606. best_disk = best_dist_disk;
  607. }
  608. if (best_disk >= 0) {
  609. rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
  610. if (!rdev)
  611. goto retry;
  612. atomic_inc(&rdev->nr_pending);
  613. if (test_bit(Faulty, &rdev->flags)) {
  614. /* cannot risk returning a device that failed
  615. * before we inc'ed nr_pending
  616. */
  617. rdev_dec_pending(rdev, conf->mddev);
  618. goto retry;
  619. }
  620. sectors = best_good_sectors;
  621. if (conf->mirrors[best_disk].next_seq_sect != this_sector)
  622. conf->mirrors[best_disk].seq_start = this_sector;
  623. conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
  624. }
  625. rcu_read_unlock();
  626. *max_sectors = sectors;
  627. return best_disk;
  628. }
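/*
 * Limit a bio so that, after remapping onto each mirror, it still
 * satisfies every member device's own merge_bvec_fn.
 */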
  629. static int raid1_mergeable_bvec(struct request_queue *q,
  630. struct bvec_merge_data *bvm,
  631. struct bio_vec *biovec)
  632. {
  633. struct mddev *mddev = q->queuedata;
  634. struct r1conf *conf = mddev->private;
  635. sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
  636. int max = biovec->bv_len;
  637. if (mddev->merge_check_needed) {
  638. int disk;
  639. rcu_read_lock();
  640. for (disk = 0; disk < conf->raid_disks * 2; disk++) {
  641. struct md_rdev *rdev = rcu_dereference(
  642. conf->mirrors[disk].rdev);
  643. if (rdev && !test_bit(Faulty, &rdev->flags)) {
  644. struct request_queue *q =
  645. bdev_get_queue(rdev->bdev);
  646. if (q->merge_bvec_fn) {
  647. bvm->bi_sector = sector +
  648. rdev->data_offset;
  649. bvm->bi_bdev = rdev->bdev;
  650. max = min(max, q->merge_bvec_fn(
  651. q, bvm, biovec));
  652. }
  653. }
  654. }
  655. rcu_read_unlock();
  656. }
  657. return max;
  658. }
  659. int md_raid1_congested(struct mddev *mddev, int bits)
  660. {
  661. struct r1conf *conf = mddev->private;
  662. int i, ret = 0;
  663. if ((bits & (1 << BDI_async_congested)) &&
  664. conf->pending_count >= max_queued_requests)
  665. return 1;
  666. rcu_read_lock();
  667. for (i = 0; i < conf->raid_disks * 2; i++) {
  668. struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  669. if (rdev && !test_bit(Faulty, &rdev->flags)) {
  670. struct request_queue *q = bdev_get_queue(rdev->bdev);
  671. BUG_ON(!q);
  672. /* Note the '|| 1' - when read_balance prefers
  673. * non-congested targets, it can be removed
  674. */
  675. if ((bits & (1<<BDI_async_congested)) || 1)
  676. ret |= bdi_congested(&q->backing_dev_info, bits);
  677. else
  678. ret &= bdi_congested(&q->backing_dev_info, bits);
  679. }
  680. }
  681. rcu_read_unlock();
  682. return ret;
  683. }
  684. EXPORT_SYMBOL_GPL(md_raid1_congested);
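/*
 * Congestion check for the array: congested if md itself is, or if
 * md_raid1_congested() reports the mirrors as congested.
 */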
  685. static int raid1_congested(void *data, int bits)
  686. {
  687. struct mddev *mddev = data;
  688. return mddev_congested(mddev, bits) ||
  689. md_raid1_congested(mddev, bits);
  690. }
  691. static void flush_pending_writes(struct r1conf *conf)
  692. {
  693. /* Any writes that have been queued but are awaiting
  694. * bitmap updates get flushed here.
  695. */
  696. spin_lock_irq(&conf->device_lock);
  697. if (conf->pending_bio_list.head) {
  698. struct bio *bio;
  699. bio = bio_list_get(&conf->pending_bio_list);
  700. conf->pending_count = 0;
  701. spin_unlock_irq(&conf->device_lock);
  702. /* flush any pending bitmap writes to
  703. * disk before proceeding w/ I/O */
  704. bitmap_unplug(conf->mddev->bitmap);
  705. wake_up(&conf->wait_barrier);
  706. while (bio) { /* submit pending writes */
  707. struct bio *next = bio->bi_next;
  708. bio->bi_next = NULL;
  709. if (unlikely((bio->bi_rw & REQ_DISCARD) &&
  710. !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
  711. /* Just ignore it */
  712. bio_endio(bio, 0);
  713. else
  714. generic_make_request(bio);
  715. bio = next;
  716. }
  717. } else
  718. spin_unlock_irq(&conf->device_lock);
  719. }
  720. /* Barriers....
  721. * Sometimes we need to suspend IO while we do something else,
  722. * either some resync/recovery, or reconfigure the array.
  723. * To do this we raise a 'barrier'.
  724. * The 'barrier' is a counter that can be raised multiple times
  725. * to count how many activities are happening which preclude
  726. * normal IO.
  727. * We can only raise the barrier if there is no pending IO.
  728. * i.e. if nr_pending == 0.
  729. * We choose only to raise the barrier if no-one is waiting for the
  730. * barrier to go down. This means that as soon as an IO request
  731. * is ready, no other operations which require a barrier will start
  732. * until the IO request has had a chance.
  733. *
  734. * So: regular IO calls 'wait_barrier'. When that returns there
  735. * is no background IO happening. It must arrange to call
  736. * allow_barrier when it has finished its IO.
  737. * Background IO calls must call raise_barrier. Once that returns
  738. * there is no normal IO happening. It must arrange to call
  739. * lower_barrier when the particular background IO completes.
  740. */
  741. static void raise_barrier(struct r1conf *conf)
  742. {
  743. spin_lock_irq(&conf->resync_lock);
  744. /* Wait until no block IO is waiting */
  745. wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
  746. conf->resync_lock);
  747. /* block any new IO from starting */
  748. conf->barrier++;
  749. /* For these conditions we must wait:
  750. * A: while the array is in frozen state
  751. * B: while barrier >= RESYNC_DEPTH, meaning resync has reached
  752. * the maximum count allowed.
  753. * C: next_resync + RESYNC_SECTORS > start_next_window, meaning
  754. * the next resync will reach the window which normal bios are
  755. * handling.
  756. */
  757. wait_event_lock_irq(conf->wait_barrier,
  758. !conf->array_frozen &&
  759. conf->barrier < RESYNC_DEPTH &&
  760. (conf->start_next_window >=
  761. conf->next_resync + RESYNC_SECTORS),
  762. conf->resync_lock);
  763. spin_unlock_irq(&conf->resync_lock);
  764. }
  765. static void lower_barrier(struct r1conf *conf)
  766. {
  767. unsigned long flags;
  768. BUG_ON(conf->barrier <= 0);
  769. spin_lock_irqsave(&conf->resync_lock, flags);
  770. conf->barrier--;
  771. spin_unlock_irqrestore(&conf->resync_lock, flags);
  772. wake_up(&conf->wait_barrier);
  773. }
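/*
 * Decide whether a request must wait for the resync barrier. Reads
 * never wait unless the array is frozen; a write only waits if it
 * falls close to the region around next_resync that resync is
 * currently working on.
 */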
  774. static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
  775. {
  776. bool wait = false;
  777. if (conf->array_frozen || !bio)
  778. wait = true;
  779. else if (conf->barrier && bio_data_dir(bio) == WRITE) {
  780. if (conf->next_resync < RESYNC_WINDOW_SECTORS)
  781. wait = true;
  782. else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
  783. >= bio_end_sector(bio)) ||
  784. (conf->next_resync + NEXT_NORMALIO_DISTANCE
  785. <= bio->bi_iter.bi_sector))
  786. wait = false;
  787. else
  788. wait = true;
  789. }
  790. return wait;
  791. }
  792. static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
  793. {
  794. sector_t sector = 0;
  795. spin_lock_irq(&conf->resync_lock);
  796. if (need_to_wait_for_sync(conf, bio)) {
  797. conf->nr_waiting++;
  798. /* Wait for the barrier to drop.
  799. * However if there are already pending
  800. * requests (preventing the barrier from
  801. * rising completely), and the
  802. * pre-process bio queue isn't empty,
  803. * then don't wait, as we need to empty
  804. * that queue to get the nr_pending
  805. * count down.
  806. */
  807. wait_event_lock_irq(conf->wait_barrier,
  808. !conf->array_frozen &&
  809. (!conf->barrier ||
  810. ((conf->start_next_window <
  811. conf->next_resync + RESYNC_SECTORS) &&
  812. current->bio_list &&
  813. !bio_list_empty(current->bio_list))),
  814. conf->resync_lock);
  815. conf->nr_waiting--;
  816. }
  817. if (bio && bio_data_dir(bio) == WRITE) {
  818. if (conf->next_resync + NEXT_NORMALIO_DISTANCE
  819. <= bio->bi_iter.bi_sector) {
  820. if (conf->start_next_window == MaxSector)
  821. conf->start_next_window =
  822. conf->next_resync +
  823. NEXT_NORMALIO_DISTANCE;
  824. if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
  825. <= bio->bi_iter.bi_sector)
  826. conf->next_window_requests++;
  827. else
  828. conf->current_window_requests++;
  829. sector = conf->start_next_window;
  830. }
  831. }
  832. conf->nr_pending++;
  833. spin_unlock_irq(&conf->resync_lock);
  834. return sector;
  835. }
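/*
 * Companion to wait_barrier(): drop nr_pending and update the
 * normal-IO window accounting so that resync can move forward.
 */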
  836. static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
  837. sector_t bi_sector)
  838. {
  839. unsigned long flags;
  840. spin_lock_irqsave(&conf->resync_lock, flags);
  841. conf->nr_pending--;
  842. if (start_next_window) {
  843. if (start_next_window == conf->start_next_window) {
  844. if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
  845. <= bi_sector)
  846. conf->next_window_requests--;
  847. else
  848. conf->current_window_requests--;
  849. } else
  850. conf->current_window_requests--;
  851. if (!conf->current_window_requests) {
  852. if (conf->next_window_requests) {
  853. conf->current_window_requests =
  854. conf->next_window_requests;
  855. conf->next_window_requests = 0;
  856. conf->start_next_window +=
  857. NEXT_NORMALIO_DISTANCE;
  858. } else
  859. conf->start_next_window = MaxSector;
  860. }
  861. }
  862. spin_unlock_irqrestore(&conf->resync_lock, flags);
  863. wake_up(&conf->wait_barrier);
  864. }
  865. static void freeze_array(struct r1conf *conf, int extra)
  866. {
  867. /* stop syncio and normal IO and wait for everything to
  868. * go quiet.
  869. * We wait until nr_pending matches nr_queued+extra.
  870. * This is called in the context of one normal IO request
  871. * that has failed. Thus any sync request that might be pending
  872. * will be blocked by nr_pending, and we need to wait for
  873. * pending IO requests to complete or be queued for re-try.
  874. * Thus the number queued (nr_queued) plus this request (extra)
  875. * must match the number of pending IOs (nr_pending) before
  876. * we continue.
  877. */
  878. spin_lock_irq(&conf->resync_lock);
  879. conf->array_frozen = 1;
  880. wait_event_lock_irq_cmd(conf->wait_barrier,
  881. conf->nr_pending == conf->nr_queued+extra,
  882. conf->resync_lock,
  883. flush_pending_writes(conf));
  884. spin_unlock_irq(&conf->resync_lock);
  885. }
  886. static void unfreeze_array(struct r1conf *conf)
  887. {
  888. /* reverse the effect of the freeze */
  889. spin_lock_irq(&conf->resync_lock);
  890. conf->array_frozen = 0;
  891. wake_up(&conf->wait_barrier);
  892. spin_unlock_irq(&conf->resync_lock);
  893. }
  894. /* duplicate the data pages for behind I/O
  895. */
  896. static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
  897. {
  898. int i;
  899. struct bio_vec *bvec;
  900. struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
  901. GFP_NOIO);
  902. if (unlikely(!bvecs))
  903. return;
  904. bio_for_each_segment_all(bvec, bio, i) {
  905. bvecs[i] = *bvec;
  906. bvecs[i].bv_page = alloc_page(GFP_NOIO);
  907. if (unlikely(!bvecs[i].bv_page))
  908. goto do_sync_io;
  909. memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
  910. kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
  911. kunmap(bvecs[i].bv_page);
  912. kunmap(bvec->bv_page);
  913. }
  914. r1_bio->behind_bvecs = bvecs;
  915. r1_bio->behind_page_count = bio->bi_vcnt;
  916. set_bit(R1BIO_BehindIO, &r1_bio->state);
  917. return;
  918. do_sync_io:
  919. for (i = 0; i < bio->bi_vcnt; i++)
  920. if (bvecs[i].bv_page)
  921. put_page(bvecs[i].bv_page);
  922. kfree(bvecs);
  923. pr_debug("%dB behind alloc failed, doing sync I/O\n",
  924. bio->bi_iter.bi_size);
  925. }
  926. struct raid1_plug_cb {
  927. struct blk_plug_cb cb;
  928. struct bio_list pending;
  929. int pending_cnt;
  930. };
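/*
 * Plug callback: if we cannot submit in this context, hand the batched
 * writes over to raid1d; otherwise flush the bitmap and issue them
 * directly.
 */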
  931. static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
  932. {
  933. struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
  934. cb);
  935. struct mddev *mddev = plug->cb.data;
  936. struct r1conf *conf = mddev->private;
  937. struct bio *bio;
  938. if (from_schedule || current->bio_list) {
  939. spin_lock_irq(&conf->device_lock);
  940. bio_list_merge(&conf->pending_bio_list, &plug->pending);
  941. conf->pending_count += plug->pending_cnt;
  942. spin_unlock_irq(&conf->device_lock);
  943. wake_up(&conf->wait_barrier);
  944. md_wakeup_thread(mddev->thread);
  945. kfree(plug);
  946. return;
  947. }
  948. /* we aren't scheduling, so we can do the write-out directly. */
  949. bio = bio_list_get(&plug->pending);
  950. bitmap_unplug(mddev->bitmap);
  951. wake_up(&conf->wait_barrier);
  952. while (bio) { /* submit pending writes */
  953. struct bio *next = bio->bi_next;
  954. bio->bi_next = NULL;
  955. if (unlikely((bio->bi_rw & REQ_DISCARD) &&
  956. !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
  957. /* Just ignore it */
  958. bio_endio(bio, 0);
  959. else
  960. generic_make_request(bio);
  961. bio = next;
  962. }
  963. kfree(plug);
  964. }
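/*
 * Main entry point for normal reads and writes against the array.
 */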
  965. static void make_request(struct mddev *mddev, struct bio * bio)
  966. {
  967. struct r1conf *conf = mddev->private;
  968. struct raid1_info *mirror;
  969. struct r1bio *r1_bio;
  970. struct bio *read_bio;
  971. int i, disks;
  972. struct bitmap *bitmap;
  973. unsigned long flags;
  974. const int rw = bio_data_dir(bio);
  975. const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
  976. const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
  977. const unsigned long do_discard = (bio->bi_rw
  978. & (REQ_DISCARD | REQ_SECURE));
  979. const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
  980. struct md_rdev *blocked_rdev;
  981. struct blk_plug_cb *cb;
  982. struct raid1_plug_cb *plug = NULL;
  983. int first_clone;
  984. int sectors_handled;
  985. int max_sectors;
  986. sector_t start_next_window;
  987. /*
  988. * Register the new request and wait if the reconstruction
  989. * thread has put up a barrier for new requests.
  990. * Continue immediately if no resync is active currently.
  991. */
  992. md_write_start(mddev, bio); /* wait on superblock update early */
  993. if (bio_data_dir(bio) == WRITE &&
  994. bio_end_sector(bio) > mddev->suspend_lo &&
  995. bio->bi_iter.bi_sector < mddev->suspend_hi) {
  996. /* As the suspend_* range is controlled by
  997. * userspace, we want an interruptible
  998. * wait.
  999. */
  1000. DEFINE_WAIT(w);
  1001. for (;;) {
  1002. flush_signals(current);
  1003. prepare_to_wait(&conf->wait_barrier,
  1004. &w, TASK_INTERRUPTIBLE);
  1005. if (bio_end_sector(bio) <= mddev->suspend_lo ||
  1006. bio->bi_iter.bi_sector >= mddev->suspend_hi)
  1007. break;
  1008. schedule();
  1009. }
  1010. finish_wait(&conf->wait_barrier, &w);
  1011. }
  1012. start_next_window = wait_barrier(conf, bio);
  1013. bitmap = mddev->bitmap;
  1014. /*
  1015. * make_request() can abort the operation when READA is being
  1016. * used and no empty request is available.
  1017. *
  1018. */
  1019. r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
  1020. r1_bio->master_bio = bio;
  1021. r1_bio->sectors = bio_sectors(bio);
  1022. r1_bio->state = 0;
  1023. r1_bio->mddev = mddev;
  1024. r1_bio->sector = bio->bi_iter.bi_sector;
  1025. /* We might need to issue multiple reads to different
  1026. * devices if there are bad blocks around, so we keep
  1027. * track of the number of reads in bio->bi_phys_segments.
  1028. * If this is 0, there is only one r1_bio and no locking
  1029. * will be needed when requests complete. If it is
  1030. * non-zero, then it is the number of not-completed requests.
  1031. */
  1032. bio->bi_phys_segments = 0;
  1033. clear_bit(BIO_SEG_VALID, &bio->bi_flags);
  1034. if (rw == READ) {
  1035. /*
  1036. * read balancing logic:
  1037. */
  1038. int rdisk;
  1039. read_again:
  1040. rdisk = read_balance(conf, r1_bio, &max_sectors);
  1041. if (rdisk < 0) {
  1042. /* couldn't find anywhere to read from */
  1043. raid_end_bio_io(r1_bio);
  1044. return;
  1045. }
  1046. mirror = conf->mirrors + rdisk;
  1047. if (test_bit(WriteMostly, &mirror->rdev->flags) &&
  1048. bitmap) {
  1049. /* Reading from a write-mostly device must
  1050. * take care not to over-take any writes
  1051. * that are 'behind'
  1052. */
  1053. wait_event(bitmap->behind_wait,
  1054. atomic_read(&bitmap->behind_writes) == 0);
  1055. }
  1056. r1_bio->read_disk = rdisk;
  1057. read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
  1058. bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
  1059. max_sectors);
  1060. r1_bio->bios[rdisk] = read_bio;
  1061. read_bio->bi_iter.bi_sector = r1_bio->sector +
  1062. mirror->rdev->data_offset;
  1063. read_bio->bi_bdev = mirror->rdev->bdev;
  1064. read_bio->bi_end_io = raid1_end_read_request;
  1065. read_bio->bi_rw = READ | do_sync;
  1066. read_bio->bi_private = r1_bio;
  1067. if (max_sectors < r1_bio->sectors) {
  1068. /* could not read all from this device, so we will
  1069. * need another r1_bio.
  1070. */
  1071. sectors_handled = (r1_bio->sector + max_sectors
  1072. - bio->bi_iter.bi_sector);
  1073. r1_bio->sectors = max_sectors;
  1074. spin_lock_irq(&conf->device_lock);
  1075. if (bio->bi_phys_segments == 0)
  1076. bio->bi_phys_segments = 2;
  1077. else
  1078. bio->bi_phys_segments++;
  1079. spin_unlock_irq(&conf->device_lock);
  1080. /* Cannot call generic_make_request directly
  1081. * as that will be queued in __make_request
  1082. * and subsequent mempool_alloc might block waiting
  1083. * for it. So hand bio over to raid1d.
  1084. */
  1085. reschedule_retry(r1_bio);
  1086. r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
  1087. r1_bio->master_bio = bio;
  1088. r1_bio->sectors = bio_sectors(bio) - sectors_handled;
  1089. r1_bio->state = 0;
  1090. r1_bio->mddev = mddev;
  1091. r1_bio->sector = bio->bi_iter.bi_sector +
  1092. sectors_handled;
  1093. goto read_again;
  1094. } else
  1095. generic_make_request(read_bio);
  1096. return;
  1097. }
  1098. /*
  1099. * WRITE:
  1100. */
  1101. if (conf->pending_count >= max_queued_requests) {
  1102. md_wakeup_thread(mddev->thread);
  1103. wait_event(conf->wait_barrier,
  1104. conf->pending_count < max_queued_requests);
  1105. }
  1106. /* first select target devices under rcu_lock and
  1107. * inc refcount on their rdev. Record them by setting
  1108. * bios[x] to bio
  1109. * If there are known/acknowledged bad blocks on any device on
  1110. * which we have seen a write error, we want to avoid writing those
  1111. * blocks.
  1112. * This potentially requires several writes to write around
  1113. * the bad blocks. Each set of writes gets its own r1bio
  1114. * with a set of bios attached.
  1115. */
  1116. disks = conf->raid_disks * 2;
  1117. retry_write:
  1118. r1_bio->start_next_window = start_next_window;
  1119. blocked_rdev = NULL;
  1120. rcu_read_lock();
  1121. max_sectors = r1_bio->sectors;
  1122. for (i = 0; i < disks; i++) {
  1123. struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  1124. if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
  1125. atomic_inc(&rdev->nr_pending);
  1126. blocked_rdev = rdev;
  1127. break;
  1128. }
  1129. r1_bio->bios[i] = NULL;
  1130. if (!rdev || test_bit(Faulty, &rdev->flags)
  1131. || test_bit(Unmerged, &rdev->flags)) {
  1132. if (i < conf->raid_disks)
  1133. set_bit(R1BIO_Degraded, &r1_bio->state);
  1134. continue;
  1135. }
  1136. atomic_inc(&rdev->nr_pending);
  1137. if (test_bit(WriteErrorSeen, &rdev->flags)) {
  1138. sector_t first_bad;
  1139. int bad_sectors;
  1140. int is_bad;
  1141. is_bad = is_badblock(rdev, r1_bio->sector,
  1142. max_sectors,
  1143. &first_bad, &bad_sectors);
  1144. if (is_bad < 0) {
  1145. /* mustn't write here until the bad block is
  1146. * acknowledged*/
  1147. set_bit(BlockedBadBlocks, &rdev->flags);
  1148. blocked_rdev = rdev;
  1149. break;
  1150. }
  1151. if (is_bad && first_bad <= r1_bio->sector) {
  1152. /* Cannot write here at all */
  1153. bad_sectors -= (r1_bio->sector - first_bad);
  1154. if (bad_sectors < max_sectors)
  1155. /* mustn't write more than bad_sectors
  1156. * to other devices yet
  1157. */
  1158. max_sectors = bad_sectors;
  1159. rdev_dec_pending(rdev, mddev);
  1160. /* We don't set R1BIO_Degraded as that
  1161. * only applies if the disk is
  1162. * missing, so it might be re-added,
  1163. * and we want to know to recover this
  1164. * chunk.
  1165. * In this case the device is here,
  1166. * and the fact that this chunk is not
  1167. * in-sync is recorded in the bad
  1168. * block log
  1169. */
  1170. continue;
  1171. }
  1172. if (is_bad) {
  1173. int good_sectors = first_bad - r1_bio->sector;
  1174. if (good_sectors < max_sectors)
  1175. max_sectors = good_sectors;
  1176. }
  1177. }
  1178. r1_bio->bios[i] = bio;
  1179. }
  1180. rcu_read_unlock();
  1181. if (unlikely(blocked_rdev)) {
  1182. /* Wait for this device to become unblocked */
  1183. int j;
  1184. sector_t old = start_next_window;
  1185. for (j = 0; j < i; j++)
  1186. if (r1_bio->bios[j])
  1187. rdev_dec_pending(conf->mirrors[j].rdev, mddev);
  1188. r1_bio->state = 0;
  1189. allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
  1190. md_wait_for_blocked_rdev(blocked_rdev, mddev);
  1191. start_next_window = wait_barrier(conf, bio);
  1192. /*
  1193. * We must make sure the multi r1bios of bio have
  1194. * the same value of bi_phys_segments
  1195. */
  1196. if (bio->bi_phys_segments && old &&
  1197. old != start_next_window)
  1198. /* Wait for the former r1bio(s) to complete */
  1199. wait_event(conf->wait_barrier,
  1200. bio->bi_phys_segments == 1);
  1201. goto retry_write;
  1202. }
  1203. if (max_sectors < r1_bio->sectors) {
  1204. /* We are splitting this write into multiple parts, so
  1205. * we need to prepare for allocating another r1_bio.
  1206. */
  1207. r1_bio->sectors = max_sectors;
  1208. spin_lock_irq(&conf->device_lock);
  1209. if (bio->bi_phys_segments == 0)
  1210. bio->bi_phys_segments = 2;
  1211. else
  1212. bio->bi_phys_segments++;
  1213. spin_unlock_irq(&conf->device_lock);
  1214. }
  1215. sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
  1216. atomic_set(&r1_bio->remaining, 1);
  1217. atomic_set(&r1_bio->behind_remaining, 0);
  1218. first_clone = 1;
  1219. for (i = 0; i < disks; i++) {
  1220. struct bio *mbio;
  1221. if (!r1_bio->bios[i])
  1222. continue;
  1223. mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
  1224. bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
  1225. if (first_clone) {
  1226. /* do behind I/O ?
  1227. * Not if there are too many, or cannot
  1228. * allocate memory, or a reader on WriteMostly
  1229. * is waiting for behind writes to flush */
  1230. if (bitmap &&
  1231. (atomic_read(&bitmap->behind_writes)
  1232. < mddev->bitmap_info.max_write_behind) &&
  1233. !waitqueue_active(&bitmap->behind_wait))
  1234. alloc_behind_pages(mbio, r1_bio);
  1235. bitmap_startwrite(bitmap, r1_bio->sector,
  1236. r1_bio->sectors,
  1237. test_bit(R1BIO_BehindIO,
  1238. &r1_bio->state));
  1239. first_clone = 0;
  1240. }
  1241. if (r1_bio->behind_bvecs) {
  1242. struct bio_vec *bvec;
  1243. int j;
  1244. /*
  1245. * We trimmed the bio, so _all is legit
  1246. */
  1247. bio_for_each_segment_all(bvec, mbio, j)
  1248. bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
  1249. if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
  1250. atomic_inc(&r1_bio->behind_remaining);
  1251. }
  1252. r1_bio->bios[i] = mbio;
  1253. mbio->bi_iter.bi_sector = (r1_bio->sector +
  1254. conf->mirrors[i].rdev->data_offset);
  1255. mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
  1256. mbio->bi_end_io = raid1_end_write_request;
  1257. mbio->bi_rw =
  1258. WRITE | do_flush_fua | do_sync | do_discard | do_same;
  1259. mbio->bi_private = r1_bio;
  1260. atomic_inc(&r1_bio->remaining);
  1261. cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
  1262. if (cb)
  1263. plug = container_of(cb, struct raid1_plug_cb, cb);
  1264. else
  1265. plug = NULL;
  1266. spin_lock_irqsave(&conf->device_lock, flags);
  1267. if (plug) {
  1268. bio_list_add(&plug->pending, mbio);
  1269. plug->pending_cnt++;
  1270. } else {
  1271. bio_list_add(&conf->pending_bio_list, mbio);
  1272. conf->pending_count++;
  1273. }
  1274. spin_unlock_irqrestore(&conf->device_lock, flags);
  1275. if (!plug)
  1276. md_wakeup_thread(mddev->thread);
  1277. }
  1278. /* Mustn't call r1_bio_write_done before this next test,
  1279. * as it could result in the bio being freed.
  1280. */
  1281. if (sectors_handled < bio_sectors(bio)) {
  1282. r1_bio_write_done(r1_bio);
  1283. /* We need another r1_bio. It has already been counted
  1284. * in bio->bi_phys_segments
  1285. */
  1286. r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
  1287. r1_bio->master_bio = bio;
  1288. r1_bio->sectors = bio_sectors(bio) - sectors_handled;
  1289. r1_bio->state = 0;
  1290. r1_bio->mddev = mddev;
  1291. r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
  1292. goto retry_write;
  1293. }
  1294. r1_bio_write_done(r1_bio);
  1295. /* In case raid1d snuck in to freeze_array */
  1296. wake_up(&conf->wait_barrier);
  1297. }
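/*
 * Report the per-device state of the array, as shown in /proc/mdstat:
 * e.g. " [2/2] [UU]" for a healthy two-disk mirror, with "_" marking any
 * device that is missing or not in sync.
 */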
  1298. static void status(struct seq_file *seq, struct mddev *mddev)
  1299. {
  1300. struct r1conf *conf = mddev->private;
  1301. int i;
  1302. seq_printf(seq, " [%d/%d] [", conf->raid_disks,
  1303. conf->raid_disks - mddev->degraded);
  1304. rcu_read_lock();
  1305. for (i = 0; i < conf->raid_disks; i++) {
  1306. struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  1307. seq_printf(seq, "%s",
  1308. rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
  1309. }
  1310. rcu_read_unlock();
  1311. seq_printf(seq, "]");
  1312. }
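/*
 * md error handler for raid1: mark 'rdev' Faulty and update the degraded
 * count, unless it is the last working device, in which case the error is
 * ignored so the array can keep running (see the comment below).
 */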
  1313. static void error(struct mddev *mddev, struct md_rdev *rdev)
  1314. {
  1315. char b[BDEVNAME_SIZE];
  1316. struct r1conf *conf = mddev->private;
1317. /*
1318. * If it is not operational, then we have already marked it as dead;
1319. * else if it is the last working disk, ignore the error and let the
1320. * next level up know;
1321. * else mark the drive as failed.
1322. */
  1323. if (test_bit(In_sync, &rdev->flags)
  1324. && (conf->raid_disks - mddev->degraded) == 1) {
  1325. /*
  1326. * Don't fail the drive, act as though we were just a
  1327. * normal single drive.
  1328. * However don't try a recovery from this drive as
  1329. * it is very likely to fail.
  1330. */
  1331. conf->recovery_disabled = mddev->recovery_disabled;
  1332. return;
  1333. }
  1334. set_bit(Blocked, &rdev->flags);
  1335. if (test_and_clear_bit(In_sync, &rdev->flags)) {
  1336. unsigned long flags;
  1337. spin_lock_irqsave(&conf->device_lock, flags);
  1338. mddev->degraded++;
  1339. set_bit(Faulty, &rdev->flags);
  1340. spin_unlock_irqrestore(&conf->device_lock, flags);
  1341. /*
  1342. * if recovery is running, make sure it aborts.
  1343. */
  1344. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  1345. } else
  1346. set_bit(Faulty, &rdev->flags);
  1347. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  1348. printk(KERN_ALERT
  1349. "md/raid1:%s: Disk failure on %s, disabling device.\n"
  1350. "md/raid1:%s: Operation continuing on %d devices.\n",
  1351. mdname(mddev), bdevname(rdev->bdev, b),
  1352. mdname(mddev), conf->raid_disks - mddev->degraded);
  1353. }
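/* Dump the mirror layout to the kernel log: wd = working disks,
 * rd = raid disks, and per disk wo = write-only (not in sync),
 * o = operational (not faulty).
 */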
  1354. static void print_conf(struct r1conf *conf)
  1355. {
  1356. int i;
  1357. printk(KERN_DEBUG "RAID1 conf printout:\n");
  1358. if (!conf) {
  1359. printk(KERN_DEBUG "(!conf)\n");
  1360. return;
  1361. }
  1362. printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
  1363. conf->raid_disks);
  1364. rcu_read_lock();
  1365. for (i = 0; i < conf->raid_disks; i++) {
  1366. char b[BDEVNAME_SIZE];
  1367. struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  1368. if (rdev)
  1369. printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
  1370. i, !test_bit(In_sync, &rdev->flags),
  1371. !test_bit(Faulty, &rdev->flags),
  1372. bdevname(rdev->bdev,b));
  1373. }
  1374. rcu_read_unlock();
  1375. }
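/* A resync/recovery pass has finished (or been aborted): drop the sync
 * barrier and release the resync buffer pool.
 */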
  1376. static void close_sync(struct r1conf *conf)
  1377. {
  1378. wait_barrier(conf, NULL);
  1379. allow_barrier(conf, 0, 0);
  1380. mempool_destroy(conf->r1buf_pool);
  1381. conf->r1buf_pool = NULL;
  1382. conf->next_resync = 0;
  1383. conf->start_next_window = MaxSector;
  1384. }
  1385. static int raid1_spare_active(struct mddev *mddev)
  1386. {
  1387. int i;
  1388. struct r1conf *conf = mddev->private;
  1389. int count = 0;
  1390. unsigned long flags;
  1391. /*
  1392. * Find all failed disks within the RAID1 configuration
  1393. * and mark them readable.
  1394. * Called under mddev lock, so rcu protection not needed.
  1395. */
  1396. for (i = 0; i < conf->raid_disks; i++) {
  1397. struct md_rdev *rdev = conf->mirrors[i].rdev;
  1398. struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
  1399. if (repl
  1400. && repl->recovery_offset == MaxSector
  1401. && !test_bit(Faulty, &repl->flags)
  1402. && !test_and_set_bit(In_sync, &repl->flags)) {
  1403. /* replacement has just become active */
  1404. if (!rdev ||
  1405. !test_and_clear_bit(In_sync, &rdev->flags))
  1406. count++;
  1407. if (rdev) {
  1408. /* Replaced device not technically
  1409. * faulty, but we need to be sure
  1410. * it gets removed and never re-added
  1411. */
  1412. set_bit(Faulty, &rdev->flags);
  1413. sysfs_notify_dirent_safe(
  1414. rdev->sysfs_state);
  1415. }
  1416. }
  1417. if (rdev
  1418. && rdev->recovery_offset == MaxSector
  1419. && !test_bit(Faulty, &rdev->flags)
  1420. && !test_and_set_bit(In_sync, &rdev->flags)) {
  1421. count++;
  1422. sysfs_notify_dirent_safe(rdev->sysfs_state);
  1423. }
  1424. }
  1425. spin_lock_irqsave(&conf->device_lock, flags);
  1426. mddev->degraded -= count;
  1427. spin_unlock_irqrestore(&conf->device_lock, flags);
  1428. print_conf(conf);
  1429. return count;
  1430. }
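/* Hot-add handler: place the new rdev in a free mirror slot, or attach
 * it as a replacement for a device that has WantReplacement set.
 */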
  1431. static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
  1432. {
  1433. struct r1conf *conf = mddev->private;
  1434. int err = -EEXIST;
  1435. int mirror = 0;
  1436. struct raid1_info *p;
  1437. int first = 0;
  1438. int last = conf->raid_disks - 1;
  1439. struct request_queue *q = bdev_get_queue(rdev->bdev);
  1440. if (mddev->recovery_disabled == conf->recovery_disabled)
  1441. return -EBUSY;
  1442. if (rdev->raid_disk >= 0)
  1443. first = last = rdev->raid_disk;
  1444. if (q->merge_bvec_fn) {
  1445. set_bit(Unmerged, &rdev->flags);
  1446. mddev->merge_check_needed = 1;
  1447. }
  1448. for (mirror = first; mirror <= last; mirror++) {
  1449. p = conf->mirrors+mirror;
  1450. if (!p->rdev) {
  1451. if (mddev->gendisk)
  1452. disk_stack_limits(mddev->gendisk, rdev->bdev,
  1453. rdev->data_offset << 9);
  1454. p->head_position = 0;
  1455. rdev->raid_disk = mirror;
  1456. err = 0;
1457. /* As all devices are equivalent, we don't need a full recovery
1458. * if this device was recently a member of the array
1459. */
  1460. if (rdev->saved_raid_disk < 0)
  1461. conf->fullsync = 1;
  1462. rcu_assign_pointer(p->rdev, rdev);
  1463. break;
  1464. }
  1465. if (test_bit(WantReplacement, &p->rdev->flags) &&
  1466. p[conf->raid_disks].rdev == NULL) {
  1467. /* Add this device as a replacement */
  1468. clear_bit(In_sync, &rdev->flags);
  1469. set_bit(Replacement, &rdev->flags);
  1470. rdev->raid_disk = mirror;
  1471. err = 0;
  1472. conf->fullsync = 1;
  1473. rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
  1474. break;
  1475. }
  1476. }
  1477. if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
  1478. /* Some requests might not have seen this new
  1479. * merge_bvec_fn. We must wait for them to complete
  1480. * before merging the device fully.
  1481. * First we make sure any code which has tested
  1482. * our function has submitted the request, then
  1483. * we wait for all outstanding requests to complete.
  1484. */
  1485. synchronize_sched();
  1486. freeze_array(conf, 0);
  1487. unfreeze_array(conf);
  1488. clear_bit(Unmerged, &rdev->flags);
  1489. }
  1490. md_integrity_add_rdev(rdev, mddev);
  1491. if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
  1492. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
  1493. print_conf(conf);
  1494. return err;
  1495. }
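/* Hot-remove handler: detach an rdev that is no longer in use; if a
 * replacement device exists for the slot, promote it into place.
 */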
  1496. static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
  1497. {
  1498. struct r1conf *conf = mddev->private;
  1499. int err = 0;
  1500. int number = rdev->raid_disk;
  1501. struct raid1_info *p = conf->mirrors + number;
  1502. if (rdev != p->rdev)
  1503. p = conf->mirrors + conf->raid_disks + number;
  1504. print_conf(conf);
  1505. if (rdev == p->rdev) {
  1506. if (test_bit(In_sync, &rdev->flags) ||
  1507. atomic_read(&rdev->nr_pending)) {
  1508. err = -EBUSY;
  1509. goto abort;
  1510. }
  1511. /* Only remove non-faulty devices if recovery
  1512. * is not possible.
  1513. */
  1514. if (!test_bit(Faulty, &rdev->flags) &&
  1515. mddev->recovery_disabled != conf->recovery_disabled &&
  1516. mddev->degraded < conf->raid_disks) {
  1517. err = -EBUSY;
  1518. goto abort;
  1519. }
  1520. p->rdev = NULL;
  1521. synchronize_rcu();
  1522. if (atomic_read(&rdev->nr_pending)) {
  1523. /* lost the race, try later */
  1524. err = -EBUSY;
  1525. p->rdev = rdev;
  1526. goto abort;
  1527. } else if (conf->mirrors[conf->raid_disks + number].rdev) {
  1528. /* We just removed a device that is being replaced.
  1529. * Move down the replacement. We drain all IO before
  1530. * doing this to avoid confusion.
  1531. */
  1532. struct md_rdev *repl =
  1533. conf->mirrors[conf->raid_disks + number].rdev;
  1534. freeze_array(conf, 0);
  1535. clear_bit(Replacement, &repl->flags);
  1536. p->rdev = repl;
  1537. conf->mirrors[conf->raid_disks + number].rdev = NULL;
  1538. unfreeze_array(conf);
  1539. clear_bit(WantReplacement, &rdev->flags);
  1540. } else
  1541. clear_bit(WantReplacement, &rdev->flags);
  1542. err = md_integrity_register(mddev);
  1543. }
  1544. abort:
  1545. print_conf(conf);
  1546. return err;
  1547. }
  1548. static void end_sync_read(struct bio *bio, int error)
  1549. {
  1550. struct r1bio *r1_bio = bio->bi_private;
  1551. update_head_pos(r1_bio->read_disk, r1_bio);
  1552. /*
  1553. * we have read a block, now it needs to be re-written,
  1554. * or re-read if the read failed.
  1555. * We don't do much here, just schedule handling by raid1d
  1556. */
  1557. if (test_bit(BIO_UPTODATE, &bio->bi_flags))
  1558. set_bit(R1BIO_Uptodate, &r1_bio->state);
  1559. if (atomic_dec_and_test(&r1_bio->remaining))
  1560. reschedule_retry(r1_bio);
  1561. }
  1562. static void end_sync_write(struct bio *bio, int error)
  1563. {
  1564. int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  1565. struct r1bio *r1_bio = bio->bi_private;
  1566. struct mddev *mddev = r1_bio->mddev;
  1567. struct r1conf *conf = mddev->private;
  1568. int mirror=0;
  1569. sector_t first_bad;
  1570. int bad_sectors;
  1571. mirror = find_bio_disk(r1_bio, bio);
  1572. if (!uptodate) {
  1573. sector_t sync_blocks = 0;
  1574. sector_t s = r1_bio->sector;
  1575. long sectors_to_go = r1_bio->sectors;
1576. /* make sure these bits don't get cleared. */
  1577. do {
  1578. bitmap_end_sync(mddev->bitmap, s,
  1579. &sync_blocks, 1);
  1580. s += sync_blocks;
  1581. sectors_to_go -= sync_blocks;
  1582. } while (sectors_to_go > 0);
  1583. set_bit(WriteErrorSeen,
  1584. &conf->mirrors[mirror].rdev->flags);
  1585. if (!test_and_set_bit(WantReplacement,
  1586. &conf->mirrors[mirror].rdev->flags))
  1587. set_bit(MD_RECOVERY_NEEDED, &
  1588. mddev->recovery);
  1589. set_bit(R1BIO_WriteError, &r1_bio->state);
  1590. } else if (is_badblock(conf->mirrors[mirror].rdev,
  1591. r1_bio->sector,
  1592. r1_bio->sectors,
  1593. &first_bad, &bad_sectors) &&
  1594. !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
  1595. r1_bio->sector,
  1596. r1_bio->sectors,
  1597. &first_bad, &bad_sectors)
  1598. )
  1599. set_bit(R1BIO_MadeGood, &r1_bio->state);
  1600. if (atomic_dec_and_test(&r1_bio->remaining)) {
  1601. int s = r1_bio->sectors;
  1602. if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
  1603. test_bit(R1BIO_WriteError, &r1_bio->state))
  1604. reschedule_retry(r1_bio);
  1605. else {
  1606. put_buf(r1_bio);
  1607. md_done_sync(mddev, s, uptodate);
  1608. }
  1609. }
  1610. }
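/* Synchronously read or write 'sectors' at 'sector' on 'rdev' using the
 * supplied page. On failure, record a bad block (failing the whole device
 * if the bad block cannot be recorded) and return 0.
 */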
  1611. static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
  1612. int sectors, struct page *page, int rw)
  1613. {
  1614. if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
  1615. /* success */
  1616. return 1;
  1617. if (rw == WRITE) {
  1618. set_bit(WriteErrorSeen, &rdev->flags);
  1619. if (!test_and_set_bit(WantReplacement,
  1620. &rdev->flags))
  1621. set_bit(MD_RECOVERY_NEEDED, &
  1622. rdev->mddev->recovery);
  1623. }
  1624. /* need to record an error - either for the block or the device */
  1625. if (!rdev_set_badblocks(rdev, sector, sectors, 0))
  1626. md_error(rdev->mddev, rdev);
  1627. return 0;
  1628. }
  1629. static int fix_sync_read_error(struct r1bio *r1_bio)
  1630. {
  1631. /* Try some synchronous reads of other devices to get
  1632. * good data, much like with normal read errors. Only
  1633. * read into the pages we already have so we don't
  1634. * need to re-issue the read request.
  1635. * We don't need to freeze the array, because being in an
  1636. * active sync request, there is no normal IO, and
  1637. * no overlapping syncs.
  1638. * We don't need to check is_badblock() again as we
  1639. * made sure that anything with a bad block in range
  1640. * will have bi_end_io clear.
  1641. */
  1642. struct mddev *mddev = r1_bio->mddev;
  1643. struct r1conf *conf = mddev->private;
  1644. struct bio *bio = r1_bio->bios[r1_bio->read_disk];
  1645. sector_t sect = r1_bio->sector;
  1646. int sectors = r1_bio->sectors;
  1647. int idx = 0;
  1648. while(sectors) {
  1649. int s = sectors;
  1650. int d = r1_bio->read_disk;
  1651. int success = 0;
  1652. struct md_rdev *rdev;
  1653. int start;
  1654. if (s > (PAGE_SIZE>>9))
  1655. s = PAGE_SIZE >> 9;
  1656. do {
  1657. if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1658. /* No rcu protection needed here; devices
1659. * can only be removed when no resync is
1660. * active, and resync is currently active
1661. */
  1662. rdev = conf->mirrors[d].rdev;
  1663. if (sync_page_io(rdev, sect, s<<9,
  1664. bio->bi_io_vec[idx].bv_page,
  1665. READ, false)) {
  1666. success = 1;
  1667. break;
  1668. }
  1669. }
  1670. d++;
  1671. if (d == conf->raid_disks * 2)
  1672. d = 0;
  1673. } while (!success && d != r1_bio->read_disk);
  1674. if (!success) {
  1675. char b[BDEVNAME_SIZE];
  1676. int abort = 0;
  1677. /* Cannot read from anywhere, this block is lost.
  1678. * Record a bad block on each device. If that doesn't
  1679. * work just disable and interrupt the recovery.
  1680. * Don't fail devices as that won't really help.
  1681. */
  1682. printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
  1683. " for block %llu\n",
  1684. mdname(mddev),
  1685. bdevname(bio->bi_bdev, b),
  1686. (unsigned long long)r1_bio->sector);
  1687. for (d = 0; d < conf->raid_disks * 2; d++) {
  1688. rdev = conf->mirrors[d].rdev;
  1689. if (!rdev || test_bit(Faulty, &rdev->flags))
  1690. continue;
  1691. if (!rdev_set_badblocks(rdev, sect, s, 0))
  1692. abort = 1;
  1693. }
  1694. if (abort) {
  1695. conf->recovery_disabled =
  1696. mddev->recovery_disabled;
  1697. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  1698. md_done_sync(mddev, r1_bio->sectors, 0);
  1699. put_buf(r1_bio);
  1700. return 0;
  1701. }
  1702. /* Try next page */
  1703. sectors -= s;
  1704. sect += s;
  1705. idx++;
  1706. continue;
  1707. }
  1708. start = d;
  1709. /* write it back and re-read */
  1710. while (d != r1_bio->read_disk) {
  1711. if (d == 0)
  1712. d = conf->raid_disks * 2;
  1713. d--;
  1714. if (r1_bio->bios[d]->bi_end_io != end_sync_read)
  1715. continue;
  1716. rdev = conf->mirrors[d].rdev;
  1717. if (r1_sync_page_io(rdev, sect, s,
  1718. bio->bi_io_vec[idx].bv_page,
  1719. WRITE) == 0) {
  1720. r1_bio->bios[d]->bi_end_io = NULL;
  1721. rdev_dec_pending(rdev, mddev);
  1722. }
  1723. }
  1724. d = start;
  1725. while (d != r1_bio->read_disk) {
  1726. if (d == 0)
  1727. d = conf->raid_disks * 2;
  1728. d--;
  1729. if (r1_bio->bios[d]->bi_end_io != end_sync_read)
  1730. continue;
  1731. rdev = conf->mirrors[d].rdev;
  1732. if (r1_sync_page_io(rdev, sect, s,
  1733. bio->bi_io_vec[idx].bv_page,
  1734. READ) != 0)
  1735. atomic_add(s, &rdev->corrected_errors);
  1736. }
  1737. sectors -= s;
  1738. sect += s;
  1739. idx ++;
  1740. }
  1741. set_bit(R1BIO_Uptodate, &r1_bio->state);
  1742. set_bit(BIO_UPTODATE, &bio->bi_flags);
  1743. return 1;
  1744. }
  1745. static int process_checks(struct r1bio *r1_bio)
  1746. {
  1747. /* We have read all readable devices. If we haven't
  1748. * got the block, then there is no hope left.
  1749. * If we have, then we want to do a comparison
  1750. * and skip the write if everything is the same.
  1751. * If any blocks failed to read, then we need to
  1752. * attempt an over-write
  1753. */
  1754. struct mddev *mddev = r1_bio->mddev;
  1755. struct r1conf *conf = mddev->private;
  1756. int primary;
  1757. int i;
  1758. int vcnt;
  1759. /* Fix variable parts of all bios */
  1760. vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
  1761. for (i = 0; i < conf->raid_disks * 2; i++) {
  1762. int j;
  1763. int size;
  1764. int uptodate;
  1765. struct bio *b = r1_bio->bios[i];
  1766. if (b->bi_end_io != end_sync_read)
  1767. continue;
  1768. /* fixup the bio for reuse, but preserve BIO_UPTODATE */
  1769. uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
  1770. bio_reset(b);
  1771. if (!uptodate)
  1772. clear_bit(BIO_UPTODATE, &b->bi_flags);
  1773. b->bi_vcnt = vcnt;
  1774. b->bi_iter.bi_size = r1_bio->sectors << 9;
  1775. b->bi_iter.bi_sector = r1_bio->sector +
  1776. conf->mirrors[i].rdev->data_offset;
  1777. b->bi_bdev = conf->mirrors[i].rdev->bdev;
  1778. b->bi_end_io = end_sync_read;
  1779. b->bi_private = r1_bio;
  1780. size = b->bi_iter.bi_size;
  1781. for (j = 0; j < vcnt ; j++) {
  1782. struct bio_vec *bi;
  1783. bi = &b->bi_io_vec[j];
  1784. bi->bv_offset = 0;
  1785. if (size > PAGE_SIZE)
  1786. bi->bv_len = PAGE_SIZE;
  1787. else
  1788. bi->bv_len = size;
  1789. size -= PAGE_SIZE;
  1790. }
  1791. }
  1792. for (primary = 0; primary < conf->raid_disks * 2; primary++)
  1793. if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
  1794. test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
  1795. r1_bio->bios[primary]->bi_end_io = NULL;
  1796. rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
  1797. break;
  1798. }
  1799. r1_bio->read_disk = primary;
  1800. for (i = 0; i < conf->raid_disks * 2; i++) {
  1801. int j;
  1802. struct bio *pbio = r1_bio->bios[primary];
  1803. struct bio *sbio = r1_bio->bios[i];
  1804. int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
  1805. if (sbio->bi_end_io != end_sync_read)
  1806. continue;
  1807. /* Now we can 'fixup' the BIO_UPTODATE flag */
  1808. set_bit(BIO_UPTODATE, &sbio->bi_flags);
  1809. if (uptodate) {
  1810. for (j = vcnt; j-- ; ) {
  1811. struct page *p, *s;
  1812. p = pbio->bi_io_vec[j].bv_page;
  1813. s = sbio->bi_io_vec[j].bv_page;
  1814. if (memcmp(page_address(p),
  1815. page_address(s),
  1816. sbio->bi_io_vec[j].bv_len))
  1817. break;
  1818. }
  1819. } else
  1820. j = 0;
  1821. if (j >= 0)
  1822. atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
  1823. if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
  1824. && uptodate)) {
  1825. /* No need to write to this device. */
  1826. sbio->bi_end_io = NULL;
  1827. rdev_dec_pending(conf->mirrors[i].rdev, mddev);
  1828. continue;
  1829. }
  1830. bio_copy_data(sbio, pbio);
  1831. }
  1832. return 0;
  1833. }
  1834. static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
  1835. {
  1836. struct r1conf *conf = mddev->private;
  1837. int i;
  1838. int disks = conf->raid_disks * 2;
  1839. struct bio *bio, *wbio;
  1840. bio = r1_bio->bios[r1_bio->read_disk];
  1841. if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
  1842. /* ouch - failed to read all of that. */
  1843. if (!fix_sync_read_error(r1_bio))
  1844. return;
  1845. if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
  1846. if (process_checks(r1_bio) < 0)
  1847. return;
  1848. /*
  1849. * schedule writes
  1850. */
  1851. atomic_set(&r1_bio->remaining, 1);
  1852. for (i = 0; i < disks ; i++) {
  1853. wbio = r1_bio->bios[i];
  1854. if (wbio->bi_end_io == NULL ||
  1855. (wbio->bi_end_io == end_sync_read &&
  1856. (i == r1_bio->read_disk ||
  1857. !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
  1858. continue;
  1859. wbio->bi_rw = WRITE;
  1860. wbio->bi_end_io = end_sync_write;
  1861. atomic_inc(&r1_bio->remaining);
  1862. md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
  1863. generic_make_request(wbio);
  1864. }
  1865. if (atomic_dec_and_test(&r1_bio->remaining)) {
  1866. /* if we're here, all write(s) have completed, so clean up */
  1867. int s = r1_bio->sectors;
  1868. if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
  1869. test_bit(R1BIO_WriteError, &r1_bio->state))
  1870. reschedule_retry(r1_bio);
  1871. else {
  1872. put_buf(r1_bio);
  1873. md_done_sync(mddev, s, 1);
  1874. }
  1875. }
  1876. }
  1877. /*
  1878. * This is a kernel thread which:
  1879. *
  1880. * 1. Retries failed read operations on working mirrors.
1881. * 2. Updates the raid superblock when problems are encountered.
  1882. * 3. Performs writes following reads for array synchronising.
  1883. */
  1884. static void fix_read_error(struct r1conf *conf, int read_disk,
  1885. sector_t sect, int sectors)
  1886. {
  1887. struct mddev *mddev = conf->mddev;
  1888. while(sectors) {
  1889. int s = sectors;
  1890. int d = read_disk;
  1891. int success = 0;
  1892. int start;
  1893. struct md_rdev *rdev;
  1894. if (s > (PAGE_SIZE>>9))
  1895. s = PAGE_SIZE >> 9;
  1896. do {
  1897. /* Note: no rcu protection needed here
  1898. * as this is synchronous in the raid1d thread
  1899. * which is the thread that might remove
  1900. * a device. If raid1d ever becomes multi-threaded....
  1901. */
  1902. sector_t first_bad;
  1903. int bad_sectors;
  1904. rdev = conf->mirrors[d].rdev;
  1905. if (rdev &&
  1906. (test_bit(In_sync, &rdev->flags) ||
  1907. (!test_bit(Faulty, &rdev->flags) &&
  1908. rdev->recovery_offset >= sect + s)) &&
  1909. is_badblock(rdev, sect, s,
  1910. &first_bad, &bad_sectors) == 0 &&
  1911. sync_page_io(rdev, sect, s<<9,
  1912. conf->tmppage, READ, false))
  1913. success = 1;
  1914. else {
  1915. d++;
  1916. if (d == conf->raid_disks * 2)
  1917. d = 0;
  1918. }
  1919. } while (!success && d != read_disk);
  1920. if (!success) {
  1921. /* Cannot read from anywhere - mark it bad */
  1922. struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
  1923. if (!rdev_set_badblocks(rdev, sect, s, 0))
  1924. md_error(mddev, rdev);
  1925. break;
  1926. }
  1927. /* write it back and re-read */
  1928. start = d;
  1929. while (d != read_disk) {
  1930. if (d==0)
  1931. d = conf->raid_disks * 2;
  1932. d--;
  1933. rdev = conf->mirrors[d].rdev;
  1934. if (rdev &&
  1935. test_bit(In_sync, &rdev->flags))
  1936. r1_sync_page_io(rdev, sect, s,
  1937. conf->tmppage, WRITE);
  1938. }
  1939. d = start;
  1940. while (d != read_disk) {
  1941. char b[BDEVNAME_SIZE];
  1942. if (d==0)
  1943. d = conf->raid_disks * 2;
  1944. d--;
  1945. rdev = conf->mirrors[d].rdev;
  1946. if (rdev &&
  1947. test_bit(In_sync, &rdev->flags)) {
  1948. if (r1_sync_page_io(rdev, sect, s,
  1949. conf->tmppage, READ)) {
  1950. atomic_add(s, &rdev->corrected_errors);
  1951. printk(KERN_INFO
  1952. "md/raid1:%s: read error corrected "
  1953. "(%d sectors at %llu on %s)\n",
  1954. mdname(mddev), s,
  1955. (unsigned long long)(sect +
  1956. rdev->data_offset),
  1957. bdevname(rdev->bdev, b));
  1958. }
  1959. }
  1960. }
  1961. sectors -= s;
  1962. sect += s;
  1963. }
  1964. }
  1965. static int narrow_write_error(struct r1bio *r1_bio, int i)
  1966. {
  1967. struct mddev *mddev = r1_bio->mddev;
  1968. struct r1conf *conf = mddev->private;
  1969. struct md_rdev *rdev = conf->mirrors[i].rdev;
  1970. /* bio has the data to be written to device 'i' where
  1971. * we just recently had a write error.
  1972. * We repeatedly clone the bio and trim down to one block,
  1973. * then try the write. Where the write fails we record
  1974. * a bad block.
  1975. * It is conceivable that the bio doesn't exactly align with
  1976. * blocks. We must handle this somehow.
  1977. *
  1978. * We currently own a reference on the rdev.
  1979. */
  1980. int block_sectors;
  1981. sector_t sector;
  1982. int sectors;
  1983. int sect_to_write = r1_bio->sectors;
  1984. int ok = 1;
  1985. if (rdev->badblocks.shift < 0)
  1986. return 0;
  1987. block_sectors = 1 << rdev->badblocks.shift;
  1988. sector = r1_bio->sector;
  1989. sectors = ((sector + block_sectors)
  1990. & ~(sector_t)(block_sectors - 1))
  1991. - sector;
  1992. while (sect_to_write) {
  1993. struct bio *wbio;
  1994. if (sectors > sect_to_write)
  1995. sectors = sect_to_write;
1996. /* Write at 'sector' for 'sectors' */
  1997. if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
  1998. unsigned vcnt = r1_bio->behind_page_count;
  1999. struct bio_vec *vec = r1_bio->behind_bvecs;
  2000. while (!vec->bv_page) {
  2001. vec++;
  2002. vcnt--;
  2003. }
  2004. wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
  2005. memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
  2006. wbio->bi_vcnt = vcnt;
  2007. } else {
  2008. wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
  2009. }
  2010. wbio->bi_rw = WRITE;
  2011. wbio->bi_iter.bi_sector = r1_bio->sector;
  2012. wbio->bi_iter.bi_size = r1_bio->sectors << 9;
  2013. bio_trim(wbio, sector - r1_bio->sector, sectors);
  2014. wbio->bi_iter.bi_sector += rdev->data_offset;
  2015. wbio->bi_bdev = rdev->bdev;
  2016. if (submit_bio_wait(WRITE, wbio) == 0)
  2017. /* failure! */
  2018. ok = rdev_set_badblocks(rdev, sector,
  2019. sectors, 0)
  2020. && ok;
  2021. bio_put(wbio);
  2022. sect_to_write -= sectors;
  2023. sector += sectors;
  2024. sectors = block_sectors;
  2025. }
  2026. return ok;
  2027. }
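/* Called from raid1d for a sync r1bio that saw MadeGood or WriteError:
 * clear bad blocks where the write succeeded, record (or escalate) them
 * where it failed, then account the whole range as synced.
 */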
  2028. static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
  2029. {
  2030. int m;
  2031. int s = r1_bio->sectors;
  2032. for (m = 0; m < conf->raid_disks * 2 ; m++) {
  2033. struct md_rdev *rdev = conf->mirrors[m].rdev;
  2034. struct bio *bio = r1_bio->bios[m];
  2035. if (bio->bi_end_io == NULL)
  2036. continue;
  2037. if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
  2038. test_bit(R1BIO_MadeGood, &r1_bio->state)) {
  2039. rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
  2040. }
  2041. if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
  2042. test_bit(R1BIO_WriteError, &r1_bio->state)) {
  2043. if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
  2044. md_error(conf->mddev, rdev);
  2045. }
  2046. }
  2047. put_buf(r1_bio);
  2048. md_done_sync(conf->mddev, s, 1);
  2049. }
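/* Called from raid1d for a normal write r1bio: clear bad blocks on devices
 * marked IO_MADE_GOOD, and for devices that returned a write error try to
 * narrow down and record the failed range, failing the device if that is
 * not possible.
 */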
  2050. static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
  2051. {
  2052. int m;
  2053. for (m = 0; m < conf->raid_disks * 2 ; m++)
  2054. if (r1_bio->bios[m] == IO_MADE_GOOD) {
  2055. struct md_rdev *rdev = conf->mirrors[m].rdev;
  2056. rdev_clear_badblocks(rdev,
  2057. r1_bio->sector,
  2058. r1_bio->sectors, 0);
  2059. rdev_dec_pending(rdev, conf->mddev);
  2060. } else if (r1_bio->bios[m] != NULL) {
  2061. /* This drive got a write error. We need to
  2062. * narrow down and record precise write
  2063. * errors.
  2064. */
  2065. if (!narrow_write_error(r1_bio, m)) {
  2066. md_error(conf->mddev,
  2067. conf->mirrors[m].rdev);
  2068. /* an I/O failed, we can't clear the bitmap */
  2069. set_bit(R1BIO_Degraded, &r1_bio->state);
  2070. }
  2071. rdev_dec_pending(conf->mirrors[m].rdev,
  2072. conf->mddev);
  2073. }
  2074. if (test_bit(R1BIO_WriteError, &r1_bio->state))
  2075. close_write(r1_bio);
  2076. raid_end_bio_io(r1_bio);
  2077. }
  2078. static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
  2079. {
  2080. int disk;
  2081. int max_sectors;
  2082. struct mddev *mddev = conf->mddev;
  2083. struct bio *bio;
  2084. char b[BDEVNAME_SIZE];
  2085. struct md_rdev *rdev;
  2086. clear_bit(R1BIO_ReadError, &r1_bio->state);
  2087. /* we got a read error. Maybe the drive is bad. Maybe just
  2088. * the block and we can fix it.
  2089. * We freeze all other IO, and try reading the block from
  2090. * other devices. When we find one, we re-write
2091. * and re-check it; that fixes the read error.
  2092. * This is all done synchronously while the array is
  2093. * frozen
  2094. */
  2095. if (mddev->ro == 0) {
  2096. freeze_array(conf, 1);
  2097. fix_read_error(conf, r1_bio->read_disk,
  2098. r1_bio->sector, r1_bio->sectors);
  2099. unfreeze_array(conf);
  2100. } else
  2101. md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
  2102. rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
  2103. bio = r1_bio->bios[r1_bio->read_disk];
  2104. bdevname(bio->bi_bdev, b);
  2105. read_more:
  2106. disk = read_balance(conf, r1_bio, &max_sectors);
  2107. if (disk == -1) {
  2108. printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
  2109. " read error for block %llu\n",
  2110. mdname(mddev), b, (unsigned long long)r1_bio->sector);
  2111. raid_end_bio_io(r1_bio);
  2112. } else {
  2113. const unsigned long do_sync
  2114. = r1_bio->master_bio->bi_rw & REQ_SYNC;
  2115. if (bio) {
  2116. r1_bio->bios[r1_bio->read_disk] =
  2117. mddev->ro ? IO_BLOCKED : NULL;
  2118. bio_put(bio);
  2119. }
  2120. r1_bio->read_disk = disk;
  2121. bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
  2122. bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
  2123. max_sectors);
  2124. r1_bio->bios[r1_bio->read_disk] = bio;
  2125. rdev = conf->mirrors[disk].rdev;
  2126. printk_ratelimited(KERN_ERR
  2127. "md/raid1:%s: redirecting sector %llu"
  2128. " to other mirror: %s\n",
  2129. mdname(mddev),
  2130. (unsigned long long)r1_bio->sector,
  2131. bdevname(rdev->bdev, b));
  2132. bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
  2133. bio->bi_bdev = rdev->bdev;
  2134. bio->bi_end_io = raid1_end_read_request;
  2135. bio->bi_rw = READ | do_sync;
  2136. bio->bi_private = r1_bio;
  2137. if (max_sectors < r1_bio->sectors) {
  2138. /* Drat - have to split this up more */
  2139. struct bio *mbio = r1_bio->master_bio;
  2140. int sectors_handled = (r1_bio->sector + max_sectors
  2141. - mbio->bi_iter.bi_sector);
  2142. r1_bio->sectors = max_sectors;
  2143. spin_lock_irq(&conf->device_lock);
  2144. if (mbio->bi_phys_segments == 0)
  2145. mbio->bi_phys_segments = 2;
  2146. else
  2147. mbio->bi_phys_segments++;
  2148. spin_unlock_irq(&conf->device_lock);
  2149. generic_make_request(bio);
  2150. bio = NULL;
  2151. r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
  2152. r1_bio->master_bio = mbio;
  2153. r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
  2154. r1_bio->state = 0;
  2155. set_bit(R1BIO_ReadError, &r1_bio->state);
  2156. r1_bio->mddev = mddev;
  2157. r1_bio->sector = mbio->bi_iter.bi_sector +
  2158. sectors_handled;
  2159. goto read_more;
  2160. } else
  2161. generic_make_request(bio);
  2162. }
  2163. }
  2164. static void raid1d(struct md_thread *thread)
  2165. {
  2166. struct mddev *mddev = thread->mddev;
  2167. struct r1bio *r1_bio;
  2168. unsigned long flags;
  2169. struct r1conf *conf = mddev->private;
  2170. struct list_head *head = &conf->retry_list;
  2171. struct blk_plug plug;
  2172. md_check_recovery(mddev);
  2173. blk_start_plug(&plug);
  2174. for (;;) {
  2175. flush_pending_writes(conf);
  2176. spin_lock_irqsave(&conf->device_lock, flags);
  2177. if (list_empty(head)) {
  2178. spin_unlock_irqrestore(&conf->device_lock, flags);
  2179. break;
  2180. }
  2181. r1_bio = list_entry(head->prev, struct r1bio, retry_list);
  2182. list_del(head->prev);
  2183. conf->nr_queued--;
  2184. spin_unlock_irqrestore(&conf->device_lock, flags);
  2185. mddev = r1_bio->mddev;
  2186. conf = mddev->private;
  2187. if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
  2188. if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
  2189. test_bit(R1BIO_WriteError, &r1_bio->state))
  2190. handle_sync_write_finished(conf, r1_bio);
  2191. else
  2192. sync_request_write(mddev, r1_bio);
  2193. } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
  2194. test_bit(R1BIO_WriteError, &r1_bio->state))
  2195. handle_write_finished(conf, r1_bio);
  2196. else if (test_bit(R1BIO_ReadError, &r1_bio->state))
  2197. handle_read_error(conf, r1_bio);
  2198. else
2199. /* just a partial read to be scheduled from a separate
2200. * context
2201. */
  2202. generic_make_request(r1_bio->bios[r1_bio->read_disk]);
  2203. cond_resched();
  2204. if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
  2205. md_check_recovery(mddev);
  2206. }
  2207. blk_finish_plug(&plug);
  2208. }
  2209. static int init_resync(struct r1conf *conf)
  2210. {
  2211. int buffs;
  2212. buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
  2213. BUG_ON(conf->r1buf_pool);
  2214. conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
  2215. conf->poolinfo);
  2216. if (!conf->r1buf_pool)
  2217. return -ENOMEM;
  2218. conf->next_resync = 0;
  2219. return 0;
  2220. }
  2221. /*
  2222. * perform a "sync" on one "block"
  2223. *
  2224. * We need to make sure that no normal I/O request - particularly write
  2225. * requests - conflict with active sync requests.
  2226. *
  2227. * This is achieved by tracking pending requests and a 'barrier' concept
  2228. * that can be installed to exclude normal IO requests.
  2229. */
  2230. static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
  2231. {
  2232. struct r1conf *conf = mddev->private;
  2233. struct r1bio *r1_bio;
  2234. struct bio *bio;
  2235. sector_t max_sector, nr_sectors;
  2236. int disk = -1;
  2237. int i;
  2238. int wonly = -1;
  2239. int write_targets = 0, read_targets = 0;
  2240. sector_t sync_blocks;
  2241. int still_degraded = 0;
  2242. int good_sectors = RESYNC_SECTORS;
  2243. int min_bad = 0; /* number of sectors that are bad in all devices */
  2244. if (!conf->r1buf_pool)
  2245. if (init_resync(conf))
  2246. return 0;
  2247. max_sector = mddev->dev_sectors;
  2248. if (sector_nr >= max_sector) {
2249. /* If we aborted, we need to abort the
2250. * sync on the 'current' bitmap chunk (there will
2251. * only be one in raid1 resync).
2252. * We can find the current address in mddev->curr_resync
2253. */
  2254. if (mddev->curr_resync < max_sector) /* aborted */
  2255. bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
  2256. &sync_blocks, 1);
  2257. else /* completed sync */
  2258. conf->fullsync = 0;
  2259. bitmap_close_sync(mddev->bitmap);
  2260. close_sync(conf);
  2261. return 0;
  2262. }
  2263. if (mddev->bitmap == NULL &&
  2264. mddev->recovery_cp == MaxSector &&
  2265. !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
  2266. conf->fullsync == 0) {
  2267. *skipped = 1;
  2268. return max_sector - sector_nr;
  2269. }
2270. /* before building a request, check if we can skip these blocks.
2271. * This call to bitmap_start_sync doesn't actually record anything
2272. */
  2273. if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
  2274. !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
  2275. /* We can skip this block, and probably several more */
  2276. *skipped = 1;
  2277. return sync_blocks;
  2278. }
  2279. /*
  2280. * If there is non-resync activity waiting for a turn,
  2281. * and resync is going fast enough,
2282. * then let it through before starting on this new sync request.
  2283. */
  2284. if (!go_faster && conf->nr_waiting)
  2285. msleep_interruptible(1000);
  2286. bitmap_cond_end_sync(mddev->bitmap, sector_nr);
  2287. r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
  2288. raise_barrier(conf);
  2289. conf->next_resync = sector_nr;
  2290. rcu_read_lock();
  2291. /*
  2292. * If we get a correctably read error during resync or recovery,
  2293. * we might want to read from a different device. So we
  2294. * flag all drives that could conceivably be read from for READ,
  2295. * and any others (which will be non-In_sync devices) for WRITE.
  2296. * If a read fails, we try reading from something else for which READ
  2297. * is OK.
  2298. */
  2299. r1_bio->mddev = mddev;
  2300. r1_bio->sector = sector_nr;
  2301. r1_bio->state = 0;
  2302. set_bit(R1BIO_IsSync, &r1_bio->state);
  2303. for (i = 0; i < conf->raid_disks * 2; i++) {
  2304. struct md_rdev *rdev;
  2305. bio = r1_bio->bios[i];
  2306. bio_reset(bio);
  2307. rdev = rcu_dereference(conf->mirrors[i].rdev);
  2308. if (rdev == NULL ||
  2309. test_bit(Faulty, &rdev->flags)) {
  2310. if (i < conf->raid_disks)
  2311. still_degraded = 1;
  2312. } else if (!test_bit(In_sync, &rdev->flags)) {
  2313. bio->bi_rw = WRITE;
  2314. bio->bi_end_io = end_sync_write;
  2315. write_targets ++;
  2316. } else {
  2317. /* may need to read from here */
  2318. sector_t first_bad = MaxSector;
  2319. int bad_sectors;
  2320. if (is_badblock(rdev, sector_nr, good_sectors,
  2321. &first_bad, &bad_sectors)) {
  2322. if (first_bad > sector_nr)
  2323. good_sectors = first_bad - sector_nr;
  2324. else {
  2325. bad_sectors -= (sector_nr - first_bad);
  2326. if (min_bad == 0 ||
  2327. min_bad > bad_sectors)
  2328. min_bad = bad_sectors;
  2329. }
  2330. }
  2331. if (sector_nr < first_bad) {
  2332. if (test_bit(WriteMostly, &rdev->flags)) {
  2333. if (wonly < 0)
  2334. wonly = i;
  2335. } else {
  2336. if (disk < 0)
  2337. disk = i;
  2338. }
  2339. bio->bi_rw = READ;
  2340. bio->bi_end_io = end_sync_read;
  2341. read_targets++;
  2342. } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
  2343. test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
  2344. !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
  2345. /*
  2346. * The device is suitable for reading (InSync),
  2347. * but has bad block(s) here. Let's try to correct them,
  2348. * if we are doing resync or repair. Otherwise, leave
  2349. * this device alone for this sync request.
  2350. */
  2351. bio->bi_rw = WRITE;
  2352. bio->bi_end_io = end_sync_write;
  2353. write_targets++;
  2354. }
  2355. }
  2356. if (bio->bi_end_io) {
  2357. atomic_inc(&rdev->nr_pending);
  2358. bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
  2359. bio->bi_bdev = rdev->bdev;
  2360. bio->bi_private = r1_bio;
  2361. }
  2362. }
  2363. rcu_read_unlock();
  2364. if (disk < 0)
  2365. disk = wonly;
  2366. r1_bio->read_disk = disk;
  2367. if (read_targets == 0 && min_bad > 0) {
  2368. /* These sectors are bad on all InSync devices, so we
  2369. * need to mark them bad on all write targets
  2370. */
  2371. int ok = 1;
  2372. for (i = 0 ; i < conf->raid_disks * 2 ; i++)
  2373. if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
  2374. struct md_rdev *rdev = conf->mirrors[i].rdev;
  2375. ok = rdev_set_badblocks(rdev, sector_nr,
  2376. min_bad, 0
  2377. ) && ok;
  2378. }
  2379. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  2380. *skipped = 1;
  2381. put_buf(r1_bio);
  2382. if (!ok) {
  2383. /* Cannot record the badblocks, so need to
  2384. * abort the resync.
  2385. * If there are multiple read targets, could just
  2386. * fail the really bad ones ???
  2387. */
  2388. conf->recovery_disabled = mddev->recovery_disabled;
  2389. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  2390. return 0;
  2391. } else
  2392. return min_bad;
  2393. }
  2394. if (min_bad > 0 && min_bad < good_sectors) {
  2395. /* only resync enough to reach the next bad->good
  2396. * transition */
  2397. good_sectors = min_bad;
  2398. }
  2399. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
  2400. /* extra read targets are also write targets */
  2401. write_targets += read_targets-1;
  2402. if (write_targets == 0 || read_targets == 0) {
  2403. /* There is nowhere to write, so all non-sync
  2404. * drives must be failed - so we are finished
  2405. */
  2406. sector_t rv;
  2407. if (min_bad > 0)
  2408. max_sector = sector_nr + min_bad;
  2409. rv = max_sector - sector_nr;
  2410. *skipped = 1;
  2411. put_buf(r1_bio);
  2412. return rv;
  2413. }
  2414. if (max_sector > mddev->resync_max)
  2415. max_sector = mddev->resync_max; /* Don't do IO beyond here */
  2416. if (max_sector > sector_nr + good_sectors)
  2417. max_sector = sector_nr + good_sectors;
  2418. nr_sectors = 0;
  2419. sync_blocks = 0;
  2420. do {
  2421. struct page *page;
  2422. int len = PAGE_SIZE;
  2423. if (sector_nr + (len>>9) > max_sector)
  2424. len = (max_sector - sector_nr) << 9;
  2425. if (len == 0)
  2426. break;
  2427. if (sync_blocks == 0) {
  2428. if (!bitmap_start_sync(mddev->bitmap, sector_nr,
  2429. &sync_blocks, still_degraded) &&
  2430. !conf->fullsync &&
  2431. !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
  2432. break;
  2433. BUG_ON(sync_blocks < (PAGE_SIZE>>9));
  2434. if ((len >> 9) > sync_blocks)
  2435. len = sync_blocks<<9;
  2436. }
  2437. for (i = 0 ; i < conf->raid_disks * 2; i++) {
  2438. bio = r1_bio->bios[i];
  2439. if (bio->bi_end_io) {
  2440. page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
  2441. if (bio_add_page(bio, page, len, 0) == 0) {
  2442. /* stop here */
  2443. bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
  2444. while (i > 0) {
  2445. i--;
  2446. bio = r1_bio->bios[i];
  2447. if (bio->bi_end_io==NULL)
  2448. continue;
  2449. /* remove last page from this bio */
  2450. bio->bi_vcnt--;
  2451. bio->bi_iter.bi_size -= len;
  2452. bio->bi_flags &= ~(1<< BIO_SEG_VALID);
  2453. }
  2454. goto bio_full;
  2455. }
  2456. }
  2457. }
  2458. nr_sectors += len>>9;
  2459. sector_nr += len>>9;
  2460. sync_blocks -= (len>>9);
  2461. } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
  2462. bio_full:
  2463. r1_bio->sectors = nr_sectors;
  2464. /* For a user-requested sync, we read all readable devices and do a
  2465. * compare
  2466. */
  2467. if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
  2468. atomic_set(&r1_bio->remaining, read_targets);
  2469. for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
  2470. bio = r1_bio->bios[i];
  2471. if (bio->bi_end_io == end_sync_read) {
  2472. read_targets--;
  2473. md_sync_acct(bio->bi_bdev, nr_sectors);
  2474. generic_make_request(bio);
  2475. }
  2476. }
  2477. } else {
  2478. atomic_set(&r1_bio->remaining, 1);
  2479. bio = r1_bio->bios[r1_bio->read_disk];
  2480. md_sync_acct(bio->bi_bdev, nr_sectors);
  2481. generic_make_request(bio);
  2482. }
  2483. return nr_sectors;
  2484. }
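/* RAID1 does not stripe, so the array size is just the usable size of a
 * single member device (or the explicitly requested 'sectors').
 */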
  2485. static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
  2486. {
  2487. if (sectors)
  2488. return sectors;
  2489. return mddev->dev_sectors;
  2490. }
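/* Allocate and initialise the per-array r1conf: the mirror table (with
 * room for replacements), the r1bio mempool, and the raid1d thread.
 */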
  2491. static struct r1conf *setup_conf(struct mddev *mddev)
  2492. {
  2493. struct r1conf *conf;
  2494. int i;
  2495. struct raid1_info *disk;
  2496. struct md_rdev *rdev;
  2497. int err = -ENOMEM;
  2498. conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
  2499. if (!conf)
  2500. goto abort;
  2501. conf->mirrors = kzalloc(sizeof(struct raid1_info)
  2502. * mddev->raid_disks * 2,
  2503. GFP_KERNEL);
  2504. if (!conf->mirrors)
  2505. goto abort;
  2506. conf->tmppage = alloc_page(GFP_KERNEL);
  2507. if (!conf->tmppage)
  2508. goto abort;
  2509. conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
  2510. if (!conf->poolinfo)
  2511. goto abort;
  2512. conf->poolinfo->raid_disks = mddev->raid_disks * 2;
  2513. conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
  2514. r1bio_pool_free,
  2515. conf->poolinfo);
  2516. if (!conf->r1bio_pool)
  2517. goto abort;
  2518. conf->poolinfo->mddev = mddev;
  2519. err = -EINVAL;
  2520. spin_lock_init(&conf->device_lock);
  2521. rdev_for_each(rdev, mddev) {
  2522. struct request_queue *q;
  2523. int disk_idx = rdev->raid_disk;
  2524. if (disk_idx >= mddev->raid_disks
  2525. || disk_idx < 0)
  2526. continue;
  2527. if (test_bit(Replacement, &rdev->flags))
  2528. disk = conf->mirrors + mddev->raid_disks + disk_idx;
  2529. else
  2530. disk = conf->mirrors + disk_idx;
  2531. if (disk->rdev)
  2532. goto abort;
  2533. disk->rdev = rdev;
  2534. q = bdev_get_queue(rdev->bdev);
  2535. if (q->merge_bvec_fn)
  2536. mddev->merge_check_needed = 1;
  2537. disk->head_position = 0;
  2538. disk->seq_start = MaxSector;
  2539. }
  2540. conf->raid_disks = mddev->raid_disks;
  2541. conf->mddev = mddev;
  2542. INIT_LIST_HEAD(&conf->retry_list);
  2543. spin_lock_init(&conf->resync_lock);
  2544. init_waitqueue_head(&conf->wait_barrier);
  2545. bio_list_init(&conf->pending_bio_list);
  2546. conf->pending_count = 0;
  2547. conf->recovery_disabled = mddev->recovery_disabled - 1;
  2548. conf->start_next_window = MaxSector;
  2549. conf->current_window_requests = conf->next_window_requests = 0;
  2550. err = -EIO;
  2551. for (i = 0; i < conf->raid_disks * 2; i++) {
  2552. disk = conf->mirrors + i;
  2553. if (i < conf->raid_disks &&
  2554. disk[conf->raid_disks].rdev) {
  2555. /* This slot has a replacement. */
  2556. if (!disk->rdev) {
  2557. /* No original, just make the replacement
  2558. * a recovering spare
  2559. */
  2560. disk->rdev =
  2561. disk[conf->raid_disks].rdev;
  2562. disk[conf->raid_disks].rdev = NULL;
  2563. } else if (!test_bit(In_sync, &disk->rdev->flags))
  2564. /* Original is not in_sync - bad */
  2565. goto abort;
  2566. }
  2567. if (!disk->rdev ||
  2568. !test_bit(In_sync, &disk->rdev->flags)) {
  2569. disk->head_position = 0;
  2570. if (disk->rdev &&
  2571. (disk->rdev->saved_raid_disk < 0))
  2572. conf->fullsync = 1;
  2573. }
  2574. }
  2575. err = -ENOMEM;
  2576. conf->thread = md_register_thread(raid1d, mddev, "raid1");
  2577. if (!conf->thread) {
  2578. printk(KERN_ERR
  2579. "md/raid1:%s: couldn't allocate thread\n",
  2580. mdname(mddev));
  2581. goto abort;
  2582. }
  2583. return conf;
  2584. abort:
  2585. if (conf) {
  2586. if (conf->r1bio_pool)
  2587. mempool_destroy(conf->r1bio_pool);
  2588. kfree(conf->mirrors);
  2589. safe_put_page(conf->tmppage);
  2590. kfree(conf->poolinfo);
  2591. kfree(conf);
  2592. }
  2593. return ERR_PTR(err);
  2594. }
  2595. static int stop(struct mddev *mddev);
  2596. static int run(struct mddev *mddev)
  2597. {
  2598. struct r1conf *conf;
  2599. int i;
  2600. struct md_rdev *rdev;
  2601. int ret;
  2602. bool discard_supported = false;
  2603. if (mddev->level != 1) {
  2604. printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
  2605. mdname(mddev), mddev->level);
  2606. return -EIO;
  2607. }
  2608. if (mddev->reshape_position != MaxSector) {
  2609. printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
  2610. mdname(mddev));
  2611. return -EIO;
  2612. }
  2613. /*
  2614. * copy the already verified devices into our private RAID1
  2615. * bookkeeping area. [whatever we allocate in run(),
  2616. * should be freed in stop()]
  2617. */
  2618. if (mddev->private == NULL)
  2619. conf = setup_conf(mddev);
  2620. else
  2621. conf = mddev->private;
  2622. if (IS_ERR(conf))
  2623. return PTR_ERR(conf);
  2624. if (mddev->queue)
  2625. blk_queue_max_write_same_sectors(mddev->queue, 0);
  2626. rdev_for_each(rdev, mddev) {
  2627. if (!mddev->gendisk)
  2628. continue;
  2629. disk_stack_limits(mddev->gendisk, rdev->bdev,
  2630. rdev->data_offset << 9);
  2631. if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
  2632. discard_supported = true;
  2633. }
  2634. mddev->degraded = 0;
  2635. for (i=0; i < conf->raid_disks; i++)
  2636. if (conf->mirrors[i].rdev == NULL ||
  2637. !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
  2638. test_bit(Faulty, &conf->mirrors[i].rdev->flags))
  2639. mddev->degraded++;
  2640. if (conf->raid_disks - mddev->degraded == 1)
  2641. mddev->recovery_cp = MaxSector;
  2642. if (mddev->recovery_cp != MaxSector)
  2643. printk(KERN_NOTICE "md/raid1:%s: not clean"
  2644. " -- starting background reconstruction\n",
  2645. mdname(mddev));
  2646. printk(KERN_INFO
  2647. "md/raid1:%s: active with %d out of %d mirrors\n",
  2648. mdname(mddev), mddev->raid_disks - mddev->degraded,
  2649. mddev->raid_disks);
  2650. /*
  2651. * Ok, everything is just fine now
  2652. */
  2653. mddev->thread = conf->thread;
  2654. conf->thread = NULL;
  2655. mddev->private = conf;
  2656. md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
  2657. if (mddev->queue) {
  2658. mddev->queue->backing_dev_info.congested_fn = raid1_congested;
  2659. mddev->queue->backing_dev_info.congested_data = mddev;
  2660. blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
  2661. if (discard_supported)
  2662. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
  2663. mddev->queue);
  2664. else
  2665. queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
  2666. mddev->queue);
  2667. }
  2668. ret = md_integrity_register(mddev);
  2669. if (ret)
  2670. stop(mddev);
  2671. return ret;
  2672. }
  2673. static int stop(struct mddev *mddev)
  2674. {
  2675. struct r1conf *conf = mddev->private;
  2676. struct bitmap *bitmap = mddev->bitmap;
  2677. /* wait for behind writes to complete */
  2678. if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
  2679. printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
  2680. mdname(mddev));
  2681. /* need to kick something here to make sure I/O goes? */
  2682. wait_event(bitmap->behind_wait,
  2683. atomic_read(&bitmap->behind_writes) == 0);
  2684. }
  2685. freeze_array(conf, 0);
  2686. unfreeze_array(conf);
  2687. md_unregister_thread(&mddev->thread);
  2688. if (conf->r1bio_pool)
  2689. mempool_destroy(conf->r1bio_pool);
  2690. kfree(conf->mirrors);
  2691. safe_put_page(conf->tmppage);
  2692. kfree(conf->poolinfo);
  2693. kfree(conf);
  2694. mddev->private = NULL;
  2695. return 0;
  2696. }
  2697. static int raid1_resize(struct mddev *mddev, sector_t sectors)
  2698. {
  2699. /* no resync is happening, and there is enough space
  2700. * on all devices, so we can resize.
  2701. * We need to make sure resync covers any new space.
  2702. * If the array is shrinking we should possibly wait until
  2703. * any io in the removed space completes, but it hardly seems
  2704. * worth it.
  2705. */
  2706. sector_t newsize = raid1_size(mddev, sectors, 0);
  2707. if (mddev->external_size &&
  2708. mddev->array_sectors > newsize)
  2709. return -EINVAL;
  2710. if (mddev->bitmap) {
  2711. int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
  2712. if (ret)
  2713. return ret;
  2714. }
  2715. md_set_array_sectors(mddev, newsize);
  2716. set_capacity(mddev->gendisk, mddev->array_sectors);
  2717. revalidate_disk(mddev->gendisk);
  2718. if (sectors > mddev->dev_sectors &&
  2719. mddev->recovery_cp > mddev->dev_sectors) {
  2720. mddev->recovery_cp = mddev->dev_sectors;
  2721. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  2722. }
  2723. mddev->dev_sectors = sectors;
  2724. mddev->resync_max_sectors = sectors;
  2725. return 0;
  2726. }
  2727. static int raid1_reshape(struct mddev *mddev)
  2728. {
  2729. /* We need to:
  2730. * 1/ resize the r1bio_pool
  2731. * 2/ resize conf->mirrors
  2732. *
  2733. * We allocate a new r1bio_pool if we can.
  2734. * Then raise a device barrier and wait until all IO stops.
  2735. * Then resize conf->mirrors and swap in the new r1bio pool.
  2736. *
  2737. * At the same time, we "pack" the devices so that all the missing
  2738. * devices have the higher raid_disk numbers.
  2739. */
  2740. mempool_t *newpool, *oldpool;
  2741. struct pool_info *newpoolinfo;
  2742. struct raid1_info *newmirrors;
  2743. struct r1conf *conf = mddev->private;
  2744. int cnt, raid_disks;
  2745. unsigned long flags;
  2746. int d, d2, err;
  2747. /* Cannot change chunk_size, layout, or level */
  2748. if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
  2749. mddev->layout != mddev->new_layout ||
  2750. mddev->level != mddev->new_level) {
  2751. mddev->new_chunk_sectors = mddev->chunk_sectors;
  2752. mddev->new_layout = mddev->layout;
  2753. mddev->new_level = mddev->level;
  2754. return -EINVAL;
  2755. }
  2756. err = md_allow_write(mddev);
  2757. if (err)
  2758. return err;
  2759. raid_disks = mddev->raid_disks + mddev->delta_disks;
  2760. if (raid_disks < conf->raid_disks) {
  2761. cnt=0;
  2762. for (d= 0; d < conf->raid_disks; d++)
  2763. if (conf->mirrors[d].rdev)
  2764. cnt++;
  2765. if (cnt > raid_disks)
  2766. return -EBUSY;
  2767. }
  2768. newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
  2769. if (!newpoolinfo)
  2770. return -ENOMEM;
  2771. newpoolinfo->mddev = mddev;
  2772. newpoolinfo->raid_disks = raid_disks * 2;
  2773. newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
  2774. r1bio_pool_free, newpoolinfo);
  2775. if (!newpool) {
  2776. kfree(newpoolinfo);
  2777. return -ENOMEM;
  2778. }
  2779. newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
  2780. GFP_KERNEL);
  2781. if (!newmirrors) {
  2782. kfree(newpoolinfo);
  2783. mempool_destroy(newpool);
  2784. return -ENOMEM;
  2785. }
  2786. freeze_array(conf, 0);
  2787. /* ok, everything is stopped */
  2788. oldpool = conf->r1bio_pool;
  2789. conf->r1bio_pool = newpool;
  2790. for (d = d2 = 0; d < conf->raid_disks; d++) {
  2791. struct md_rdev *rdev = conf->mirrors[d].rdev;
  2792. if (rdev && rdev->raid_disk != d2) {
  2793. sysfs_unlink_rdev(mddev, rdev);
  2794. rdev->raid_disk = d2;
  2795. sysfs_unlink_rdev(mddev, rdev);
  2796. if (sysfs_link_rdev(mddev, rdev))
  2797. printk(KERN_WARNING
  2798. "md/raid1:%s: cannot register rd%d\n",
  2799. mdname(mddev), rdev->raid_disk);
  2800. }
  2801. if (rdev)
  2802. newmirrors[d2++].rdev = rdev;
  2803. }
  2804. kfree(conf->mirrors);
  2805. conf->mirrors = newmirrors;
  2806. kfree(conf->poolinfo);
  2807. conf->poolinfo = newpoolinfo;
  2808. spin_lock_irqsave(&conf->device_lock, flags);
  2809. mddev->degraded += (raid_disks - conf->raid_disks);
  2810. spin_unlock_irqrestore(&conf->device_lock, flags);
  2811. conf->raid_disks = mddev->raid_disks = raid_disks;
  2812. mddev->delta_disks = 0;
  2813. unfreeze_array(conf);
  2814. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  2815. md_wakeup_thread(mddev->thread);
  2816. mempool_destroy(oldpool);
  2817. return 0;
  2818. }
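/* Quiesce callback: state 1 freezes all array I/O, state 0 resumes it,
 * and state 2 just wakes barrier waiters so a suspend can make progress.
 */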
  2819. static void raid1_quiesce(struct mddev *mddev, int state)
  2820. {
  2821. struct r1conf *conf = mddev->private;
  2822. switch(state) {
  2823. case 2: /* wake for suspend */
  2824. wake_up(&conf->wait_barrier);
  2825. break;
  2826. case 1:
  2827. freeze_array(conf, 0);
  2828. break;
  2829. case 0:
  2830. unfreeze_array(conf);
  2831. break;
  2832. }
  2833. }
  2834. static void *raid1_takeover(struct mddev *mddev)
  2835. {
  2836. /* raid1 can take over:
  2837. * raid5 with 2 devices, any layout or chunk size
  2838. */
  2839. if (mddev->level == 5 && mddev->raid_disks == 2) {
  2840. struct r1conf *conf;
  2841. mddev->new_level = 1;
  2842. mddev->new_layout = 0;
  2843. mddev->new_chunk_sectors = 0;
  2844. conf = setup_conf(mddev);
  2845. if (!IS_ERR(conf))
  2846. /* Array must appear to be quiesced */
  2847. conf->array_frozen = 1;
  2848. return conf;
  2849. }
  2850. return ERR_PTR(-EINVAL);
  2851. }
  2852. static struct md_personality raid1_personality =
  2853. {
  2854. .name = "raid1",
  2855. .level = 1,
  2856. .owner = THIS_MODULE,
  2857. .make_request = make_request,
  2858. .run = run,
  2859. .stop = stop,
  2860. .status = status,
  2861. .error_handler = error,
  2862. .hot_add_disk = raid1_add_disk,
  2863. .hot_remove_disk= raid1_remove_disk,
  2864. .spare_active = raid1_spare_active,
  2865. .sync_request = sync_request,
  2866. .resize = raid1_resize,
  2867. .size = raid1_size,
  2868. .check_reshape = raid1_reshape,
  2869. .quiesce = raid1_quiesce,
  2870. .takeover = raid1_takeover,
  2871. };
  2872. static int __init raid_init(void)
  2873. {
  2874. return register_md_personality(&raid1_personality);
  2875. }
  2876. static void raid_exit(void)
  2877. {
  2878. unregister_md_personality(&raid1_personality);
  2879. }
  2880. module_init(raid_init);
  2881. module_exit(raid_exit);
  2882. MODULE_LICENSE("GPL");
  2883. MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
  2884. MODULE_ALIAS("md-personality-3"); /* RAID1 */
  2885. MODULE_ALIAS("md-raid1");
  2886. MODULE_ALIAS("md-level-1");
  2887. module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);