raid1.c

  1. /*
  2. * raid1.c : Multiple Devices driver for Linux
  3. *
  4. * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
  5. *
  6. * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
  7. *
  8. * RAID-1 management functions.
  9. *
  10. * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
  11. *
  12. * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
  13. * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
  14. *
  15. * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
  16. * bitmapped intelligence in resync:
  17. *
  18. * - bitmap marked during normal i/o
  19. * - bitmap used to skip nondirty blocks during sync
  20. *
  21. * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
  22. * - persistent bitmap code
  23. *
  24. * This program is free software; you can redistribute it and/or modify
  25. * it under the terms of the GNU General Public License as published by
  26. * the Free Software Foundation; either version 2, or (at your option)
  27. * any later version.
  28. *
  29. * You should have received a copy of the GNU General Public License
  30. * (for example /usr/src/linux/COPYING); if not, write to the Free
  31. * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  32. */
  33. #include <linux/slab.h>
  34. #include <linux/delay.h>
  35. #include <linux/blkdev.h>
  36. #include <linux/module.h>
  37. #include <linux/seq_file.h>
  38. #include <linux/ratelimit.h>
  39. #include "md.h"
  40. #include "raid1.h"
  41. #include "bitmap.h"
  42. /*
  43. * Number of guaranteed r1bios in case of extreme VM load:
  44. */
  45. #define NR_RAID1_BIOS 256
  46. /* when we get a read error on a read-only array, we redirect to another
  47. * device without failing the first device, or trying to over-write to
  48. * correct the read error. To keep track of bad blocks on a per-bio
  49. * level, we store IO_BLOCKED in the appropriate 'bios' pointer
  50. */
  51. #define IO_BLOCKED ((struct bio *)1)
  52. /* When we successfully write to a known bad-block, we need to remove the
  53. * bad-block marking which must be done from process context. So we record
  54. * the success by setting devs[n].bio to IO_MADE_GOOD
  55. */
  56. #define IO_MADE_GOOD ((struct bio *)2)
  57. #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
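/* Editorial note: BIO_SPECIAL() is true for NULL, IO_BLOCKED and
 * IO_MADE_GOOD above, i.e. whenever the slot holds a marker value
 * rather than a pointer to a real struct bio.
 */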
  58. /* When there are this many requests queued to be written by
  59. * the raid1 thread, we become 'congested' to provide back-pressure
  60. * for writeback.
  61. */
  62. static int max_queued_requests = 1024;
  63. static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
  64. sector_t bi_sector);
  65. static void lower_barrier(struct r1conf *conf);
  66. static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
  67. {
  68. struct pool_info *pi = data;
  69. int size = offsetof(struct r1bio, bios[pi->raid_disks]);
  70. /* allocate a r1bio with room for raid_disks entries in the bios array */
  71. return kzalloc(size, gfp_flags);
  72. }
  73. static void r1bio_pool_free(void *r1_bio, void *data)
  74. {
  75. kfree(r1_bio);
  76. }
  77. #define RESYNC_BLOCK_SIZE (64*1024)
  78. #define RESYNC_DEPTH 32
  79. #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
  80. #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
  81. #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
  82. #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
  83. #define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
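/* Editorial note, assuming 512-byte sectors (hence the ">> 9" above):
 *   RESYNC_BLOCK_SIZE      = 64 KiB               -> RESYNC_SECTORS = 128
 *   RESYNC_WINDOW          = 64 KiB * 32 = 2 MiB  -> RESYNC_WINDOW_SECTORS = 4096
 *   NEXT_NORMALIO_DISTANCE = 3 * 4096 = 12288 sectors (6 MiB)
 */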
  84. static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
  85. {
  86. struct pool_info *pi = data;
  87. struct r1bio *r1_bio;
  88. struct bio *bio;
  89. int need_pages;
  90. int i, j;
  91. r1_bio = r1bio_pool_alloc(gfp_flags, pi);
  92. if (!r1_bio)
  93. return NULL;
  94. /*
  95. * Allocate bios : 1 for reading, n-1 for writing
  96. */
  97. for (j = pi->raid_disks ; j-- ; ) {
  98. bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
  99. if (!bio)
  100. goto out_free_bio;
  101. r1_bio->bios[j] = bio;
  102. }
  103. /*
  104. * Allocate RESYNC_PAGES data pages and attach them to
  105. * the first bio.
  106. * If this is a user-requested check/repair, allocate
  107. * RESYNC_PAGES for each bio.
  108. */
  109. if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
  110. need_pages = pi->raid_disks;
  111. else
  112. need_pages = 1;
  113. for (j = 0; j < need_pages; j++) {
  114. bio = r1_bio->bios[j];
  115. bio->bi_vcnt = RESYNC_PAGES;
  116. if (bio_alloc_pages(bio, gfp_flags))
  117. goto out_free_pages;
  118. }
  119. /* If not user-requested, copy the page pointers to all bios */
  120. if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
  121. for (i=0; i<RESYNC_PAGES ; i++)
  122. for (j=1; j<pi->raid_disks; j++)
  123. r1_bio->bios[j]->bi_io_vec[i].bv_page =
  124. r1_bio->bios[0]->bi_io_vec[i].bv_page;
  125. }
  126. r1_bio->master_bio = NULL;
  127. return r1_bio;
  128. out_free_pages:
  129. while (--j >= 0) {
  130. struct bio_vec *bv;
  131. bio_for_each_segment_all(bv, r1_bio->bios[j], i)
  132. __free_page(bv->bv_page);
  133. }
  134. out_free_bio:
  135. while (++j < pi->raid_disks)
  136. bio_put(r1_bio->bios[j]);
  137. r1bio_pool_free(r1_bio, data);
  138. return NULL;
  139. }
  140. static void r1buf_pool_free(void *__r1_bio, void *data)
  141. {
  142. struct pool_info *pi = data;
  143. int i,j;
  144. struct r1bio *r1bio = __r1_bio;
  145. for (i = 0; i < RESYNC_PAGES; i++)
  146. for (j = pi->raid_disks; j-- ;) {
  147. if (j == 0 ||
  148. r1bio->bios[j]->bi_io_vec[i].bv_page !=
  149. r1bio->bios[0]->bi_io_vec[i].bv_page)
  150. safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
  151. }
  152. for (i=0 ; i < pi->raid_disks; i++)
  153. bio_put(r1bio->bios[i]);
  154. r1bio_pool_free(r1bio, data);
  155. }
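/* Editorial note: in the normal resync case only bios[0] owns its pages;
 * the other bios reuse the same bv_page pointers (see r1buf_pool_alloc),
 * which is why the loop above frees a page only when j == 0 or the page
 * differs from the one attached to bios[0].
 */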
  156. static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
  157. {
  158. int i;
  159. for (i = 0; i < conf->raid_disks * 2; i++) {
  160. struct bio **bio = r1_bio->bios + i;
  161. if (!BIO_SPECIAL(*bio))
  162. bio_put(*bio);
  163. *bio = NULL;
  164. }
  165. }
  166. static void free_r1bio(struct r1bio *r1_bio)
  167. {
  168. struct r1conf *conf = r1_bio->mddev->private;
  169. put_all_bios(conf, r1_bio);
  170. mempool_free(r1_bio, conf->r1bio_pool);
  171. }
  172. static void put_buf(struct r1bio *r1_bio)
  173. {
  174. struct r1conf *conf = r1_bio->mddev->private;
  175. int i;
  176. for (i = 0; i < conf->raid_disks * 2; i++) {
  177. struct bio *bio = r1_bio->bios[i];
  178. if (bio->bi_end_io)
  179. rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
  180. }
  181. mempool_free(r1_bio, conf->r1buf_pool);
  182. lower_barrier(conf);
  183. }
  184. static void reschedule_retry(struct r1bio *r1_bio)
  185. {
  186. unsigned long flags;
  187. struct mddev *mddev = r1_bio->mddev;
  188. struct r1conf *conf = mddev->private;
  189. spin_lock_irqsave(&conf->device_lock, flags);
  190. list_add(&r1_bio->retry_list, &conf->retry_list);
  191. conf->nr_queued ++;
  192. spin_unlock_irqrestore(&conf->device_lock, flags);
  193. wake_up(&conf->wait_barrier);
  194. md_wakeup_thread(mddev->thread);
  195. }
  196. /*
  197. * raid_end_bio_io() is called when we have finished servicing a mirrored
  198. * operation and are ready to return a success/failure code to the buffer
  199. * cache layer.
  200. */
  201. static void call_bio_endio(struct r1bio *r1_bio)
  202. {
  203. struct bio *bio = r1_bio->master_bio;
  204. int done;
  205. struct r1conf *conf = r1_bio->mddev->private;
  206. sector_t start_next_window = r1_bio->start_next_window;
  207. sector_t bi_sector = bio->bi_iter.bi_sector;
  208. if (bio->bi_phys_segments) {
  209. unsigned long flags;
  210. spin_lock_irqsave(&conf->device_lock, flags);
  211. bio->bi_phys_segments--;
  212. done = (bio->bi_phys_segments == 0);
  213. spin_unlock_irqrestore(&conf->device_lock, flags);
  214. /*
  215. * make_request() might be waiting for
  216. * bi_phys_segments to decrease
  217. */
  218. wake_up(&conf->wait_barrier);
  219. } else
  220. done = 1;
  221. if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
  222. clear_bit(BIO_UPTODATE, &bio->bi_flags);
  223. if (done) {
  224. bio_endio(bio, 0);
  225. /*
  226. * Wake up any possible resync thread that waits for the device
  227. * to go idle.
  228. */
  229. allow_barrier(conf, start_next_window, bi_sector);
  230. }
  231. }
  232. static void raid_end_bio_io(struct r1bio *r1_bio)
  233. {
  234. struct bio *bio = r1_bio->master_bio;
  235. /* if nobody has done the final endio yet, do it now */
  236. if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
  237. pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
  238. (bio_data_dir(bio) == WRITE) ? "write" : "read",
  239. (unsigned long long) bio->bi_iter.bi_sector,
  240. (unsigned long long) bio_end_sector(bio) - 1);
  241. call_bio_endio(r1_bio);
  242. }
  243. free_r1bio(r1_bio);
  244. }
  245. /*
  246. * Update disk head position estimator based on IRQ completion info.
  247. */
  248. static inline void update_head_pos(int disk, struct r1bio *r1_bio)
  249. {
  250. struct r1conf *conf = r1_bio->mddev->private;
  251. conf->mirrors[disk].head_position =
  252. r1_bio->sector + (r1_bio->sectors);
  253. }
  254. /*
  255. * Find the disk number which triggered given bio
  256. */
  257. static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
  258. {
  259. int mirror;
  260. struct r1conf *conf = r1_bio->mddev->private;
  261. int raid_disks = conf->raid_disks;
  262. for (mirror = 0; mirror < raid_disks * 2; mirror++)
  263. if (r1_bio->bios[mirror] == bio)
  264. break;
  265. BUG_ON(mirror == raid_disks * 2);
  266. update_head_pos(mirror, r1_bio);
  267. return mirror;
  268. }
  269. static void raid1_end_read_request(struct bio *bio, int error)
  270. {
  271. int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  272. struct r1bio *r1_bio = bio->bi_private;
  273. int mirror;
  274. struct r1conf *conf = r1_bio->mddev->private;
  275. mirror = r1_bio->read_disk;
  276. /*
  277. * this branch is our 'one mirror IO has finished' event handler:
  278. */
  279. update_head_pos(mirror, r1_bio);
  280. if (uptodate)
  281. set_bit(R1BIO_Uptodate, &r1_bio->state);
  282. else {
  283. /* If all other devices have failed, we want to return
  284. * the error upwards rather than fail the last device.
  285. * Here we redefine "uptodate" to mean "Don't want to retry"
  286. */
  287. unsigned long flags;
  288. spin_lock_irqsave(&conf->device_lock, flags);
  289. if (r1_bio->mddev->degraded == conf->raid_disks ||
  290. (r1_bio->mddev->degraded == conf->raid_disks-1 &&
  291. !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
  292. uptodate = 1;
  293. spin_unlock_irqrestore(&conf->device_lock, flags);
  294. }
  295. if (uptodate) {
  296. raid_end_bio_io(r1_bio);
  297. rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
  298. } else {
  299. /*
  300. * oops, read error:
  301. */
  302. char b[BDEVNAME_SIZE];
  303. printk_ratelimited(
  304. KERN_ERR "md/raid1:%s: %s: "
  305. "rescheduling sector %llu\n",
  306. mdname(conf->mddev),
  307. bdevname(conf->mirrors[mirror].rdev->bdev,
  308. b),
  309. (unsigned long long)r1_bio->sector);
  310. set_bit(R1BIO_ReadError, &r1_bio->state);
  311. reschedule_retry(r1_bio);
  312. /* don't drop the reference on read_disk yet */
  313. }
  314. }
  315. static void close_write(struct r1bio *r1_bio)
  316. {
  317. /* it really is the end of this request */
  318. if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
  319. /* free extra copy of the data pages */
  320. int i = r1_bio->behind_page_count;
  321. while (i--)
  322. safe_put_page(r1_bio->behind_bvecs[i].bv_page);
  323. kfree(r1_bio->behind_bvecs);
  324. r1_bio->behind_bvecs = NULL;
  325. }
  326. /* clear the bitmap if all writes complete successfully */
  327. bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
  328. r1_bio->sectors,
  329. !test_bit(R1BIO_Degraded, &r1_bio->state),
  330. test_bit(R1BIO_BehindIO, &r1_bio->state));
  331. md_write_end(r1_bio->mddev);
  332. }
  333. static void r1_bio_write_done(struct r1bio *r1_bio)
  334. {
  335. if (!atomic_dec_and_test(&r1_bio->remaining))
  336. return;
  337. if (test_bit(R1BIO_WriteError, &r1_bio->state))
  338. reschedule_retry(r1_bio);
  339. else {
  340. close_write(r1_bio);
  341. if (test_bit(R1BIO_MadeGood, &r1_bio->state))
  342. reschedule_retry(r1_bio);
  343. else
  344. raid_end_bio_io(r1_bio);
  345. }
  346. }
  347. static void raid1_end_write_request(struct bio *bio, int error)
  348. {
  349. int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  350. struct r1bio *r1_bio = bio->bi_private;
  351. int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
  352. struct r1conf *conf = r1_bio->mddev->private;
  353. struct bio *to_put = NULL;
  354. mirror = find_bio_disk(r1_bio, bio);
  355. /*
  356. * 'one mirror IO has finished' event handler:
  357. */
  358. if (!uptodate) {
  359. set_bit(WriteErrorSeen,
  360. &conf->mirrors[mirror].rdev->flags);
  361. if (!test_and_set_bit(WantReplacement,
  362. &conf->mirrors[mirror].rdev->flags))
  363. set_bit(MD_RECOVERY_NEEDED, &
  364. conf->mddev->recovery);
  365. set_bit(R1BIO_WriteError, &r1_bio->state);
  366. } else {
  367. /*
  368. * Set R1BIO_Uptodate in our master bio, so that we
  369. * will return a good error code to the higher
  370. * levels even if IO on some other mirrored buffer
  371. * fails.
  372. *
  373. * The 'master' represents the composite IO operation
  374. * to user-side. So if something waits for IO, then it
  375. * will wait for the 'master' bio.
  376. */
  377. sector_t first_bad;
  378. int bad_sectors;
  379. r1_bio->bios[mirror] = NULL;
  380. to_put = bio;
  381. /*
  382. * Do not set R1BIO_Uptodate if the current device is
  383. * rebuilding or Faulty. This is because we cannot use
  384. * such a device for properly reading the data back (we could
  385. * potentially use it, if the current write would have fallen
  386. * before rdev->recovery_offset, but for simplicity we don't
  387. * check this here).
  388. */
  389. if (test_bit(In_sync, &conf->mirrors[mirror].rdev->flags) &&
  390. !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))
  391. set_bit(R1BIO_Uptodate, &r1_bio->state);
  392. /* Maybe we can clear some bad blocks. */
  393. if (is_badblock(conf->mirrors[mirror].rdev,
  394. r1_bio->sector, r1_bio->sectors,
  395. &first_bad, &bad_sectors)) {
  396. r1_bio->bios[mirror] = IO_MADE_GOOD;
  397. set_bit(R1BIO_MadeGood, &r1_bio->state);
  398. }
  399. }
  400. if (behind) {
  401. if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
  402. atomic_dec(&r1_bio->behind_remaining);
  403. /*
  404. * In behind mode, we ACK the master bio once the I/O
  405. * has safely reached all non-writemostly
  406. * disks. Setting the Returned bit ensures that this
  407. * gets done only once -- we don't ever want to return
  408. * -EIO here, instead we'll wait
  409. */
  410. if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
  411. test_bit(R1BIO_Uptodate, &r1_bio->state)) {
  412. /* Maybe we can return now */
  413. if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
  414. struct bio *mbio = r1_bio->master_bio;
  415. pr_debug("raid1: behind end write sectors"
  416. " %llu-%llu\n",
  417. (unsigned long long) mbio->bi_iter.bi_sector,
  418. (unsigned long long) bio_end_sector(mbio) - 1);
  419. call_bio_endio(r1_bio);
  420. }
  421. }
  422. }
  423. if (r1_bio->bios[mirror] == NULL)
  424. rdev_dec_pending(conf->mirrors[mirror].rdev,
  425. conf->mddev);
  426. /*
  427. * Let's see if all mirrored write operations have finished
  428. * already.
  429. */
  430. r1_bio_write_done(r1_bio);
  431. if (to_put)
  432. bio_put(to_put);
  433. }
  434. /*
  435. * This routine returns the disk from which the requested read should
  436. * be done. There is a per-array 'next expected sequential IO' sector
  437. * number - if this matches on the next IO then we use the last disk.
  438. * There is also a per-disk 'last known head position' sector that is
  439. * maintained from IRQ context; both the normal and the resync IO
  440. * completion handlers update this position correctly. If there is no
  441. * perfect sequential match then we pick the disk whose head is closest.
  442. *
  443. * If there are 2 mirrors in the same 2 devices, performance degrades
  444. * because head position is tracked per mirror, not per device.
  445. *
  446. * The rdev for the device selected will have nr_pending incremented.
  447. */
  448. static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
  449. {
  450. const sector_t this_sector = r1_bio->sector;
  451. int sectors;
  452. int best_good_sectors;
  453. int best_disk, best_dist_disk, best_pending_disk;
  454. int has_nonrot_disk;
  455. int disk;
  456. sector_t best_dist;
  457. unsigned int min_pending;
  458. struct md_rdev *rdev;
  459. int choose_first;
  460. int choose_next_idle;
  461. rcu_read_lock();
  462. /*
  463. * Check if we can balance. We can balance on the whole
  464. * device if no resync is going on, or below the resync window.
  465. * We take the first readable disk when above the resync window.
  466. */
  467. retry:
  468. sectors = r1_bio->sectors;
  469. best_disk = -1;
  470. best_dist_disk = -1;
  471. best_dist = MaxSector;
  472. best_pending_disk = -1;
  473. min_pending = UINT_MAX;
  474. best_good_sectors = 0;
  475. has_nonrot_disk = 0;
  476. choose_next_idle = 0;
  477. choose_first = (conf->mddev->recovery_cp < this_sector + sectors);
  478. for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
  479. sector_t dist;
  480. sector_t first_bad;
  481. int bad_sectors;
  482. unsigned int pending;
  483. bool nonrot;
  484. rdev = rcu_dereference(conf->mirrors[disk].rdev);
  485. if (r1_bio->bios[disk] == IO_BLOCKED
  486. || rdev == NULL
  487. || test_bit(Unmerged, &rdev->flags)
  488. || test_bit(Faulty, &rdev->flags))
  489. continue;
  490. if (!test_bit(In_sync, &rdev->flags) &&
  491. rdev->recovery_offset < this_sector + sectors)
  492. continue;
  493. if (test_bit(WriteMostly, &rdev->flags)) {
  494. /* Don't balance among write-mostly, just
  495. * use the first as a last resort */
  496. if (best_disk < 0) {
  497. if (is_badblock(rdev, this_sector, sectors,
  498. &first_bad, &bad_sectors)) {
  499. if (first_bad < this_sector)
  500. /* Cannot use this */
  501. continue;
  502. best_good_sectors = first_bad - this_sector;
  503. } else
  504. best_good_sectors = sectors;
  505. best_disk = disk;
  506. }
  507. continue;
  508. }
  509. /* This is a reasonable device to use. It might
  510. * even be best.
  511. */
  512. if (is_badblock(rdev, this_sector, sectors,
  513. &first_bad, &bad_sectors)) {
  514. if (best_dist < MaxSector)
  515. /* already have a better device */
  516. continue;
  517. if (first_bad <= this_sector) {
  518. /* cannot read here. If this is the 'primary'
  519. * device, then we must not read beyond
  520. * bad_sectors from another device..
  521. */
  522. bad_sectors -= (this_sector - first_bad);
  523. if (choose_first && sectors > bad_sectors)
  524. sectors = bad_sectors;
  525. if (best_good_sectors > sectors)
  526. best_good_sectors = sectors;
  527. } else {
  528. sector_t good_sectors = first_bad - this_sector;
  529. if (good_sectors > best_good_sectors) {
  530. best_good_sectors = good_sectors;
  531. best_disk = disk;
  532. }
  533. if (choose_first)
  534. break;
  535. }
  536. continue;
  537. } else
  538. best_good_sectors = sectors;
  539. nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
  540. has_nonrot_disk |= nonrot;
  541. pending = atomic_read(&rdev->nr_pending);
  542. dist = abs(this_sector - conf->mirrors[disk].head_position);
  543. if (choose_first) {
  544. best_disk = disk;
  545. break;
  546. }
  547. /* Don't change to another disk for sequential reads */
  548. if (conf->mirrors[disk].next_seq_sect == this_sector
  549. || dist == 0) {
  550. int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
  551. struct raid1_info *mirror = &conf->mirrors[disk];
  552. best_disk = disk;
  553. /*
  554. * If the buffered sequential IO size exceeds the optimal
  555. * iosize, check whether there is an idle disk; if so, choose
  556. * the idle disk. read_balance could already have chosen an
  557. * idle disk before noticing that this is sequential IO on
  558. * this disk. That doesn't matter, because this disk will go
  559. * idle and be used again once the first disk's IO size
  560. * exceeds the optimal iosize. This way the first disk's IO
  561. * size is at least the optimal iosize; the second disk's
  562. * iosize might be small, but that is not a big deal since
  563. * when the second disk starts IO, the first disk is likely
  564. * still busy.
  565. */
  566. if (nonrot && opt_iosize > 0 &&
  567. mirror->seq_start != MaxSector &&
  568. mirror->next_seq_sect > opt_iosize &&
  569. mirror->next_seq_sect - opt_iosize >=
  570. mirror->seq_start) {
  571. choose_next_idle = 1;
  572. continue;
  573. }
  574. break;
  575. }
  576. /* If device is idle, use it */
  577. if (pending == 0) {
  578. best_disk = disk;
  579. break;
  580. }
  581. if (choose_next_idle)
  582. continue;
  583. if (min_pending > pending) {
  584. min_pending = pending;
  585. best_pending_disk = disk;
  586. }
  587. if (dist < best_dist) {
  588. best_dist = dist;
  589. best_dist_disk = disk;
  590. }
  591. }
  592. /*
  593. * If all disks are rotational, choose the closest disk. If any disk is
  594. * non-rotational, choose the disk with the fewest pending requests even
  595. * if that disk is rotational, which may or may not be optimal for raids
  596. * with mixed rotational/non-rotational disks depending on the workload.
  597. */
  598. if (best_disk == -1) {
  599. if (has_nonrot_disk)
  600. best_disk = best_pending_disk;
  601. else
  602. best_disk = best_dist_disk;
  603. }
  604. if (best_disk >= 0) {
  605. rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
  606. if (!rdev)
  607. goto retry;
  608. atomic_inc(&rdev->nr_pending);
  609. if (test_bit(Faulty, &rdev->flags)) {
  610. /* cannot risk returning a device that failed
  611. * before we inc'ed nr_pending
  612. */
  613. rdev_dec_pending(rdev, conf->mddev);
  614. goto retry;
  615. }
  616. sectors = best_good_sectors;
  617. if (conf->mirrors[best_disk].next_seq_sect != this_sector)
  618. conf->mirrors[best_disk].seq_start = this_sector;
  619. conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
  620. }
  621. rcu_read_unlock();
  622. *max_sectors = sectors;
  623. return best_disk;
  624. }
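/* Editorial note: read_balance() returns the index of the chosen disk
 * (or -1 if none is usable) with that rdev's nr_pending already
 * incremented, and stores in *max_sectors how many sectors may safely
 * be read from it starting at r1_bio->sector.
 */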
  625. static int raid1_mergeable_bvec(struct request_queue *q,
  626. struct bvec_merge_data *bvm,
  627. struct bio_vec *biovec)
  628. {
  629. struct mddev *mddev = q->queuedata;
  630. struct r1conf *conf = mddev->private;
  631. sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
  632. int max = biovec->bv_len;
  633. if (mddev->merge_check_needed) {
  634. int disk;
  635. rcu_read_lock();
  636. for (disk = 0; disk < conf->raid_disks * 2; disk++) {
  637. struct md_rdev *rdev = rcu_dereference(
  638. conf->mirrors[disk].rdev);
  639. if (rdev && !test_bit(Faulty, &rdev->flags)) {
  640. struct request_queue *q =
  641. bdev_get_queue(rdev->bdev);
  642. if (q->merge_bvec_fn) {
  643. bvm->bi_sector = sector +
  644. rdev->data_offset;
  645. bvm->bi_bdev = rdev->bdev;
  646. max = min(max, q->merge_bvec_fn(
  647. q, bvm, biovec));
  648. }
  649. }
  650. }
  651. rcu_read_unlock();
  652. }
  653. return max;
  654. }
  655. int md_raid1_congested(struct mddev *mddev, int bits)
  656. {
  657. struct r1conf *conf = mddev->private;
  658. int i, ret = 0;
  659. if ((bits & (1 << BDI_async_congested)) &&
  660. conf->pending_count >= max_queued_requests)
  661. return 1;
  662. rcu_read_lock();
  663. for (i = 0; i < conf->raid_disks * 2; i++) {
  664. struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  665. if (rdev && !test_bit(Faulty, &rdev->flags)) {
  666. struct request_queue *q = bdev_get_queue(rdev->bdev);
  667. BUG_ON(!q);
  668. /* Note the '|| 1' - when read_balance prefers
  669. * non-congested targets, it can be removed
  670. */
  671. if ((bits & (1<<BDI_async_congested)) || 1)
  672. ret |= bdi_congested(&q->backing_dev_info, bits);
  673. else
  674. ret &= bdi_congested(&q->backing_dev_info, bits);
  675. }
  676. }
  677. rcu_read_unlock();
  678. return ret;
  679. }
  680. EXPORT_SYMBOL_GPL(md_raid1_congested);
  681. static int raid1_congested(void *data, int bits)
  682. {
  683. struct mddev *mddev = data;
  684. return mddev_congested(mddev, bits) ||
  685. md_raid1_congested(mddev, bits);
  686. }
  687. static void flush_pending_writes(struct r1conf *conf)
  688. {
  689. /* Any writes that have been queued but are awaiting
  690. * bitmap updates get flushed here.
  691. */
  692. spin_lock_irq(&conf->device_lock);
  693. if (conf->pending_bio_list.head) {
  694. struct bio *bio;
  695. bio = bio_list_get(&conf->pending_bio_list);
  696. conf->pending_count = 0;
  697. spin_unlock_irq(&conf->device_lock);
  698. /* flush any pending bitmap writes to
  699. * disk before proceeding w/ I/O */
  700. bitmap_unplug(conf->mddev->bitmap);
  701. wake_up(&conf->wait_barrier);
  702. while (bio) { /* submit pending writes */
  703. struct bio *next = bio->bi_next;
  704. bio->bi_next = NULL;
  705. if (unlikely((bio->bi_rw & REQ_DISCARD) &&
  706. !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
  707. /* Just ignore it */
  708. bio_endio(bio, 0);
  709. else
  710. generic_make_request(bio);
  711. bio = next;
  712. }
  713. } else
  714. spin_unlock_irq(&conf->device_lock);
  715. }
  716. /* Barriers....
  717. * Sometimes we need to suspend IO while we do something else,
  718. * either some resync/recovery, or reconfigure the array.
  719. * To do this we raise a 'barrier'.
  720. * The 'barrier' is a counter that can be raised multiple times
  721. * to count how many activities are happening which preclude
  722. * normal IO.
  723. * We can only raise the barrier if there is no pending IO.
  724. * i.e. if nr_pending == 0.
  725. * We choose only to raise the barrier if no-one is waiting for the
  726. * barrier to go down. This means that as soon as an IO request
  727. * is ready, no other operations which require a barrier will start
  728. * until the IO request has had a chance.
  729. *
  730. * So: regular IO calls 'wait_barrier'. When that returns there
  731. * is no background IO happening. It must arrange to call
  732. * allow_barrier when it has finished its IO.
  733. * background IO calls must call raise_barrier. Once that returns
  734. * there is no normal IO happening. It must arrange to call
  735. * lower_barrier when the particular background IO completes.
  736. */
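/* Illustrative sketch (not part of the driver) of how the primitives
 * below pair up, per the description above:
 *
 *   regular IO:
 *     sector = wait_barrier(conf, bio);
 *     ...submit the IO...
 *     allow_barrier(conf, sector, bio->bi_iter.bi_sector);
 *
 *   resync/recovery:
 *     raise_barrier(conf, sector_nr);
 *     ...sync one resync window...
 *     lower_barrier(conf);
 */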
  737. static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
  738. {
  739. spin_lock_irq(&conf->resync_lock);
  740. /* Wait until no block IO is waiting */
  741. wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
  742. conf->resync_lock);
  743. /* block any new IO from starting */
  744. conf->barrier++;
  745. conf->next_resync = sector_nr;
  746. /* For these conditions we must wait:
  747. * A: while the array is in frozen state
  748. * B: while barrier >= RESYNC_DEPTH, meaning the resync count has
  749. * reached the maximum allowed.
  750. * C: next_resync + RESYNC_SECTORS > start_next_window, meaning the
  751. * next resync will reach into the window which normal bios are
  752. * handling.
  753. * D: while there are any active requests in the current window.
  754. */
  755. wait_event_lock_irq(conf->wait_barrier,
  756. !conf->array_frozen &&
  757. conf->barrier < RESYNC_DEPTH &&
  758. conf->current_window_requests == 0 &&
  759. (conf->start_next_window >=
  760. conf->next_resync + RESYNC_SECTORS),
  761. conf->resync_lock);
  762. conf->nr_pending++;
  763. spin_unlock_irq(&conf->resync_lock);
  764. }
  765. static void lower_barrier(struct r1conf *conf)
  766. {
  767. unsigned long flags;
  768. BUG_ON(conf->barrier <= 0);
  769. spin_lock_irqsave(&conf->resync_lock, flags);
  770. conf->barrier--;
  771. conf->nr_pending--;
  772. spin_unlock_irqrestore(&conf->resync_lock, flags);
  773. wake_up(&conf->wait_barrier);
  774. }
  775. static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
  776. {
  777. bool wait = false;
  778. if (conf->array_frozen || !bio)
  779. wait = true;
  780. else if (conf->barrier && bio_data_dir(bio) == WRITE) {
  781. if ((conf->mddev->curr_resync_completed
  782. >= bio_end_sector(bio)) ||
  783. (conf->next_resync + NEXT_NORMALIO_DISTANCE
  784. <= bio->bi_iter.bi_sector))
  785. wait = false;
  786. else
  787. wait = true;
  788. }
  789. return wait;
  790. }
  791. static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
  792. {
  793. sector_t sector = 0;
  794. spin_lock_irq(&conf->resync_lock);
  795. if (need_to_wait_for_sync(conf, bio)) {
  796. conf->nr_waiting++;
  797. /* Wait for the barrier to drop.
  798. * However if there are already pending
  799. * requests (preventing the barrier from
  800. * rising completely), and the
  801. * per-process bio queue isn't empty,
  802. * then don't wait, as we need to empty
  803. * that queue to allow conf->start_next_window
  804. * to increase.
  805. */
  806. wait_event_lock_irq(conf->wait_barrier,
  807. !conf->array_frozen &&
  808. (!conf->barrier ||
  809. ((conf->start_next_window <
  810. conf->next_resync + RESYNC_SECTORS) &&
  811. current->bio_list &&
  812. !bio_list_empty(current->bio_list))),
  813. conf->resync_lock);
  814. conf->nr_waiting--;
  815. }
  816. if (bio && bio_data_dir(bio) == WRITE) {
  817. if (bio->bi_iter.bi_sector >=
  818. conf->mddev->curr_resync_completed) {
  819. if (conf->start_next_window == MaxSector)
  820. conf->start_next_window =
  821. conf->next_resync +
  822. NEXT_NORMALIO_DISTANCE;
  823. if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
  824. <= bio->bi_iter.bi_sector)
  825. conf->next_window_requests++;
  826. else
  827. conf->current_window_requests++;
  828. sector = conf->start_next_window;
  829. }
  830. }
  831. conf->nr_pending++;
  832. spin_unlock_irq(&conf->resync_lock);
  833. return sector;
  834. }
  835. static void allow_barrier(struct r1conf *conf, sector_t start_next_window,
  836. sector_t bi_sector)
  837. {
  838. unsigned long flags;
  839. spin_lock_irqsave(&conf->resync_lock, flags);
  840. conf->nr_pending--;
  841. if (start_next_window) {
  842. if (start_next_window == conf->start_next_window) {
  843. if (conf->start_next_window + NEXT_NORMALIO_DISTANCE
  844. <= bi_sector)
  845. conf->next_window_requests--;
  846. else
  847. conf->current_window_requests--;
  848. } else
  849. conf->current_window_requests--;
  850. if (!conf->current_window_requests) {
  851. if (conf->next_window_requests) {
  852. conf->current_window_requests =
  853. conf->next_window_requests;
  854. conf->next_window_requests = 0;
  855. conf->start_next_window +=
  856. NEXT_NORMALIO_DISTANCE;
  857. } else
  858. conf->start_next_window = MaxSector;
  859. }
  860. }
  861. spin_unlock_irqrestore(&conf->resync_lock, flags);
  862. wake_up(&conf->wait_barrier);
  863. }
  864. static void freeze_array(struct r1conf *conf, int extra)
  865. {
  866. /* stop sync IO and normal IO and wait for everything to
  867. * go quiet.
  868. * We wait until nr_pending matches nr_queued+extra.
  869. * This is called in the context of one normal IO request
  870. * that has failed. Thus any sync request that might be pending
  871. * will be blocked by nr_pending, and we need to wait for
  872. * pending IO requests to complete or be queued for re-try.
  873. * Thus the number queued (nr_queued) plus this request (extra)
  874. * must match the number of pending IOs (nr_pending) before
  875. * we continue.
  876. */
  877. spin_lock_irq(&conf->resync_lock);
  878. conf->array_frozen = 1;
  879. wait_event_lock_irq_cmd(conf->wait_barrier,
  880. conf->nr_pending == conf->nr_queued+extra,
  881. conf->resync_lock,
  882. flush_pending_writes(conf));
  883. spin_unlock_irq(&conf->resync_lock);
  884. }
  885. static void unfreeze_array(struct r1conf *conf)
  886. {
  887. /* reverse the effect of the freeze */
  888. spin_lock_irq(&conf->resync_lock);
  889. conf->array_frozen = 0;
  890. wake_up(&conf->wait_barrier);
  891. spin_unlock_irq(&conf->resync_lock);
  892. }
  893. /* duplicate the data pages for behind I/O
  894. */
  895. static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
  896. {
  897. int i;
  898. struct bio_vec *bvec;
  899. struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec),
  900. GFP_NOIO);
  901. if (unlikely(!bvecs))
  902. return;
  903. bio_for_each_segment_all(bvec, bio, i) {
  904. bvecs[i] = *bvec;
  905. bvecs[i].bv_page = alloc_page(GFP_NOIO);
  906. if (unlikely(!bvecs[i].bv_page))
  907. goto do_sync_io;
  908. memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset,
  909. kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
  910. kunmap(bvecs[i].bv_page);
  911. kunmap(bvec->bv_page);
  912. }
  913. r1_bio->behind_bvecs = bvecs;
  914. r1_bio->behind_page_count = bio->bi_vcnt;
  915. set_bit(R1BIO_BehindIO, &r1_bio->state);
  916. return;
  917. do_sync_io:
  918. for (i = 0; i < bio->bi_vcnt; i++)
  919. if (bvecs[i].bv_page)
  920. put_page(bvecs[i].bv_page);
  921. kfree(bvecs);
  922. pr_debug("%dB behind alloc failed, doing sync I/O\n",
  923. bio->bi_iter.bi_size);
  924. }
  925. struct raid1_plug_cb {
  926. struct blk_plug_cb cb;
  927. struct bio_list pending;
  928. int pending_cnt;
  929. };
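/* Editorial note: raid1_unplug() is the blk_plug callback for the
 * struct above. When the plug is released from schedule() context (or
 * while a bio list is still being built), the queued writes are handed
 * to the raid1d thread via pending_bio_list; otherwise they are
 * submitted directly below.
 */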
  930. static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
  931. {
  932. struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
  933. cb);
  934. struct mddev *mddev = plug->cb.data;
  935. struct r1conf *conf = mddev->private;
  936. struct bio *bio;
  937. if (from_schedule || current->bio_list) {
  938. spin_lock_irq(&conf->device_lock);
  939. bio_list_merge(&conf->pending_bio_list, &plug->pending);
  940. conf->pending_count += plug->pending_cnt;
  941. spin_unlock_irq(&conf->device_lock);
  942. wake_up(&conf->wait_barrier);
  943. md_wakeup_thread(mddev->thread);
  944. kfree(plug);
  945. return;
  946. }
  947. /* we aren't scheduling, so we can do the write-out directly. */
  948. bio = bio_list_get(&plug->pending);
  949. bitmap_unplug(mddev->bitmap);
  950. wake_up(&conf->wait_barrier);
  951. while (bio) { /* submit pending writes */
  952. struct bio *next = bio->bi_next;
  953. bio->bi_next = NULL;
  954. if (unlikely((bio->bi_rw & REQ_DISCARD) &&
  955. !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
  956. /* Just ignore it */
  957. bio_endio(bio, 0);
  958. else
  959. generic_make_request(bio);
  960. bio = next;
  961. }
  962. kfree(plug);
  963. }
  964. static void make_request(struct mddev *mddev, struct bio * bio)
  965. {
  966. struct r1conf *conf = mddev->private;
  967. struct raid1_info *mirror;
  968. struct r1bio *r1_bio;
  969. struct bio *read_bio;
  970. int i, disks;
  971. struct bitmap *bitmap;
  972. unsigned long flags;
  973. const int rw = bio_data_dir(bio);
  974. const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
  975. const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
  976. const unsigned long do_discard = (bio->bi_rw
  977. & (REQ_DISCARD | REQ_SECURE));
  978. const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
  979. struct md_rdev *blocked_rdev;
  980. struct blk_plug_cb *cb;
  981. struct raid1_plug_cb *plug = NULL;
  982. int first_clone;
  983. int sectors_handled;
  984. int max_sectors;
  985. sector_t start_next_window;
  986. /*
  987. * Register the new request and wait if the reconstruction
  988. * thread has put up a bar for new requests.
  989. * Continue immediately if no resync is active currently.
  990. */
  991. md_write_start(mddev, bio); /* wait on superblock update early */
  992. if (bio_data_dir(bio) == WRITE &&
  993. bio_end_sector(bio) > mddev->suspend_lo &&
  994. bio->bi_iter.bi_sector < mddev->suspend_hi) {
  995. /* As the suspend_* range is controlled by
  996. * userspace, we want an interruptible
  997. * wait.
  998. */
  999. DEFINE_WAIT(w);
  1000. for (;;) {
  1001. flush_signals(current);
  1002. prepare_to_wait(&conf->wait_barrier,
  1003. &w, TASK_INTERRUPTIBLE);
  1004. if (bio_end_sector(bio) <= mddev->suspend_lo ||
  1005. bio->bi_iter.bi_sector >= mddev->suspend_hi)
  1006. break;
  1007. schedule();
  1008. }
  1009. finish_wait(&conf->wait_barrier, &w);
  1010. }
  1011. start_next_window = wait_barrier(conf, bio);
  1012. bitmap = mddev->bitmap;
  1013. /*
  1014. * make_request() can abort the operation when READA is being
  1015. * used and no empty request is available.
  1016. *
  1017. */
  1018. r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
  1019. r1_bio->master_bio = bio;
  1020. r1_bio->sectors = bio_sectors(bio);
  1021. r1_bio->state = 0;
  1022. r1_bio->mddev = mddev;
  1023. r1_bio->sector = bio->bi_iter.bi_sector;
  1024. /* We might need to issue multiple reads to different
  1025. * devices if there are bad blocks around, so we keep
  1026. * track of the number of reads in bio->bi_phys_segments.
  1027. * If this is 0, there is only one r1_bio and no locking
  1028. * will be needed when requests complete. If it is
  1029. * non-zero, then it is the number of not-completed requests.
  1030. */
  1031. bio->bi_phys_segments = 0;
  1032. clear_bit(BIO_SEG_VALID, &bio->bi_flags);
  1033. if (rw == READ) {
  1034. /*
  1035. * read balancing logic:
  1036. */
  1037. int rdisk;
  1038. read_again:
  1039. rdisk = read_balance(conf, r1_bio, &max_sectors);
  1040. if (rdisk < 0) {
  1041. /* couldn't find anywhere to read from */
  1042. raid_end_bio_io(r1_bio);
  1043. return;
  1044. }
  1045. mirror = conf->mirrors + rdisk;
  1046. if (test_bit(WriteMostly, &mirror->rdev->flags) &&
  1047. bitmap) {
  1048. /* Reading from a write-mostly device must
  1049. * take care not to over-take any writes
  1050. * that are 'behind'
  1051. */
  1052. wait_event(bitmap->behind_wait,
  1053. atomic_read(&bitmap->behind_writes) == 0);
  1054. }
  1055. r1_bio->read_disk = rdisk;
  1056. r1_bio->start_next_window = 0;
  1057. read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
  1058. bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
  1059. max_sectors);
  1060. r1_bio->bios[rdisk] = read_bio;
  1061. read_bio->bi_iter.bi_sector = r1_bio->sector +
  1062. mirror->rdev->data_offset;
  1063. read_bio->bi_bdev = mirror->rdev->bdev;
  1064. read_bio->bi_end_io = raid1_end_read_request;
  1065. read_bio->bi_rw = READ | do_sync;
  1066. read_bio->bi_private = r1_bio;
  1067. if (max_sectors < r1_bio->sectors) {
  1068. /* could not read all from this device, so we will
  1069. * need another r1_bio.
  1070. */
  1071. sectors_handled = (r1_bio->sector + max_sectors
  1072. - bio->bi_iter.bi_sector);
  1073. r1_bio->sectors = max_sectors;
  1074. spin_lock_irq(&conf->device_lock);
  1075. if (bio->bi_phys_segments == 0)
  1076. bio->bi_phys_segments = 2;
  1077. else
  1078. bio->bi_phys_segments++;
  1079. spin_unlock_irq(&conf->device_lock);
  1080. /* Cannot call generic_make_request directly
  1081. * as that will be queued in __make_request
  1082. * and subsequent mempool_alloc might block waiting
  1083. * for it. So hand bio over to raid1d.
  1084. */
  1085. reschedule_retry(r1_bio);
  1086. r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
  1087. r1_bio->master_bio = bio;
  1088. r1_bio->sectors = bio_sectors(bio) - sectors_handled;
  1089. r1_bio->state = 0;
  1090. r1_bio->mddev = mddev;
  1091. r1_bio->sector = bio->bi_iter.bi_sector +
  1092. sectors_handled;
  1093. goto read_again;
  1094. } else
  1095. generic_make_request(read_bio);
  1096. return;
  1097. }
  1098. /*
  1099. * WRITE:
  1100. */
  1101. if (conf->pending_count >= max_queued_requests) {
  1102. md_wakeup_thread(mddev->thread);
  1103. wait_event(conf->wait_barrier,
  1104. conf->pending_count < max_queued_requests);
  1105. }
  1106. /* first select target devices under rcu_lock and
  1107. * inc refcount on their rdev. Record them by setting
  1108. * bios[x] to bio
  1109. * If there are known/acknowledged bad blocks on any device on
  1110. * which we have seen a write error, we want to avoid writing those
  1111. * blocks.
  1112. * This potentially requires several writes to write around
  1113. * the bad blocks. Each set of writes gets its own r1bio
  1114. * with a set of bios attached.
  1115. */
  1116. disks = conf->raid_disks * 2;
  1117. retry_write:
  1118. r1_bio->start_next_window = start_next_window;
  1119. blocked_rdev = NULL;
  1120. rcu_read_lock();
  1121. max_sectors = r1_bio->sectors;
  1122. for (i = 0; i < disks; i++) {
  1123. struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  1124. if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
  1125. atomic_inc(&rdev->nr_pending);
  1126. blocked_rdev = rdev;
  1127. break;
  1128. }
  1129. r1_bio->bios[i] = NULL;
  1130. if (!rdev || test_bit(Faulty, &rdev->flags)
  1131. || test_bit(Unmerged, &rdev->flags)) {
  1132. if (i < conf->raid_disks)
  1133. set_bit(R1BIO_Degraded, &r1_bio->state);
  1134. continue;
  1135. }
  1136. atomic_inc(&rdev->nr_pending);
  1137. if (test_bit(WriteErrorSeen, &rdev->flags)) {
  1138. sector_t first_bad;
  1139. int bad_sectors;
  1140. int is_bad;
  1141. is_bad = is_badblock(rdev, r1_bio->sector,
  1142. max_sectors,
  1143. &first_bad, &bad_sectors);
  1144. if (is_bad < 0) {
  1145. /* mustn't write here until the bad block is
  1146. * acknowledged */
  1147. set_bit(BlockedBadBlocks, &rdev->flags);
  1148. blocked_rdev = rdev;
  1149. break;
  1150. }
  1151. if (is_bad && first_bad <= r1_bio->sector) {
  1152. /* Cannot write here at all */
  1153. bad_sectors -= (r1_bio->sector - first_bad);
  1154. if (bad_sectors < max_sectors)
  1155. /* mustn't write more than bad_sectors
  1156. * to other devices yet
  1157. */
  1158. max_sectors = bad_sectors;
  1159. rdev_dec_pending(rdev, mddev);
  1160. /* We don't set R1BIO_Degraded as that
  1161. * only applies if the disk is
  1162. * missing, so it might be re-added,
  1163. * and we want to know to recover this
  1164. * chunk.
  1165. * In this case the device is here,
  1166. * and the fact that this chunk is not
  1167. * in-sync is recorded in the bad
  1168. * block log
  1169. */
  1170. continue;
  1171. }
  1172. if (is_bad) {
  1173. int good_sectors = first_bad - r1_bio->sector;
  1174. if (good_sectors < max_sectors)
  1175. max_sectors = good_sectors;
  1176. }
  1177. }
  1178. r1_bio->bios[i] = bio;
  1179. }
  1180. rcu_read_unlock();
  1181. if (unlikely(blocked_rdev)) {
  1182. /* Wait for this device to become unblocked */
  1183. int j;
  1184. sector_t old = start_next_window;
  1185. for (j = 0; j < i; j++)
  1186. if (r1_bio->bios[j])
  1187. rdev_dec_pending(conf->mirrors[j].rdev, mddev);
  1188. r1_bio->state = 0;
  1189. allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
  1190. md_wait_for_blocked_rdev(blocked_rdev, mddev);
  1191. start_next_window = wait_barrier(conf, bio);
  1192. /*
  1193. * We must make sure the multi r1bios of bio have
  1194. * the same value of bi_phys_segments
  1195. */
  1196. if (bio->bi_phys_segments && old &&
  1197. old != start_next_window)
  1198. /* Wait for the former r1bio(s) to complete */
  1199. wait_event(conf->wait_barrier,
  1200. bio->bi_phys_segments == 1);
  1201. goto retry_write;
  1202. }
  1203. if (max_sectors < r1_bio->sectors) {
  1204. /* We are splitting this write into multiple parts, so
  1205. * we need to prepare for allocating another r1_bio.
  1206. */
  1207. r1_bio->sectors = max_sectors;
  1208. spin_lock_irq(&conf->device_lock);
  1209. if (bio->bi_phys_segments == 0)
  1210. bio->bi_phys_segments = 2;
  1211. else
  1212. bio->bi_phys_segments++;
  1213. spin_unlock_irq(&conf->device_lock);
  1214. }
  1215. sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
  1216. atomic_set(&r1_bio->remaining, 1);
  1217. atomic_set(&r1_bio->behind_remaining, 0);
  1218. first_clone = 1;
  1219. for (i = 0; i < disks; i++) {
  1220. struct bio *mbio;
  1221. if (!r1_bio->bios[i])
  1222. continue;
  1223. mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
  1224. bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
  1225. if (first_clone) {
  1226. /* do behind I/O ?
  1227. * Not if there are too many, or cannot
  1228. * allocate memory, or a reader on WriteMostly
  1229. * is waiting for behind writes to flush */
  1230. if (bitmap &&
  1231. (atomic_read(&bitmap->behind_writes)
  1232. < mddev->bitmap_info.max_write_behind) &&
  1233. !waitqueue_active(&bitmap->behind_wait))
  1234. alloc_behind_pages(mbio, r1_bio);
  1235. bitmap_startwrite(bitmap, r1_bio->sector,
  1236. r1_bio->sectors,
  1237. test_bit(R1BIO_BehindIO,
  1238. &r1_bio->state));
  1239. first_clone = 0;
  1240. }
  1241. if (r1_bio->behind_bvecs) {
  1242. struct bio_vec *bvec;
  1243. int j;
  1244. /*
  1245. * We trimmed the bio, so _all is legit
  1246. */
  1247. bio_for_each_segment_all(bvec, mbio, j)
  1248. bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
  1249. if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
  1250. atomic_inc(&r1_bio->behind_remaining);
  1251. }
  1252. r1_bio->bios[i] = mbio;
  1253. mbio->bi_iter.bi_sector = (r1_bio->sector +
  1254. conf->mirrors[i].rdev->data_offset);
  1255. mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
  1256. mbio->bi_end_io = raid1_end_write_request;
  1257. mbio->bi_rw =
  1258. WRITE | do_flush_fua | do_sync | do_discard | do_same;
  1259. mbio->bi_private = r1_bio;
  1260. atomic_inc(&r1_bio->remaining);
  1261. cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
  1262. if (cb)
  1263. plug = container_of(cb, struct raid1_plug_cb, cb);
  1264. else
  1265. plug = NULL;
  1266. spin_lock_irqsave(&conf->device_lock, flags);
  1267. if (plug) {
  1268. bio_list_add(&plug->pending, mbio);
  1269. plug->pending_cnt++;
  1270. } else {
  1271. bio_list_add(&conf->pending_bio_list, mbio);
  1272. conf->pending_count++;
  1273. }
  1274. spin_unlock_irqrestore(&conf->device_lock, flags);
  1275. if (!plug)
  1276. md_wakeup_thread(mddev->thread);
  1277. }
  1278. /* Mustn't call r1_bio_write_done before this next test,
  1279. * as it could result in the bio being freed.
  1280. */
  1281. if (sectors_handled < bio_sectors(bio)) {
  1282. r1_bio_write_done(r1_bio);
  1283. /* We need another r1_bio. It has already been counted
  1284. * in bio->bi_phys_segments
  1285. */
  1286. r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
  1287. r1_bio->master_bio = bio;
  1288. r1_bio->sectors = bio_sectors(bio) - sectors_handled;
  1289. r1_bio->state = 0;
  1290. r1_bio->mddev = mddev;
  1291. r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
  1292. goto retry_write;
  1293. }
  1294. r1_bio_write_done(r1_bio);
  1295. /* In case raid1d snuck in to freeze_array */
  1296. wake_up(&conf->wait_barrier);
  1297. }
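/* status() backs the personality's .status hook: it appends the
 * "[n/m] [UU_]" style summary (one 'U' per in-sync mirror, '_'
 * otherwise) that shows up in /proc/mdstat.
 */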
  1298. static void status(struct seq_file *seq, struct mddev *mddev)
  1299. {
  1300. struct r1conf *conf = mddev->private;
  1301. int i;
  1302. seq_printf(seq, " [%d/%d] [", conf->raid_disks,
  1303. conf->raid_disks - mddev->degraded);
  1304. rcu_read_lock();
  1305. for (i = 0; i < conf->raid_disks; i++) {
  1306. struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  1307. seq_printf(seq, "%s",
  1308. rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
  1309. }
  1310. rcu_read_unlock();
  1311. seq_printf(seq, "]");
  1312. }
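/* .error_handler hook. If the failing device is the last In_sync
 * mirror we keep using it and only disable recovery from it;
 * otherwise mark it Faulty (bumping mddev->degraded if it was still
 * In_sync) and interrupt any running recovery.
 */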
  1313. static void error(struct mddev *mddev, struct md_rdev *rdev)
  1314. {
  1315. char b[BDEVNAME_SIZE];
  1316. struct r1conf *conf = mddev->private;
  1317. /*
  1318. * If it is not operational, then we have already marked it as dead
1319. * else if it is the last working disk, ignore the error and let the
1320. * next level up know.
  1321. * else mark the drive as failed
  1322. */
  1323. if (test_bit(In_sync, &rdev->flags)
  1324. && (conf->raid_disks - mddev->degraded) == 1) {
  1325. /*
  1326. * Don't fail the drive, act as though we were just a
  1327. * normal single drive.
  1328. * However don't try a recovery from this drive as
  1329. * it is very likely to fail.
  1330. */
  1331. conf->recovery_disabled = mddev->recovery_disabled;
  1332. return;
  1333. }
  1334. set_bit(Blocked, &rdev->flags);
  1335. if (test_and_clear_bit(In_sync, &rdev->flags)) {
  1336. unsigned long flags;
  1337. spin_lock_irqsave(&conf->device_lock, flags);
  1338. mddev->degraded++;
  1339. set_bit(Faulty, &rdev->flags);
  1340. spin_unlock_irqrestore(&conf->device_lock, flags);
  1341. } else
  1342. set_bit(Faulty, &rdev->flags);
  1343. /*
  1344. * if recovery is running, make sure it aborts.
  1345. */
  1346. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  1347. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  1348. printk(KERN_ALERT
  1349. "md/raid1:%s: Disk failure on %s, disabling device.\n"
  1350. "md/raid1:%s: Operation continuing on %d devices.\n",
  1351. mdname(mddev), bdevname(rdev->bdev, b),
  1352. mdname(mddev), conf->raid_disks - mddev->degraded);
  1353. }
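/* Dump the working/total mirror counts and the state of each rdev to
 * the kernel log at KERN_DEBUG level.
 */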
  1354. static void print_conf(struct r1conf *conf)
  1355. {
  1356. int i;
  1357. printk(KERN_DEBUG "RAID1 conf printout:\n");
  1358. if (!conf) {
  1359. printk(KERN_DEBUG "(!conf)\n");
  1360. return;
  1361. }
  1362. printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
  1363. conf->raid_disks);
  1364. rcu_read_lock();
  1365. for (i = 0; i < conf->raid_disks; i++) {
  1366. char b[BDEVNAME_SIZE];
  1367. struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
  1368. if (rdev)
  1369. printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
  1370. i, !test_bit(In_sync, &rdev->flags),
  1371. !test_bit(Faulty, &rdev->flags),
  1372. bdevname(rdev->bdev,b));
  1373. }
  1374. rcu_read_unlock();
  1375. }
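/* Tear down resync state at the end of a sync pass: drain the
 * barrier, free the resync buffer pool and reset the sliding resync
 * window bookkeeping.
 */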
  1376. static void close_sync(struct r1conf *conf)
  1377. {
  1378. wait_barrier(conf, NULL);
  1379. allow_barrier(conf, 0, 0);
  1380. mempool_destroy(conf->r1buf_pool);
  1381. conf->r1buf_pool = NULL;
  1382. spin_lock_irq(&conf->resync_lock);
  1383. conf->next_resync = 0;
  1384. conf->start_next_window = MaxSector;
  1385. conf->current_window_requests +=
  1386. conf->next_window_requests;
  1387. conf->next_window_requests = 0;
  1388. spin_unlock_irq(&conf->resync_lock);
  1389. }
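/* Promote freshly recovered spares/replacements to In_sync. Returns
 * the number of mirrors that became active; mddev->degraded is
 * reduced by the same amount under device_lock.
 */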
  1390. static int raid1_spare_active(struct mddev *mddev)
  1391. {
  1392. int i;
  1393. struct r1conf *conf = mddev->private;
  1394. int count = 0;
  1395. unsigned long flags;
  1396. /*
  1397. * Find all failed disks within the RAID1 configuration
  1398. * and mark them readable.
  1399. * Called under mddev lock, so rcu protection not needed.
  1400. */
  1401. for (i = 0; i < conf->raid_disks; i++) {
  1402. struct md_rdev *rdev = conf->mirrors[i].rdev;
  1403. struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
  1404. if (repl
  1405. && repl->recovery_offset == MaxSector
  1406. && !test_bit(Faulty, &repl->flags)
  1407. && !test_and_set_bit(In_sync, &repl->flags)) {
  1408. /* replacement has just become active */
  1409. if (!rdev ||
  1410. !test_and_clear_bit(In_sync, &rdev->flags))
  1411. count++;
  1412. if (rdev) {
  1413. /* Replaced device not technically
  1414. * faulty, but we need to be sure
  1415. * it gets removed and never re-added
  1416. */
  1417. set_bit(Faulty, &rdev->flags);
  1418. sysfs_notify_dirent_safe(
  1419. rdev->sysfs_state);
  1420. }
  1421. }
  1422. if (rdev
  1423. && rdev->recovery_offset == MaxSector
  1424. && !test_bit(Faulty, &rdev->flags)
  1425. && !test_and_set_bit(In_sync, &rdev->flags)) {
  1426. count++;
  1427. sysfs_notify_dirent_safe(rdev->sysfs_state);
  1428. }
  1429. }
  1430. spin_lock_irqsave(&conf->device_lock, flags);
  1431. mddev->degraded -= count;
  1432. spin_unlock_irqrestore(&conf->device_lock, flags);
  1433. print_conf(conf);
  1434. return count;
  1435. }
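/* .hot_add_disk hook: place @rdev into a free mirror slot, or into a
 * replacement slot if an existing mirror asked for one. Returns 0 on
 * success, -EBUSY/-EEXIST otherwise.
 */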
  1436. static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
  1437. {
  1438. struct r1conf *conf = mddev->private;
  1439. int err = -EEXIST;
  1440. int mirror = 0;
  1441. struct raid1_info *p;
  1442. int first = 0;
  1443. int last = conf->raid_disks - 1;
  1444. struct request_queue *q = bdev_get_queue(rdev->bdev);
  1445. if (mddev->recovery_disabled == conf->recovery_disabled)
  1446. return -EBUSY;
  1447. if (rdev->raid_disk >= 0)
  1448. first = last = rdev->raid_disk;
  1449. if (q->merge_bvec_fn) {
  1450. set_bit(Unmerged, &rdev->flags);
  1451. mddev->merge_check_needed = 1;
  1452. }
  1453. for (mirror = first; mirror <= last; mirror++) {
  1454. p = conf->mirrors+mirror;
  1455. if (!p->rdev) {
  1456. if (mddev->gendisk)
  1457. disk_stack_limits(mddev->gendisk, rdev->bdev,
  1458. rdev->data_offset << 9);
  1459. p->head_position = 0;
  1460. rdev->raid_disk = mirror;
  1461. err = 0;
1462. /* As all devices are equivalent, we don't need a full recovery
1463. * if this device was recently a member of the array.
1464. */
  1465. if (rdev->saved_raid_disk < 0)
  1466. conf->fullsync = 1;
  1467. rcu_assign_pointer(p->rdev, rdev);
  1468. break;
  1469. }
  1470. if (test_bit(WantReplacement, &p->rdev->flags) &&
  1471. p[conf->raid_disks].rdev == NULL) {
  1472. /* Add this device as a replacement */
  1473. clear_bit(In_sync, &rdev->flags);
  1474. set_bit(Replacement, &rdev->flags);
  1475. rdev->raid_disk = mirror;
  1476. err = 0;
  1477. conf->fullsync = 1;
  1478. rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
  1479. break;
  1480. }
  1481. }
  1482. if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
  1483. /* Some requests might not have seen this new
  1484. * merge_bvec_fn. We must wait for them to complete
  1485. * before merging the device fully.
  1486. * First we make sure any code which has tested
  1487. * our function has submitted the request, then
  1488. * we wait for all outstanding requests to complete.
  1489. */
  1490. synchronize_sched();
  1491. freeze_array(conf, 0);
  1492. unfreeze_array(conf);
  1493. clear_bit(Unmerged, &rdev->flags);
  1494. }
  1495. md_integrity_add_rdev(rdev, mddev);
  1496. if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
  1497. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
  1498. print_conf(conf);
  1499. return err;
  1500. }
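/* .hot_remove_disk hook: detach @rdev from its slot. Refuses with
 * -EBUSY while the device is In_sync, still has pending I/O, or is a
 * non-faulty device that recovery could still use. A waiting
 * replacement is promoted into the vacated primary slot.
 */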
  1501. static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
  1502. {
  1503. struct r1conf *conf = mddev->private;
  1504. int err = 0;
  1505. int number = rdev->raid_disk;
  1506. struct raid1_info *p = conf->mirrors + number;
  1507. if (rdev != p->rdev)
  1508. p = conf->mirrors + conf->raid_disks + number;
  1509. print_conf(conf);
  1510. if (rdev == p->rdev) {
  1511. if (test_bit(In_sync, &rdev->flags) ||
  1512. atomic_read(&rdev->nr_pending)) {
  1513. err = -EBUSY;
  1514. goto abort;
  1515. }
  1516. /* Only remove non-faulty devices if recovery
  1517. * is not possible.
  1518. */
  1519. if (!test_bit(Faulty, &rdev->flags) &&
  1520. mddev->recovery_disabled != conf->recovery_disabled &&
  1521. mddev->degraded < conf->raid_disks) {
  1522. err = -EBUSY;
  1523. goto abort;
  1524. }
  1525. p->rdev = NULL;
  1526. synchronize_rcu();
  1527. if (atomic_read(&rdev->nr_pending)) {
  1528. /* lost the race, try later */
  1529. err = -EBUSY;
  1530. p->rdev = rdev;
  1531. goto abort;
  1532. } else if (conf->mirrors[conf->raid_disks + number].rdev) {
  1533. /* We just removed a device that is being replaced.
  1534. * Move down the replacement. We drain all IO before
  1535. * doing this to avoid confusion.
  1536. */
  1537. struct md_rdev *repl =
  1538. conf->mirrors[conf->raid_disks + number].rdev;
  1539. freeze_array(conf, 0);
  1540. clear_bit(Replacement, &repl->flags);
  1541. p->rdev = repl;
  1542. conf->mirrors[conf->raid_disks + number].rdev = NULL;
  1543. unfreeze_array(conf);
  1544. clear_bit(WantReplacement, &rdev->flags);
  1545. } else
  1546. clear_bit(WantReplacement, &rdev->flags);
  1547. err = md_integrity_register(mddev);
  1548. }
  1549. abort:
  1550. print_conf(conf);
  1551. return err;
  1552. }
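/* Completion handler for the READ side of a resync/recovery r1bio;
 * the follow-up writes (or retries) are deferred to raid1d via
 * reschedule_retry().
 */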
  1553. static void end_sync_read(struct bio *bio, int error)
  1554. {
  1555. struct r1bio *r1_bio = bio->bi_private;
  1556. update_head_pos(r1_bio->read_disk, r1_bio);
  1557. /*
  1558. * we have read a block, now it needs to be re-written,
  1559. * or re-read if the read failed.
  1560. * We don't do much here, just schedule handling by raid1d
  1561. */
  1562. if (test_bit(BIO_UPTODATE, &bio->bi_flags))
  1563. set_bit(R1BIO_Uptodate, &r1_bio->state);
  1564. if (atomic_dec_and_test(&r1_bio->remaining))
  1565. reschedule_retry(r1_bio);
  1566. }
  1567. static void end_sync_write(struct bio *bio, int error)
  1568. {
  1569. int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
  1570. struct r1bio *r1_bio = bio->bi_private;
  1571. struct mddev *mddev = r1_bio->mddev;
  1572. struct r1conf *conf = mddev->private;
  1573. int mirror=0;
  1574. sector_t first_bad;
  1575. int bad_sectors;
  1576. mirror = find_bio_disk(r1_bio, bio);
  1577. if (!uptodate) {
  1578. sector_t sync_blocks = 0;
  1579. sector_t s = r1_bio->sector;
  1580. long sectors_to_go = r1_bio->sectors;
1581. /* make sure these bits don't get cleared. */
  1582. do {
  1583. bitmap_end_sync(mddev->bitmap, s,
  1584. &sync_blocks, 1);
  1585. s += sync_blocks;
  1586. sectors_to_go -= sync_blocks;
  1587. } while (sectors_to_go > 0);
  1588. set_bit(WriteErrorSeen,
  1589. &conf->mirrors[mirror].rdev->flags);
  1590. if (!test_and_set_bit(WantReplacement,
  1591. &conf->mirrors[mirror].rdev->flags))
  1592. set_bit(MD_RECOVERY_NEEDED, &
  1593. mddev->recovery);
  1594. set_bit(R1BIO_WriteError, &r1_bio->state);
  1595. } else if (is_badblock(conf->mirrors[mirror].rdev,
  1596. r1_bio->sector,
  1597. r1_bio->sectors,
  1598. &first_bad, &bad_sectors) &&
  1599. !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
  1600. r1_bio->sector,
  1601. r1_bio->sectors,
  1602. &first_bad, &bad_sectors)
  1603. )
  1604. set_bit(R1BIO_MadeGood, &r1_bio->state);
  1605. if (atomic_dec_and_test(&r1_bio->remaining)) {
  1606. int s = r1_bio->sectors;
  1607. if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
  1608. test_bit(R1BIO_WriteError, &r1_bio->state))
  1609. reschedule_retry(r1_bio);
  1610. else {
  1611. put_buf(r1_bio);
  1612. md_done_sync(mddev, s, uptodate);
  1613. }
  1614. }
  1615. }
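/* Synchronously read or write @sectors at @sector on @rdev using
 * @page. On failure record the error: WriteErrorSeen for writes,
 * then a bad block, or md_error() if the bad block cannot be logged.
 * Returns 1 on success, 0 on failure.
 */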
  1616. static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
  1617. int sectors, struct page *page, int rw)
  1618. {
  1619. if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
  1620. /* success */
  1621. return 1;
  1622. if (rw == WRITE) {
  1623. set_bit(WriteErrorSeen, &rdev->flags);
  1624. if (!test_and_set_bit(WantReplacement,
  1625. &rdev->flags))
  1626. set_bit(MD_RECOVERY_NEEDED, &
  1627. rdev->mddev->recovery);
  1628. }
  1629. /* need to record an error - either for the block or the device */
  1630. if (!rdev_set_badblocks(rdev, sector, sectors, 0))
  1631. md_error(rdev->mddev, rdev);
  1632. return 0;
  1633. }
  1634. static int fix_sync_read_error(struct r1bio *r1_bio)
  1635. {
  1636. /* Try some synchronous reads of other devices to get
  1637. * good data, much like with normal read errors. Only
  1638. * read into the pages we already have so we don't
  1639. * need to re-issue the read request.
  1640. * We don't need to freeze the array, because being in an
  1641. * active sync request, there is no normal IO, and
  1642. * no overlapping syncs.
  1643. * We don't need to check is_badblock() again as we
  1644. * made sure that anything with a bad block in range
  1645. * will have bi_end_io clear.
  1646. */
  1647. struct mddev *mddev = r1_bio->mddev;
  1648. struct r1conf *conf = mddev->private;
  1649. struct bio *bio = r1_bio->bios[r1_bio->read_disk];
  1650. sector_t sect = r1_bio->sector;
  1651. int sectors = r1_bio->sectors;
  1652. int idx = 0;
  1653. while(sectors) {
  1654. int s = sectors;
  1655. int d = r1_bio->read_disk;
  1656. int success = 0;
  1657. struct md_rdev *rdev;
  1658. int start;
  1659. if (s > (PAGE_SIZE>>9))
  1660. s = PAGE_SIZE >> 9;
  1661. do {
  1662. if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1663. /* No rcu protection needed here; devices
1664. * can only be removed when no resync is
1665. * active, and resync is currently active.
1666. */
  1667. rdev = conf->mirrors[d].rdev;
  1668. if (sync_page_io(rdev, sect, s<<9,
  1669. bio->bi_io_vec[idx].bv_page,
  1670. READ, false)) {
  1671. success = 1;
  1672. break;
  1673. }
  1674. }
  1675. d++;
  1676. if (d == conf->raid_disks * 2)
  1677. d = 0;
  1678. } while (!success && d != r1_bio->read_disk);
  1679. if (!success) {
  1680. char b[BDEVNAME_SIZE];
  1681. int abort = 0;
  1682. /* Cannot read from anywhere, this block is lost.
  1683. * Record a bad block on each device. If that doesn't
  1684. * work just disable and interrupt the recovery.
  1685. * Don't fail devices as that won't really help.
  1686. */
  1687. printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error"
  1688. " for block %llu\n",
  1689. mdname(mddev),
  1690. bdevname(bio->bi_bdev, b),
  1691. (unsigned long long)r1_bio->sector);
  1692. for (d = 0; d < conf->raid_disks * 2; d++) {
  1693. rdev = conf->mirrors[d].rdev;
  1694. if (!rdev || test_bit(Faulty, &rdev->flags))
  1695. continue;
  1696. if (!rdev_set_badblocks(rdev, sect, s, 0))
  1697. abort = 1;
  1698. }
  1699. if (abort) {
  1700. conf->recovery_disabled =
  1701. mddev->recovery_disabled;
  1702. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  1703. md_done_sync(mddev, r1_bio->sectors, 0);
  1704. put_buf(r1_bio);
  1705. return 0;
  1706. }
  1707. /* Try next page */
  1708. sectors -= s;
  1709. sect += s;
  1710. idx++;
  1711. continue;
  1712. }
  1713. start = d;
  1714. /* write it back and re-read */
  1715. while (d != r1_bio->read_disk) {
  1716. if (d == 0)
  1717. d = conf->raid_disks * 2;
  1718. d--;
  1719. if (r1_bio->bios[d]->bi_end_io != end_sync_read)
  1720. continue;
  1721. rdev = conf->mirrors[d].rdev;
  1722. if (r1_sync_page_io(rdev, sect, s,
  1723. bio->bi_io_vec[idx].bv_page,
  1724. WRITE) == 0) {
  1725. r1_bio->bios[d]->bi_end_io = NULL;
  1726. rdev_dec_pending(rdev, mddev);
  1727. }
  1728. }
  1729. d = start;
  1730. while (d != r1_bio->read_disk) {
  1731. if (d == 0)
  1732. d = conf->raid_disks * 2;
  1733. d--;
  1734. if (r1_bio->bios[d]->bi_end_io != end_sync_read)
  1735. continue;
  1736. rdev = conf->mirrors[d].rdev;
  1737. if (r1_sync_page_io(rdev, sect, s,
  1738. bio->bi_io_vec[idx].bv_page,
  1739. READ) != 0)
  1740. atomic_add(s, &rdev->corrected_errors);
  1741. }
  1742. sectors -= s;
  1743. sect += s;
1744. idx++;
  1745. }
  1746. set_bit(R1BIO_Uptodate, &r1_bio->state);
  1747. set_bit(BIO_UPTODATE, &bio->bi_flags);
  1748. return 1;
  1749. }
  1750. static void process_checks(struct r1bio *r1_bio)
  1751. {
  1752. /* We have read all readable devices. If we haven't
  1753. * got the block, then there is no hope left.
  1754. * If we have, then we want to do a comparison
  1755. * and skip the write if everything is the same.
  1756. * If any blocks failed to read, then we need to
  1757. * attempt an over-write
  1758. */
  1759. struct mddev *mddev = r1_bio->mddev;
  1760. struct r1conf *conf = mddev->private;
  1761. int primary;
  1762. int i;
  1763. int vcnt;
  1764. /* Fix variable parts of all bios */
  1765. vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
  1766. for (i = 0; i < conf->raid_disks * 2; i++) {
  1767. int j;
  1768. int size;
  1769. int uptodate;
  1770. struct bio *b = r1_bio->bios[i];
  1771. if (b->bi_end_io != end_sync_read)
  1772. continue;
  1773. /* fixup the bio for reuse, but preserve BIO_UPTODATE */
  1774. uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
  1775. bio_reset(b);
  1776. if (!uptodate)
  1777. clear_bit(BIO_UPTODATE, &b->bi_flags);
  1778. b->bi_vcnt = vcnt;
  1779. b->bi_iter.bi_size = r1_bio->sectors << 9;
  1780. b->bi_iter.bi_sector = r1_bio->sector +
  1781. conf->mirrors[i].rdev->data_offset;
  1782. b->bi_bdev = conf->mirrors[i].rdev->bdev;
  1783. b->bi_end_io = end_sync_read;
  1784. b->bi_private = r1_bio;
  1785. size = b->bi_iter.bi_size;
  1786. for (j = 0; j < vcnt ; j++) {
  1787. struct bio_vec *bi;
  1788. bi = &b->bi_io_vec[j];
  1789. bi->bv_offset = 0;
  1790. if (size > PAGE_SIZE)
  1791. bi->bv_len = PAGE_SIZE;
  1792. else
  1793. bi->bv_len = size;
  1794. size -= PAGE_SIZE;
  1795. }
  1796. }
  1797. for (primary = 0; primary < conf->raid_disks * 2; primary++)
  1798. if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
  1799. test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
  1800. r1_bio->bios[primary]->bi_end_io = NULL;
  1801. rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
  1802. break;
  1803. }
  1804. r1_bio->read_disk = primary;
  1805. for (i = 0; i < conf->raid_disks * 2; i++) {
  1806. int j;
  1807. struct bio *pbio = r1_bio->bios[primary];
  1808. struct bio *sbio = r1_bio->bios[i];
  1809. int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
  1810. if (sbio->bi_end_io != end_sync_read)
  1811. continue;
  1812. /* Now we can 'fixup' the BIO_UPTODATE flag */
  1813. set_bit(BIO_UPTODATE, &sbio->bi_flags);
  1814. if (uptodate) {
  1815. for (j = vcnt; j-- ; ) {
  1816. struct page *p, *s;
  1817. p = pbio->bi_io_vec[j].bv_page;
  1818. s = sbio->bi_io_vec[j].bv_page;
  1819. if (memcmp(page_address(p),
  1820. page_address(s),
  1821. sbio->bi_io_vec[j].bv_len))
  1822. break;
  1823. }
  1824. } else
  1825. j = 0;
  1826. if (j >= 0)
  1827. atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
  1828. if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
  1829. && uptodate)) {
  1830. /* No need to write to this device. */
  1831. sbio->bi_end_io = NULL;
  1832. rdev_dec_pending(conf->mirrors[i].rdev, mddev);
  1833. continue;
  1834. }
  1835. bio_copy_data(sbio, pbio);
  1836. }
  1837. }
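/* Issue the WRITE side of a resync/recovery request: repair the read
 * if it failed, optionally compare the copies (check/repair passes),
 * then schedule writes to the remaining targets and complete the
 * sync chunk once they finish.
 */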
  1838. static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
  1839. {
  1840. struct r1conf *conf = mddev->private;
  1841. int i;
  1842. int disks = conf->raid_disks * 2;
  1843. struct bio *bio, *wbio;
  1844. bio = r1_bio->bios[r1_bio->read_disk];
  1845. if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
  1846. /* ouch - failed to read all of that. */
  1847. if (!fix_sync_read_error(r1_bio))
  1848. return;
  1849. if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
  1850. process_checks(r1_bio);
  1851. /*
  1852. * schedule writes
  1853. */
  1854. atomic_set(&r1_bio->remaining, 1);
  1855. for (i = 0; i < disks ; i++) {
  1856. wbio = r1_bio->bios[i];
  1857. if (wbio->bi_end_io == NULL ||
  1858. (wbio->bi_end_io == end_sync_read &&
  1859. (i == r1_bio->read_disk ||
  1860. !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
  1861. continue;
  1862. wbio->bi_rw = WRITE;
  1863. wbio->bi_end_io = end_sync_write;
  1864. atomic_inc(&r1_bio->remaining);
  1865. md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
  1866. generic_make_request(wbio);
  1867. }
  1868. if (atomic_dec_and_test(&r1_bio->remaining)) {
  1869. /* if we're here, all write(s) have completed, so clean up */
  1870. int s = r1_bio->sectors;
  1871. if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
  1872. test_bit(R1BIO_WriteError, &r1_bio->state))
  1873. reschedule_retry(r1_bio);
  1874. else {
  1875. put_buf(r1_bio);
  1876. md_done_sync(mddev, s, 1);
  1877. }
  1878. }
  1879. }
  1880. /*
  1881. * This is a kernel thread which:
  1882. *
  1883. * 1. Retries failed read operations on working mirrors.
1884. * 2. Updates the raid superblock when problems are encountered.
  1885. * 3. Performs writes following reads for array synchronising.
  1886. */
  1887. static void fix_read_error(struct r1conf *conf, int read_disk,
  1888. sector_t sect, int sectors)
  1889. {
  1890. struct mddev *mddev = conf->mddev;
  1891. while(sectors) {
  1892. int s = sectors;
  1893. int d = read_disk;
  1894. int success = 0;
  1895. int start;
  1896. struct md_rdev *rdev;
  1897. if (s > (PAGE_SIZE>>9))
  1898. s = PAGE_SIZE >> 9;
  1899. do {
  1900. /* Note: no rcu protection needed here
  1901. * as this is synchronous in the raid1d thread
  1902. * which is the thread that might remove
  1903. * a device. If raid1d ever becomes multi-threaded....
  1904. */
  1905. sector_t first_bad;
  1906. int bad_sectors;
  1907. rdev = conf->mirrors[d].rdev;
  1908. if (rdev &&
  1909. (test_bit(In_sync, &rdev->flags) ||
  1910. (!test_bit(Faulty, &rdev->flags) &&
  1911. rdev->recovery_offset >= sect + s)) &&
  1912. is_badblock(rdev, sect, s,
  1913. &first_bad, &bad_sectors) == 0 &&
  1914. sync_page_io(rdev, sect, s<<9,
  1915. conf->tmppage, READ, false))
  1916. success = 1;
  1917. else {
  1918. d++;
  1919. if (d == conf->raid_disks * 2)
  1920. d = 0;
  1921. }
  1922. } while (!success && d != read_disk);
  1923. if (!success) {
  1924. /* Cannot read from anywhere - mark it bad */
  1925. struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
  1926. if (!rdev_set_badblocks(rdev, sect, s, 0))
  1927. md_error(mddev, rdev);
  1928. break;
  1929. }
  1930. /* write it back and re-read */
  1931. start = d;
  1932. while (d != read_disk) {
  1933. if (d==0)
  1934. d = conf->raid_disks * 2;
  1935. d--;
  1936. rdev = conf->mirrors[d].rdev;
  1937. if (rdev &&
  1938. !test_bit(Faulty, &rdev->flags))
  1939. r1_sync_page_io(rdev, sect, s,
  1940. conf->tmppage, WRITE);
  1941. }
  1942. d = start;
  1943. while (d != read_disk) {
  1944. char b[BDEVNAME_SIZE];
  1945. if (d==0)
  1946. d = conf->raid_disks * 2;
  1947. d--;
  1948. rdev = conf->mirrors[d].rdev;
  1949. if (rdev &&
  1950. !test_bit(Faulty, &rdev->flags)) {
  1951. if (r1_sync_page_io(rdev, sect, s,
  1952. conf->tmppage, READ)) {
  1953. atomic_add(s, &rdev->corrected_errors);
  1954. printk(KERN_INFO
  1955. "md/raid1:%s: read error corrected "
  1956. "(%d sectors at %llu on %s)\n",
  1957. mdname(mddev), s,
  1958. (unsigned long long)(sect +
  1959. rdev->data_offset),
  1960. bdevname(rdev->bdev, b));
  1961. }
  1962. }
  1963. }
  1964. sectors -= s;
  1965. sect += s;
  1966. }
  1967. }
  1968. static int narrow_write_error(struct r1bio *r1_bio, int i)
  1969. {
  1970. struct mddev *mddev = r1_bio->mddev;
  1971. struct r1conf *conf = mddev->private;
  1972. struct md_rdev *rdev = conf->mirrors[i].rdev;
  1973. /* bio has the data to be written to device 'i' where
  1974. * we just recently had a write error.
  1975. * We repeatedly clone the bio and trim down to one block,
  1976. * then try the write. Where the write fails we record
  1977. * a bad block.
  1978. * It is conceivable that the bio doesn't exactly align with
  1979. * blocks. We must handle this somehow.
  1980. *
  1981. * We currently own a reference on the rdev.
  1982. */
  1983. int block_sectors;
  1984. sector_t sector;
  1985. int sectors;
  1986. int sect_to_write = r1_bio->sectors;
  1987. int ok = 1;
  1988. if (rdev->badblocks.shift < 0)
  1989. return 0;
  1990. block_sectors = 1 << rdev->badblocks.shift;
  1991. sector = r1_bio->sector;
  1992. sectors = ((sector + block_sectors)
  1993. & ~(sector_t)(block_sectors - 1))
  1994. - sector;
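/* Example of the alignment above: with block_sectors == 8 and
 * sector == 21, (21 + 8) & ~7 == 24, so sectors == 3 and the first
 * clone only covers the tail of the current badblock-sized block;
 * later iterations then use full block_sectors chunks.
 */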
  1995. while (sect_to_write) {
  1996. struct bio *wbio;
  1997. if (sectors > sect_to_write)
  1998. sectors = sect_to_write;
1999. /* Write at 'sector' for 'sectors' */
  2000. if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
  2001. unsigned vcnt = r1_bio->behind_page_count;
  2002. struct bio_vec *vec = r1_bio->behind_bvecs;
  2003. while (!vec->bv_page) {
  2004. vec++;
  2005. vcnt--;
  2006. }
  2007. wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
  2008. memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
  2009. wbio->bi_vcnt = vcnt;
  2010. } else {
  2011. wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
  2012. }
  2013. wbio->bi_rw = WRITE;
  2014. wbio->bi_iter.bi_sector = r1_bio->sector;
  2015. wbio->bi_iter.bi_size = r1_bio->sectors << 9;
  2016. bio_trim(wbio, sector - r1_bio->sector, sectors);
  2017. wbio->bi_iter.bi_sector += rdev->data_offset;
  2018. wbio->bi_bdev = rdev->bdev;
  2019. if (submit_bio_wait(WRITE, wbio) == 0)
  2020. /* failure! */
  2021. ok = rdev_set_badblocks(rdev, sector,
  2022. sectors, 0)
  2023. && ok;
  2024. bio_put(wbio);
  2025. sect_to_write -= sectors;
  2026. sector += sectors;
  2027. sectors = block_sectors;
  2028. }
  2029. return ok;
  2030. }
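/* Called from raid1d once a resync write that saw bad blocks or
 * write errors has finished: clear bad-block records that were
 * re-written successfully, log new ones (failing the device if that
 * is impossible), then complete the sync chunk.
 */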
  2031. static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
  2032. {
  2033. int m;
  2034. int s = r1_bio->sectors;
  2035. for (m = 0; m < conf->raid_disks * 2 ; m++) {
  2036. struct md_rdev *rdev = conf->mirrors[m].rdev;
  2037. struct bio *bio = r1_bio->bios[m];
  2038. if (bio->bi_end_io == NULL)
  2039. continue;
  2040. if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
  2041. test_bit(R1BIO_MadeGood, &r1_bio->state)) {
  2042. rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
  2043. }
  2044. if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
  2045. test_bit(R1BIO_WriteError, &r1_bio->state)) {
  2046. if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
  2047. md_error(conf->mddev, rdev);
  2048. }
  2049. }
  2050. put_buf(r1_bio);
  2051. md_done_sync(conf->mddev, s, 1);
  2052. }
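/* Called from raid1d for a normal write needing follow-up: clear bad
 * blocks behind IO_MADE_GOOD writes, and for devices that reported
 * errors try to narrow the failure down to precise bad blocks,
 * failing the device if that doesn't work.
 */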
  2053. static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
  2054. {
  2055. int m;
  2056. for (m = 0; m < conf->raid_disks * 2 ; m++)
  2057. if (r1_bio->bios[m] == IO_MADE_GOOD) {
  2058. struct md_rdev *rdev = conf->mirrors[m].rdev;
  2059. rdev_clear_badblocks(rdev,
  2060. r1_bio->sector,
  2061. r1_bio->sectors, 0);
  2062. rdev_dec_pending(rdev, conf->mddev);
  2063. } else if (r1_bio->bios[m] != NULL) {
  2064. /* This drive got a write error. We need to
  2065. * narrow down and record precise write
  2066. * errors.
  2067. */
  2068. if (!narrow_write_error(r1_bio, m)) {
  2069. md_error(conf->mddev,
  2070. conf->mirrors[m].rdev);
  2071. /* an I/O failed, we can't clear the bitmap */
  2072. set_bit(R1BIO_Degraded, &r1_bio->state);
  2073. }
  2074. rdev_dec_pending(conf->mirrors[m].rdev,
  2075. conf->mddev);
  2076. }
  2077. if (test_bit(R1BIO_WriteError, &r1_bio->state))
  2078. close_write(r1_bio);
  2079. raid_end_bio_io(r1_bio);
  2080. }
  2081. static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
  2082. {
  2083. int disk;
  2084. int max_sectors;
  2085. struct mddev *mddev = conf->mddev;
  2086. struct bio *bio;
  2087. char b[BDEVNAME_SIZE];
  2088. struct md_rdev *rdev;
  2089. clear_bit(R1BIO_ReadError, &r1_bio->state);
  2090. /* we got a read error. Maybe the drive is bad. Maybe just
  2091. * the block and we can fix it.
  2092. * We freeze all other IO, and try reading the block from
  2093. * other devices. When we find one, we re-write
2094. * and re-read to check that this fixes the read error.
  2095. * This is all done synchronously while the array is
  2096. * frozen
  2097. */
  2098. if (mddev->ro == 0) {
  2099. freeze_array(conf, 1);
  2100. fix_read_error(conf, r1_bio->read_disk,
  2101. r1_bio->sector, r1_bio->sectors);
  2102. unfreeze_array(conf);
  2103. } else
  2104. md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
  2105. rdev_dec_pending(conf->mirrors[r1_bio->read_disk].rdev, conf->mddev);
  2106. bio = r1_bio->bios[r1_bio->read_disk];
  2107. bdevname(bio->bi_bdev, b);
  2108. read_more:
  2109. disk = read_balance(conf, r1_bio, &max_sectors);
  2110. if (disk == -1) {
  2111. printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O"
  2112. " read error for block %llu\n",
  2113. mdname(mddev), b, (unsigned long long)r1_bio->sector);
  2114. raid_end_bio_io(r1_bio);
  2115. } else {
  2116. const unsigned long do_sync
  2117. = r1_bio->master_bio->bi_rw & REQ_SYNC;
  2118. if (bio) {
  2119. r1_bio->bios[r1_bio->read_disk] =
  2120. mddev->ro ? IO_BLOCKED : NULL;
  2121. bio_put(bio);
  2122. }
  2123. r1_bio->read_disk = disk;
  2124. bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
  2125. bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
  2126. max_sectors);
  2127. r1_bio->bios[r1_bio->read_disk] = bio;
  2128. rdev = conf->mirrors[disk].rdev;
  2129. printk_ratelimited(KERN_ERR
  2130. "md/raid1:%s: redirecting sector %llu"
  2131. " to other mirror: %s\n",
  2132. mdname(mddev),
  2133. (unsigned long long)r1_bio->sector,
  2134. bdevname(rdev->bdev, b));
  2135. bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
  2136. bio->bi_bdev = rdev->bdev;
  2137. bio->bi_end_io = raid1_end_read_request;
  2138. bio->bi_rw = READ | do_sync;
  2139. bio->bi_private = r1_bio;
  2140. if (max_sectors < r1_bio->sectors) {
  2141. /* Drat - have to split this up more */
  2142. struct bio *mbio = r1_bio->master_bio;
  2143. int sectors_handled = (r1_bio->sector + max_sectors
  2144. - mbio->bi_iter.bi_sector);
  2145. r1_bio->sectors = max_sectors;
  2146. spin_lock_irq(&conf->device_lock);
  2147. if (mbio->bi_phys_segments == 0)
  2148. mbio->bi_phys_segments = 2;
  2149. else
  2150. mbio->bi_phys_segments++;
  2151. spin_unlock_irq(&conf->device_lock);
  2152. generic_make_request(bio);
  2153. bio = NULL;
  2154. r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
  2155. r1_bio->master_bio = mbio;
  2156. r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
  2157. r1_bio->state = 0;
  2158. set_bit(R1BIO_ReadError, &r1_bio->state);
  2159. r1_bio->mddev = mddev;
  2160. r1_bio->sector = mbio->bi_iter.bi_sector +
  2161. sectors_handled;
  2162. goto read_more;
  2163. } else
  2164. generic_make_request(bio);
  2165. }
  2166. }
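/* Main per-array worker thread: flush queued writes, then walk the
 * retry list and hand each r1bio to the sync/write/read error
 * handlers above.
 */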
  2167. static void raid1d(struct md_thread *thread)
  2168. {
  2169. struct mddev *mddev = thread->mddev;
  2170. struct r1bio *r1_bio;
  2171. unsigned long flags;
  2172. struct r1conf *conf = mddev->private;
  2173. struct list_head *head = &conf->retry_list;
  2174. struct blk_plug plug;
  2175. md_check_recovery(mddev);
  2176. blk_start_plug(&plug);
  2177. for (;;) {
  2178. flush_pending_writes(conf);
  2179. spin_lock_irqsave(&conf->device_lock, flags);
  2180. if (list_empty(head)) {
  2181. spin_unlock_irqrestore(&conf->device_lock, flags);
  2182. break;
  2183. }
  2184. r1_bio = list_entry(head->prev, struct r1bio, retry_list);
  2185. list_del(head->prev);
  2186. conf->nr_queued--;
  2187. spin_unlock_irqrestore(&conf->device_lock, flags);
  2188. mddev = r1_bio->mddev;
  2189. conf = mddev->private;
  2190. if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
  2191. if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
  2192. test_bit(R1BIO_WriteError, &r1_bio->state))
  2193. handle_sync_write_finished(conf, r1_bio);
  2194. else
  2195. sync_request_write(mddev, r1_bio);
  2196. } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
  2197. test_bit(R1BIO_WriteError, &r1_bio->state))
  2198. handle_write_finished(conf, r1_bio);
  2199. else if (test_bit(R1BIO_ReadError, &r1_bio->state))
  2200. handle_read_error(conf, r1_bio);
  2201. else
2202. /* just a partial read to be scheduled from a separate
  2203. * context
  2204. */
  2205. generic_make_request(r1_bio->bios[r1_bio->read_disk]);
  2206. cond_resched();
  2207. if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
  2208. md_check_recovery(mddev);
  2209. }
  2210. blk_finish_plug(&plug);
  2211. }
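/* Allocate the mempool of resync buffers (one resync window's worth)
 * used by sync_request().
 */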
  2212. static int init_resync(struct r1conf *conf)
  2213. {
  2214. int buffs;
  2215. buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
  2216. BUG_ON(conf->r1buf_pool);
  2217. conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
  2218. conf->poolinfo);
  2219. if (!conf->r1buf_pool)
  2220. return -ENOMEM;
  2221. conf->next_resync = 0;
  2222. return 0;
  2223. }
  2224. /*
  2225. * perform a "sync" on one "block"
  2226. *
  2227. * We need to make sure that no normal I/O request - particularly write
  2228. * requests - conflict with active sync requests.
  2229. *
  2230. * This is achieved by tracking pending requests and a 'barrier' concept
  2231. * that can be installed to exclude normal IO requests.
  2232. */
  2233. static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster)
  2234. {
  2235. struct r1conf *conf = mddev->private;
  2236. struct r1bio *r1_bio;
  2237. struct bio *bio;
  2238. sector_t max_sector, nr_sectors;
  2239. int disk = -1;
  2240. int i;
  2241. int wonly = -1;
  2242. int write_targets = 0, read_targets = 0;
  2243. sector_t sync_blocks;
  2244. int still_degraded = 0;
  2245. int good_sectors = RESYNC_SECTORS;
  2246. int min_bad = 0; /* number of sectors that are bad in all devices */
  2247. if (!conf->r1buf_pool)
  2248. if (init_resync(conf))
  2249. return 0;
  2250. max_sector = mddev->dev_sectors;
  2251. if (sector_nr >= max_sector) {
  2252. /* If we aborted, we need to abort the
  2253. * sync on the 'current' bitmap chunk (there will
2254. * only be one in raid1 resync).
2255. * We can find the current address in mddev->curr_resync.
  2256. */
  2257. if (mddev->curr_resync < max_sector) /* aborted */
  2258. bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
  2259. &sync_blocks, 1);
  2260. else /* completed sync */
  2261. conf->fullsync = 0;
  2262. bitmap_close_sync(mddev->bitmap);
  2263. close_sync(conf);
  2264. return 0;
  2265. }
  2266. if (mddev->bitmap == NULL &&
  2267. mddev->recovery_cp == MaxSector &&
  2268. !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
  2269. conf->fullsync == 0) {
  2270. *skipped = 1;
  2271. return max_sector - sector_nr;
  2272. }
2273. /* before building a request, check if we can skip these blocks.
2274. * This call to bitmap_start_sync doesn't actually record anything.
  2275. */
  2276. if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
  2277. !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
  2278. /* We can skip this block, and probably several more */
  2279. *skipped = 1;
  2280. return sync_blocks;
  2281. }
  2282. /*
  2283. * If there is non-resync activity waiting for a turn,
  2284. * and resync is going fast enough,
2285. * then let it through before starting on this new sync request.
  2286. */
  2287. if (!go_faster && conf->nr_waiting)
  2288. msleep_interruptible(1000);
  2289. bitmap_cond_end_sync(mddev->bitmap, sector_nr);
  2290. r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
  2291. raise_barrier(conf, sector_nr);
  2292. rcu_read_lock();
  2293. /*
  2294. * If we get a correctably read error during resync or recovery,
  2295. * we might want to read from a different device. So we
  2296. * flag all drives that could conceivably be read from for READ,
  2297. * and any others (which will be non-In_sync devices) for WRITE.
  2298. * If a read fails, we try reading from something else for which READ
  2299. * is OK.
  2300. */
  2301. r1_bio->mddev = mddev;
  2302. r1_bio->sector = sector_nr;
  2303. r1_bio->state = 0;
  2304. set_bit(R1BIO_IsSync, &r1_bio->state);
  2305. for (i = 0; i < conf->raid_disks * 2; i++) {
  2306. struct md_rdev *rdev;
  2307. bio = r1_bio->bios[i];
  2308. bio_reset(bio);
  2309. rdev = rcu_dereference(conf->mirrors[i].rdev);
  2310. if (rdev == NULL ||
  2311. test_bit(Faulty, &rdev->flags)) {
  2312. if (i < conf->raid_disks)
  2313. still_degraded = 1;
  2314. } else if (!test_bit(In_sync, &rdev->flags)) {
  2315. bio->bi_rw = WRITE;
  2316. bio->bi_end_io = end_sync_write;
  2317. write_targets ++;
  2318. } else {
  2319. /* may need to read from here */
  2320. sector_t first_bad = MaxSector;
  2321. int bad_sectors;
  2322. if (is_badblock(rdev, sector_nr, good_sectors,
  2323. &first_bad, &bad_sectors)) {
  2324. if (first_bad > sector_nr)
  2325. good_sectors = first_bad - sector_nr;
  2326. else {
  2327. bad_sectors -= (sector_nr - first_bad);
  2328. if (min_bad == 0 ||
  2329. min_bad > bad_sectors)
  2330. min_bad = bad_sectors;
  2331. }
  2332. }
  2333. if (sector_nr < first_bad) {
  2334. if (test_bit(WriteMostly, &rdev->flags)) {
  2335. if (wonly < 0)
  2336. wonly = i;
  2337. } else {
  2338. if (disk < 0)
  2339. disk = i;
  2340. }
  2341. bio->bi_rw = READ;
  2342. bio->bi_end_io = end_sync_read;
  2343. read_targets++;
  2344. } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
  2345. test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
  2346. !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
  2347. /*
  2348. * The device is suitable for reading (InSync),
  2349. * but has bad block(s) here. Let's try to correct them,
  2350. * if we are doing resync or repair. Otherwise, leave
  2351. * this device alone for this sync request.
  2352. */
  2353. bio->bi_rw = WRITE;
  2354. bio->bi_end_io = end_sync_write;
  2355. write_targets++;
  2356. }
  2357. }
  2358. if (bio->bi_end_io) {
  2359. atomic_inc(&rdev->nr_pending);
  2360. bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
  2361. bio->bi_bdev = rdev->bdev;
  2362. bio->bi_private = r1_bio;
  2363. }
  2364. }
  2365. rcu_read_unlock();
  2366. if (disk < 0)
  2367. disk = wonly;
  2368. r1_bio->read_disk = disk;
  2369. if (read_targets == 0 && min_bad > 0) {
  2370. /* These sectors are bad on all InSync devices, so we
  2371. * need to mark them bad on all write targets
  2372. */
  2373. int ok = 1;
  2374. for (i = 0 ; i < conf->raid_disks * 2 ; i++)
  2375. if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
  2376. struct md_rdev *rdev = conf->mirrors[i].rdev;
  2377. ok = rdev_set_badblocks(rdev, sector_nr,
  2378. min_bad, 0
  2379. ) && ok;
  2380. }
  2381. set_bit(MD_CHANGE_DEVS, &mddev->flags);
  2382. *skipped = 1;
  2383. put_buf(r1_bio);
  2384. if (!ok) {
  2385. /* Cannot record the badblocks, so need to
  2386. * abort the resync.
  2387. * If there are multiple read targets, could just
  2388. * fail the really bad ones ???
  2389. */
  2390. conf->recovery_disabled = mddev->recovery_disabled;
  2391. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  2392. return 0;
  2393. } else
  2394. return min_bad;
  2395. }
  2396. if (min_bad > 0 && min_bad < good_sectors) {
  2397. /* only resync enough to reach the next bad->good
  2398. * transition */
  2399. good_sectors = min_bad;
  2400. }
  2401. if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
  2402. /* extra read targets are also write targets */
  2403. write_targets += read_targets-1;
  2404. if (write_targets == 0 || read_targets == 0) {
  2405. /* There is nowhere to write, so all non-sync
  2406. * drives must be failed - so we are finished
  2407. */
  2408. sector_t rv;
  2409. if (min_bad > 0)
  2410. max_sector = sector_nr + min_bad;
  2411. rv = max_sector - sector_nr;
  2412. *skipped = 1;
  2413. put_buf(r1_bio);
  2414. return rv;
  2415. }
  2416. if (max_sector > mddev->resync_max)
  2417. max_sector = mddev->resync_max; /* Don't do IO beyond here */
  2418. if (max_sector > sector_nr + good_sectors)
  2419. max_sector = sector_nr + good_sectors;
  2420. nr_sectors = 0;
  2421. sync_blocks = 0;
  2422. do {
  2423. struct page *page;
  2424. int len = PAGE_SIZE;
  2425. if (sector_nr + (len>>9) > max_sector)
  2426. len = (max_sector - sector_nr) << 9;
  2427. if (len == 0)
  2428. break;
  2429. if (sync_blocks == 0) {
  2430. if (!bitmap_start_sync(mddev->bitmap, sector_nr,
  2431. &sync_blocks, still_degraded) &&
  2432. !conf->fullsync &&
  2433. !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
  2434. break;
  2435. BUG_ON(sync_blocks < (PAGE_SIZE>>9));
  2436. if ((len >> 9) > sync_blocks)
  2437. len = sync_blocks<<9;
  2438. }
  2439. for (i = 0 ; i < conf->raid_disks * 2; i++) {
  2440. bio = r1_bio->bios[i];
  2441. if (bio->bi_end_io) {
  2442. page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
  2443. if (bio_add_page(bio, page, len, 0) == 0) {
  2444. /* stop here */
  2445. bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
  2446. while (i > 0) {
  2447. i--;
  2448. bio = r1_bio->bios[i];
  2449. if (bio->bi_end_io==NULL)
  2450. continue;
  2451. /* remove last page from this bio */
  2452. bio->bi_vcnt--;
  2453. bio->bi_iter.bi_size -= len;
  2454. __clear_bit(BIO_SEG_VALID, &bio->bi_flags);
  2455. }
  2456. goto bio_full;
  2457. }
  2458. }
  2459. }
  2460. nr_sectors += len>>9;
  2461. sector_nr += len>>9;
  2462. sync_blocks -= (len>>9);
  2463. } while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
  2464. bio_full:
  2465. r1_bio->sectors = nr_sectors;
  2466. /* For a user-requested sync, we read all readable devices and do a
  2467. * compare
  2468. */
  2469. if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
  2470. atomic_set(&r1_bio->remaining, read_targets);
  2471. for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
  2472. bio = r1_bio->bios[i];
  2473. if (bio->bi_end_io == end_sync_read) {
  2474. read_targets--;
  2475. md_sync_acct(bio->bi_bdev, nr_sectors);
  2476. generic_make_request(bio);
  2477. }
  2478. }
  2479. } else {
  2480. atomic_set(&r1_bio->remaining, 1);
  2481. bio = r1_bio->bios[r1_bio->read_disk];
  2482. md_sync_acct(bio->bi_bdev, nr_sectors);
  2483. generic_make_request(bio);
  2484. }
  2485. return nr_sectors;
  2486. }
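/* .size hook: a raid1 array is only as large as one member device,
 * so report the requested size or the per-device size.
 */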
  2487. static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
  2488. {
  2489. if (sectors)
  2490. return sectors;
  2491. return mddev->dev_sectors;
  2492. }
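/* Build the per-array r1conf: mirror table, bio pools, locks and the
 * raid1d thread. Returns an ERR_PTR() on failure.
 */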
  2493. static struct r1conf *setup_conf(struct mddev *mddev)
  2494. {
  2495. struct r1conf *conf;
  2496. int i;
  2497. struct raid1_info *disk;
  2498. struct md_rdev *rdev;
  2499. int err = -ENOMEM;
  2500. conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
  2501. if (!conf)
  2502. goto abort;
  2503. conf->mirrors = kzalloc(sizeof(struct raid1_info)
  2504. * mddev->raid_disks * 2,
  2505. GFP_KERNEL);
  2506. if (!conf->mirrors)
  2507. goto abort;
  2508. conf->tmppage = alloc_page(GFP_KERNEL);
  2509. if (!conf->tmppage)
  2510. goto abort;
  2511. conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
  2512. if (!conf->poolinfo)
  2513. goto abort;
  2514. conf->poolinfo->raid_disks = mddev->raid_disks * 2;
  2515. conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
  2516. r1bio_pool_free,
  2517. conf->poolinfo);
  2518. if (!conf->r1bio_pool)
  2519. goto abort;
  2520. conf->poolinfo->mddev = mddev;
  2521. err = -EINVAL;
  2522. spin_lock_init(&conf->device_lock);
  2523. rdev_for_each(rdev, mddev) {
  2524. struct request_queue *q;
  2525. int disk_idx = rdev->raid_disk;
  2526. if (disk_idx >= mddev->raid_disks
  2527. || disk_idx < 0)
  2528. continue;
  2529. if (test_bit(Replacement, &rdev->flags))
  2530. disk = conf->mirrors + mddev->raid_disks + disk_idx;
  2531. else
  2532. disk = conf->mirrors + disk_idx;
  2533. if (disk->rdev)
  2534. goto abort;
  2535. disk->rdev = rdev;
  2536. q = bdev_get_queue(rdev->bdev);
  2537. if (q->merge_bvec_fn)
  2538. mddev->merge_check_needed = 1;
  2539. disk->head_position = 0;
  2540. disk->seq_start = MaxSector;
  2541. }
  2542. conf->raid_disks = mddev->raid_disks;
  2543. conf->mddev = mddev;
  2544. INIT_LIST_HEAD(&conf->retry_list);
  2545. spin_lock_init(&conf->resync_lock);
  2546. init_waitqueue_head(&conf->wait_barrier);
  2547. bio_list_init(&conf->pending_bio_list);
  2548. conf->pending_count = 0;
  2549. conf->recovery_disabled = mddev->recovery_disabled - 1;
  2550. conf->start_next_window = MaxSector;
  2551. conf->current_window_requests = conf->next_window_requests = 0;
  2552. err = -EIO;
  2553. for (i = 0; i < conf->raid_disks * 2; i++) {
  2554. disk = conf->mirrors + i;
  2555. if (i < conf->raid_disks &&
  2556. disk[conf->raid_disks].rdev) {
  2557. /* This slot has a replacement. */
  2558. if (!disk->rdev) {
  2559. /* No original, just make the replacement
  2560. * a recovering spare
  2561. */
  2562. disk->rdev =
  2563. disk[conf->raid_disks].rdev;
  2564. disk[conf->raid_disks].rdev = NULL;
  2565. } else if (!test_bit(In_sync, &disk->rdev->flags))
  2566. /* Original is not in_sync - bad */
  2567. goto abort;
  2568. }
  2569. if (!disk->rdev ||
  2570. !test_bit(In_sync, &disk->rdev->flags)) {
  2571. disk->head_position = 0;
  2572. if (disk->rdev &&
  2573. (disk->rdev->saved_raid_disk < 0))
  2574. conf->fullsync = 1;
  2575. }
  2576. }
  2577. err = -ENOMEM;
  2578. conf->thread = md_register_thread(raid1d, mddev, "raid1");
  2579. if (!conf->thread) {
  2580. printk(KERN_ERR
  2581. "md/raid1:%s: couldn't allocate thread\n",
  2582. mdname(mddev));
  2583. goto abort;
  2584. }
  2585. return conf;
  2586. abort:
  2587. if (conf) {
  2588. if (conf->r1bio_pool)
  2589. mempool_destroy(conf->r1bio_pool);
  2590. kfree(conf->mirrors);
  2591. safe_put_page(conf->tmppage);
  2592. kfree(conf->poolinfo);
  2593. kfree(conf);
  2594. }
  2595. return ERR_PTR(err);
  2596. }
  2597. static int stop(struct mddev *mddev);
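/* .run hook: validate level/reshape parameters, reuse the conf built
 * by setup_conf() or a takeover, recompute the degraded count and
 * publish queue limits before the array goes live.
 */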
  2598. static int run(struct mddev *mddev)
  2599. {
  2600. struct r1conf *conf;
  2601. int i;
  2602. struct md_rdev *rdev;
  2603. int ret;
  2604. bool discard_supported = false;
  2605. if (mddev->level != 1) {
  2606. printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
  2607. mdname(mddev), mddev->level);
  2608. return -EIO;
  2609. }
  2610. if (mddev->reshape_position != MaxSector) {
  2611. printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
  2612. mdname(mddev));
  2613. return -EIO;
  2614. }
  2615. /*
  2616. * copy the already verified devices into our private RAID1
  2617. * bookkeeping area. [whatever we allocate in run(),
  2618. * should be freed in stop()]
  2619. */
  2620. if (mddev->private == NULL)
  2621. conf = setup_conf(mddev);
  2622. else
  2623. conf = mddev->private;
  2624. if (IS_ERR(conf))
  2625. return PTR_ERR(conf);
  2626. if (mddev->queue)
  2627. blk_queue_max_write_same_sectors(mddev->queue, 0);
  2628. rdev_for_each(rdev, mddev) {
  2629. if (!mddev->gendisk)
  2630. continue;
  2631. disk_stack_limits(mddev->gendisk, rdev->bdev,
  2632. rdev->data_offset << 9);
  2633. if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
  2634. discard_supported = true;
  2635. }
  2636. mddev->degraded = 0;
  2637. for (i=0; i < conf->raid_disks; i++)
  2638. if (conf->mirrors[i].rdev == NULL ||
  2639. !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
  2640. test_bit(Faulty, &conf->mirrors[i].rdev->flags))
  2641. mddev->degraded++;
  2642. if (conf->raid_disks - mddev->degraded == 1)
  2643. mddev->recovery_cp = MaxSector;
  2644. if (mddev->recovery_cp != MaxSector)
  2645. printk(KERN_NOTICE "md/raid1:%s: not clean"
  2646. " -- starting background reconstruction\n",
  2647. mdname(mddev));
  2648. printk(KERN_INFO
  2649. "md/raid1:%s: active with %d out of %d mirrors\n",
  2650. mdname(mddev), mddev->raid_disks - mddev->degraded,
  2651. mddev->raid_disks);
  2652. /*
  2653. * Ok, everything is just fine now
  2654. */
  2655. mddev->thread = conf->thread;
  2656. conf->thread = NULL;
  2657. mddev->private = conf;
  2658. md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
  2659. if (mddev->queue) {
  2660. mddev->queue->backing_dev_info.congested_fn = raid1_congested;
  2661. mddev->queue->backing_dev_info.congested_data = mddev;
  2662. blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
  2663. if (discard_supported)
  2664. queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
  2665. mddev->queue);
  2666. else
  2667. queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
  2668. mddev->queue);
  2669. }
  2670. ret = md_integrity_register(mddev);
  2671. if (ret)
  2672. stop(mddev);
  2673. return ret;
  2674. }
  2675. static int stop(struct mddev *mddev)
  2676. {
  2677. struct r1conf *conf = mddev->private;
  2678. struct bitmap *bitmap = mddev->bitmap;
  2679. /* wait for behind writes to complete */
  2680. if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
  2681. printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
  2682. mdname(mddev));
  2683. /* need to kick something here to make sure I/O goes? */
  2684. wait_event(bitmap->behind_wait,
  2685. atomic_read(&bitmap->behind_writes) == 0);
  2686. }
  2687. freeze_array(conf, 0);
  2688. unfreeze_array(conf);
  2689. md_unregister_thread(&mddev->thread);
  2690. if (conf->r1bio_pool)
  2691. mempool_destroy(conf->r1bio_pool);
  2692. kfree(conf->mirrors);
  2693. safe_put_page(conf->tmppage);
  2694. kfree(conf->poolinfo);
  2695. kfree(conf);
  2696. mddev->private = NULL;
  2697. return 0;
  2698. }
  2699. static int raid1_resize(struct mddev *mddev, sector_t sectors)
  2700. {
  2701. /* no resync is happening, and there is enough space
  2702. * on all devices, so we can resize.
  2703. * We need to make sure resync covers any new space.
  2704. * If the array is shrinking we should possibly wait until
  2705. * any io in the removed space completes, but it hardly seems
  2706. * worth it.
  2707. */
  2708. sector_t newsize = raid1_size(mddev, sectors, 0);
  2709. if (mddev->external_size &&
  2710. mddev->array_sectors > newsize)
  2711. return -EINVAL;
  2712. if (mddev->bitmap) {
  2713. int ret = bitmap_resize(mddev->bitmap, newsize, 0, 0);
  2714. if (ret)
  2715. return ret;
  2716. }
  2717. md_set_array_sectors(mddev, newsize);
  2718. set_capacity(mddev->gendisk, mddev->array_sectors);
  2719. revalidate_disk(mddev->gendisk);
  2720. if (sectors > mddev->dev_sectors &&
  2721. mddev->recovery_cp > mddev->dev_sectors) {
  2722. mddev->recovery_cp = mddev->dev_sectors;
  2723. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  2724. }
  2725. mddev->dev_sectors = sectors;
  2726. mddev->resync_max_sectors = sectors;
  2727. return 0;
  2728. }
  2729. static int raid1_reshape(struct mddev *mddev)
  2730. {
  2731. /* We need to:
  2732. * 1/ resize the r1bio_pool
  2733. * 2/ resize conf->mirrors
  2734. *
  2735. * We allocate a new r1bio_pool if we can.
  2736. * Then raise a device barrier and wait until all IO stops.
  2737. * Then resize conf->mirrors and swap in the new r1bio pool.
  2738. *
  2739. * At the same time, we "pack" the devices so that all the missing
  2740. * devices have the higher raid_disk numbers.
  2741. */
  2742. mempool_t *newpool, *oldpool;
  2743. struct pool_info *newpoolinfo;
  2744. struct raid1_info *newmirrors;
  2745. struct r1conf *conf = mddev->private;
  2746. int cnt, raid_disks;
  2747. unsigned long flags;
  2748. int d, d2, err;
  2749. /* Cannot change chunk_size, layout, or level */
  2750. if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
  2751. mddev->layout != mddev->new_layout ||
  2752. mddev->level != mddev->new_level) {
  2753. mddev->new_chunk_sectors = mddev->chunk_sectors;
  2754. mddev->new_layout = mddev->layout;
  2755. mddev->new_level = mddev->level;
  2756. return -EINVAL;
  2757. }
  2758. err = md_allow_write(mddev);
  2759. if (err)
  2760. return err;
  2761. raid_disks = mddev->raid_disks + mddev->delta_disks;
  2762. if (raid_disks < conf->raid_disks) {
2763. cnt = 0;
2764. for (d = 0; d < conf->raid_disks; d++)
  2765. if (conf->mirrors[d].rdev)
  2766. cnt++;
  2767. if (cnt > raid_disks)
  2768. return -EBUSY;
  2769. }
  2770. newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
  2771. if (!newpoolinfo)
  2772. return -ENOMEM;
  2773. newpoolinfo->mddev = mddev;
  2774. newpoolinfo->raid_disks = raid_disks * 2;
  2775. newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
  2776. r1bio_pool_free, newpoolinfo);
  2777. if (!newpool) {
  2778. kfree(newpoolinfo);
  2779. return -ENOMEM;
  2780. }
  2781. newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2,
  2782. GFP_KERNEL);
  2783. if (!newmirrors) {
  2784. kfree(newpoolinfo);
  2785. mempool_destroy(newpool);
  2786. return -ENOMEM;
  2787. }
  2788. freeze_array(conf, 0);
  2789. /* ok, everything is stopped */
  2790. oldpool = conf->r1bio_pool;
  2791. conf->r1bio_pool = newpool;
  2792. for (d = d2 = 0; d < conf->raid_disks; d++) {
  2793. struct md_rdev *rdev = conf->mirrors[d].rdev;
  2794. if (rdev && rdev->raid_disk != d2) {
  2795. sysfs_unlink_rdev(mddev, rdev);
  2796. rdev->raid_disk = d2;
  2797. sysfs_unlink_rdev(mddev, rdev);
  2798. if (sysfs_link_rdev(mddev, rdev))
  2799. printk(KERN_WARNING
  2800. "md/raid1:%s: cannot register rd%d\n",
  2801. mdname(mddev), rdev->raid_disk);
  2802. }
  2803. if (rdev)
  2804. newmirrors[d2++].rdev = rdev;
  2805. }
  2806. kfree(conf->mirrors);
  2807. conf->mirrors = newmirrors;
  2808. kfree(conf->poolinfo);
  2809. conf->poolinfo = newpoolinfo;
  2810. spin_lock_irqsave(&conf->device_lock, flags);
  2811. mddev->degraded += (raid_disks - conf->raid_disks);
  2812. spin_unlock_irqrestore(&conf->device_lock, flags);
  2813. conf->raid_disks = mddev->raid_disks = raid_disks;
  2814. mddev->delta_disks = 0;
  2815. unfreeze_array(conf);
  2816. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  2817. md_wakeup_thread(mddev->thread);
  2818. mempool_destroy(oldpool);
  2819. return 0;
  2820. }
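/* .quiesce hook: state 1 freezes the array (all I/O drained), state 0
 * thaws it, and state 2 just wakes barrier waiters so a suspend can
 * make progress.
 */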
  2821. static void raid1_quiesce(struct mddev *mddev, int state)
  2822. {
  2823. struct r1conf *conf = mddev->private;
  2824. switch(state) {
  2825. case 2: /* wake for suspend */
  2826. wake_up(&conf->wait_barrier);
  2827. break;
  2828. case 1:
  2829. freeze_array(conf, 0);
  2830. break;
  2831. case 0:
  2832. unfreeze_array(conf);
  2833. break;
  2834. }
  2835. }
  2836. static void *raid1_takeover(struct mddev *mddev)
  2837. {
  2838. /* raid1 can take over:
  2839. * raid5 with 2 devices, any layout or chunk size
  2840. */
  2841. if (mddev->level == 5 && mddev->raid_disks == 2) {
  2842. struct r1conf *conf;
  2843. mddev->new_level = 1;
  2844. mddev->new_layout = 0;
  2845. mddev->new_chunk_sectors = 0;
  2846. conf = setup_conf(mddev);
  2847. if (!IS_ERR(conf))
  2848. /* Array must appear to be quiesced */
  2849. conf->array_frozen = 1;
  2850. return conf;
  2851. }
  2852. return ERR_PTR(-EINVAL);
  2853. }
  2854. static struct md_personality raid1_personality =
  2855. {
  2856. .name = "raid1",
  2857. .level = 1,
  2858. .owner = THIS_MODULE,
  2859. .make_request = make_request,
  2860. .run = run,
  2861. .stop = stop,
  2862. .status = status,
  2863. .error_handler = error,
  2864. .hot_add_disk = raid1_add_disk,
  2865. .hot_remove_disk= raid1_remove_disk,
  2866. .spare_active = raid1_spare_active,
  2867. .sync_request = sync_request,
  2868. .resize = raid1_resize,
  2869. .size = raid1_size,
  2870. .check_reshape = raid1_reshape,
  2871. .quiesce = raid1_quiesce,
  2872. .takeover = raid1_takeover,
  2873. };
  2874. static int __init raid_init(void)
  2875. {
  2876. return register_md_personality(&raid1_personality);
  2877. }
  2878. static void raid_exit(void)
  2879. {
  2880. unregister_md_personality(&raid1_personality);
  2881. }
  2882. module_init(raid_init);
  2883. module_exit(raid_exit);
  2884. MODULE_LICENSE("GPL");
  2885. MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
  2886. MODULE_ALIAS("md-personality-3"); /* RAID1 */
  2887. MODULE_ALIAS("md-raid1");
  2888. MODULE_ALIAS("md-level-1");
  2889. module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);