/*
   md.c : Multiple Devices driver for Linux
	 Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
        or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
        adding a device to an array when it has incompatible metadata
     pr_info() for every interesting, very rare event, like an array starting
        or stopping, or resync starting or stopping
     pr_debug() for everything else.
*/

#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>

#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

struct md_cluster_operations *md_cluster_ops;
EXPORT_SYMBOL(md_cluster_ops);
struct module *md_cluster_mod;
EXPORT_SYMBOL(md_cluster_mod);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
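/*
 * Worked example of the decay rule above (illustrative, not driver code):
 * an rdev that accumulated 16 corrected read errors and then saw three
 * error-free hours is treated as having 16 >> 3 = 2 errors, so only a
 * fresh burst of errors can reach the default limit of 20 and eject it.
 */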
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
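/*
 * Example (illustrative only): a nonzero per-array value takes precedence
 * over the global sysctl, as the two helpers above show.  So
 *
 *	echo 50000 > /sys/block/md0/md/sync_speed_min
 *
 * raises the guaranteed resync floor for md0 alone, while
 * /proc/sys/dev/raid/speed_limit_min continues to govern every array
 * that has not set its own value.
 */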
static struct ctl_table_header *raid_table_header;

static struct ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static struct ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/*
 * The original mechanism for creating an md device is to create
 * a device node in /dev and to open it.  This causes races with device-close.
 * The preferred method is to write to the "new_array" module parameter.
 * This can avoid races.
 * Setting create_on_open to false disables the original mechanism
 * so all the races disappear.
 */
static bool create_on_open = true;

/* bio_alloc_mddev
 * like bio_alloc, but allocates from the array's private bio set
 * when one exists
 */
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    struct mddev *mddev)
{
	struct bio *b;

	if (!mddev || !mddev->bio_set)
		return bio_alloc(gfp_mask, nr_iovecs);

	b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
	if (!b)
		return NULL;
	return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

static struct bio *md_bio_alloc_sync(struct mddev *mddev)
{
	if (!mddev || !mddev->sync_set)
		return bio_alloc(GFP_NOIO, 1);

	return bio_alloc_bioset(GFP_NOIO, 1, mddev->sync_set);
}
/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);
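/*
 * Example (illustrative only): a userspace monitor can sleep until the
 * event count changes, much as mdadm --monitor does.  Error handling is
 * omitted:
 *
 *	char buf[4096];
 *	int fd = open("/proc/mdstat", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *
 *	read(fd, buf, sizeof(buf));
 *	while (poll(&pfd, 1, -1) > 0) {
 *		lseek(fd, 0, SEEK_SET);
 *		read(fd, buf, sizeof(buf));
 *	}
 *
 * Each md_new_event() above causes the poll() to return with POLLPRI set.
 */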
/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);

/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)
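/*
 * Example (illustrative only): typical use of the iterator.  The macro
 * manages the refcounts itself, so a body that runs to completion needs
 * no explicit mddev_get()/mddev_put():
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		pr_info("%s: %d disks\n", mdname(mddev), mddev->raid_disks);
 */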
/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static bool is_suspended(struct mddev *mddev, struct bio *bio)
{
	if (mddev->suspended)
		return true;
	if (bio_data_dir(bio) != WRITE)
		return false;
	if (mddev->suspend_lo >= mddev->suspend_hi)
		return false;
	if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
		return false;
	if (bio_end_sector(bio) < mddev->suspend_lo)
		return false;
	return true;
}

void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
	rcu_read_lock();
	if (is_suspended(mddev, bio)) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!is_suspended(mddev, bio))
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	if (!mddev->pers->make_request(mddev, bio)) {
		atomic_dec(&mddev->active_io);
		wake_up(&mddev->sb_wait);
		goto check_suspended;
	}

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);
}
EXPORT_SYMBOL(md_handle_request);

static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	struct mddev *mddev = q->queuedata;
	unsigned int sectors;
	int cpu;

	blk_queue_split(q, &bio);

	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return BLK_QC_T_NONE;
	}
	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
		if (bio_sectors(bio) != 0)
			bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	/* bio could become mergeable after passing it to the underlying layer */
	bio->bi_opf &= ~REQ_NOMERGE;

	md_handle_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
	part_stat_unlock();

	return BLK_QC_T_NONE;
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once mddev_detach() is called and completes, the module will be
 * completely unused.
 */
void mddev_suspend(struct mddev *mddev)
{
	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (mddev->suspended++)
		return;
	synchronize_rcu();
	wake_up(&mddev->sb_wait);
	set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
	smp_mb__after_atomic();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
	wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));

	del_timer_sync(&mddev->safemode_timer);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(struct mddev *mddev)
{
	lockdep_assert_held(&mddev->reconfig_mutex);
	if (--mddev->suspended)
		return;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);
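/*
 * Example (illustrative only): the canonical pattern for a caller that
 * must reconfigure the array with no IO in flight is to bracket the
 * change with the pair above, under reconfig_mutex:
 *
 *	mddev_suspend(mddev);
 *	... alter layout, chunk size, etc. ...
 *	mddev_resume(mddev);
 *
 * Suspension nests: mddev->suspended counts callers, so a second
 * mddev_suspend() returns at once and only the final matching
 * mddev_resume() lets IO flow again.
 */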
int mddev_congested(struct mddev *mddev, int bits)
{
	struct md_personality *pers = mddev->pers;
	int ret = 0;

	rcu_read_lock();
	if (mddev->suspended)
		ret = 1;
	else if (pers && pers->congested)
		ret = pers->congested(mddev, bits);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(mddev_congested);
static int md_congested(void *data, int bits)
{
	struct mddev *mddev = data;
	return mddev_congested(mddev, bits);
}

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when the request finishes, one after
			 * we re-acquire the rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bio_set_dev(bi, rdev->bdev);
			bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			atomic_inc(&mddev->flush_pending);
			submit_bio(bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	/*
	 * flush_bio must be reset before calling into md_handle_request to
	 * avoid a deadlock: other bios that already passed the
	 * md_handle_request() suspend check may be waiting on this one,
	 * while the md_handle_request() below may in turn wait on those
	 * bios because of that same suspend check.
	 */
	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);

	if (bio->bi_iter.bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio);
	else {
		bio->bi_opf &= ~REQ_PREFLUSH;
		md_handle_request(mddev, bio);
	}
}

void md_flush_request(struct mddev *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio,
			    mddev->lock);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->lock);

	INIT_WORK(&mddev->flush_work, submit_flushes);
	queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);
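/*
 * Example (illustrative only): a personality's make_request typically
 * diverts flushes here before doing its own queuing, e.g.:
 *
 *	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 *		md_flush_request(mddev, bio);
 *		return true;
 *	}
 *
 * md_flush_request() serialises on mddev->flush_bio, submit_flushes()
 * fans a PREFLUSH out to every active rdev, and once all of them
 * complete, md_submit_flush_data() re-issues the data portion of the
 * original bio with REQ_PREFLUSH cleared.
 */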
static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
	struct bio_set *bs = NULL, *sync_bs = NULL;

	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del_init(&mddev->all_mddevs);
		bs = mddev->bio_set;
		sync_bs = mddev->sync_set;
		mddev->bio_set = NULL;
		mddev->sync_set = NULL;
		if (mddev->gendisk) {
			/* We did a probe so need to clean up.  Call
			 * queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			queue_work(md_misc_wq, &mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
	if (bs)
		bioset_free(bs);
	if (sync_bs)
		bioset_free(sync_bs);
}
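/*
 * Example (illustrative only): mddev_find() returns with the refcount
 * raised, so every successful lookup must be paired with mddev_put(),
 * or the "not configured" cleanup above can never run:
 *
 *	struct mddev *mddev = mddev_find(dev);
 *
 *	if (!mddev)
 *		return -ENODEV;
 *	... use mddev ...
 *	mddev_put(mddev);
 */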
  474. static void md_safemode_timeout(unsigned long data);
  475. void mddev_init(struct mddev *mddev)
  476. {
  477. mutex_init(&mddev->open_mutex);
  478. mutex_init(&mddev->reconfig_mutex);
  479. mutex_init(&mddev->bitmap_info.mutex);
  480. INIT_LIST_HEAD(&mddev->disks);
  481. INIT_LIST_HEAD(&mddev->all_mddevs);
  482. setup_timer(&mddev->safemode_timer, md_safemode_timeout,
  483. (unsigned long) mddev);
  484. atomic_set(&mddev->active, 1);
  485. atomic_set(&mddev->openers, 0);
  486. atomic_set(&mddev->active_io, 0);
  487. spin_lock_init(&mddev->lock);
  488. atomic_set(&mddev->flush_pending, 0);
  489. init_waitqueue_head(&mddev->sb_wait);
  490. init_waitqueue_head(&mddev->recovery_wait);
  491. mddev->reshape_position = MaxSector;
  492. mddev->reshape_backwards = 0;
  493. mddev->last_sync_action = "none";
  494. mddev->resync_min = 0;
  495. mddev->resync_max = MaxSector;
  496. mddev->level = LEVEL_NONE;
  497. }
  498. EXPORT_SYMBOL_GPL(mddev_init);
  499. static struct mddev *mddev_find(dev_t unit)
  500. {
  501. struct mddev *mddev, *new = NULL;
  502. if (unit && MAJOR(unit) != MD_MAJOR)
  503. unit &= ~((1<<MdpMinorShift)-1);
  504. retry:
  505. spin_lock(&all_mddevs_lock);
  506. if (unit) {
  507. list_for_each_entry(mddev, &all_mddevs, all_mddevs)
  508. if (mddev->unit == unit) {
  509. mddev_get(mddev);
  510. spin_unlock(&all_mddevs_lock);
  511. kfree(new);
  512. return mddev;
  513. }
  514. if (new) {
  515. list_add(&new->all_mddevs, &all_mddevs);
  516. spin_unlock(&all_mddevs_lock);
  517. new->hold_active = UNTIL_IOCTL;
  518. return new;
  519. }
  520. } else if (new) {
  521. /* find an unused unit number */
  522. static int next_minor = 512;
  523. int start = next_minor;
  524. int is_free = 0;
  525. int dev = 0;
  526. while (!is_free) {
  527. dev = MKDEV(MD_MAJOR, next_minor);
  528. next_minor++;
  529. if (next_minor > MINORMASK)
  530. next_minor = 0;
  531. if (next_minor == start) {
  532. /* Oh dear, all in use. */
  533. spin_unlock(&all_mddevs_lock);
  534. kfree(new);
  535. return NULL;
  536. }
  537. is_free = 1;
  538. list_for_each_entry(mddev, &all_mddevs, all_mddevs)
  539. if (mddev->unit == dev) {
  540. is_free = 0;
  541. break;
  542. }
  543. }
  544. new->unit = dev;
  545. new->md_minor = MINOR(dev);
  546. new->hold_active = UNTIL_STOP;
  547. list_add(&new->all_mddevs, &all_mddevs);
  548. spin_unlock(&all_mddevs_lock);
  549. return new;
  550. }
  551. spin_unlock(&all_mddevs_lock);
  552. new = kzalloc(sizeof(*new), GFP_KERNEL);
  553. if (!new)
  554. return NULL;
  555. new->unit = unit;
  556. if (MAJOR(unit) == MD_MAJOR)
  557. new->md_minor = MINOR(unit);
  558. else
  559. new->md_minor = MINOR(unit) >> MdpMinorShift;
  560. mddev_init(new);
  561. goto retry;
  562. }

static struct attribute_group md_redundancy_group;

void mddev_unlock(struct mddev *mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So we set sysfs_active while the remove is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	wake_up(&mddev->sb_wait);
	spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);
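
/*
 * Look up a member device by its desc_nr.  Callers must hold
 * rcu_read_lock() (or otherwise pin the list) across the call and any
 * use of the returned rdev, as the list is walked with
 * rdev_for_each_rcu().
 */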
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}
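
/*
 * Resolve a personality either by numeric level or, when level is
 * LEVEL_NONE, by name.  pers_lock is assumed to be held by the caller
 * so that the returned entry cannot be unregistered while in use.
 */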
static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

static int alloc_disk_sb(struct md_rdev *rdev)
{
	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page)
		return -ENOMEM;
	return 0;
}

void md_rdev_clear(struct md_rdev *rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
	badblocks_exit(&rdev->badblocks);
}
EXPORT_SYMBOL_GPL(md_rdev_clear);
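
/*
 * Completion handler for superblock writes issued by md_super_write().
 * On error the device is failed via md_error(); if the failed write
 * was a failfast attempt on a device that is still not Faulty,
 * MD_SB_NEED_REWRITE is set so md_super_wait() reports -EAGAIN, and
 * LastDev marks this rdev so the retry is issued without MD_FAILFAST.
 */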
static void super_written(struct bio *bio)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (bio->bi_status) {
		pr_err("md: super_written gets error=%d\n", bio->bi_status);
		md_error(mddev, rdev);
		if (!test_bit(Faulty, &rdev->flags)
		    && (bio->bi_opf & MD_FAILFAST)) {
			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
			set_bit(LastDev, &rdev->flags);
		}
	} else
		clear_bit(LastDev, &rdev->flags);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	rdev_dec_pending(rdev, mddev);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio;
	int ff = 0;

	if (test_bit(Faulty, &rdev->flags))
		return;

	bio = md_bio_alloc_sync(mddev);

	atomic_inc(&rdev->nr_pending);

	bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
	    test_bit(FailFast, &rdev->flags) &&
	    !test_bit(LastDev, &rdev->flags))
		ff = MD_FAILFAST;
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff;

	atomic_inc(&mddev->pending_writes);
	submit_bio(bio);
}

int md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
	if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags))
		return -EAGAIN;
	return 0;
}
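
/*
 * Synchronously read or write @size bytes of @page at @sector.  For
 * metadata the sector is taken relative to sb_start; during a reshape
 * data I/O is offset by new_data_offset or data_offset as appropriate.
 * Returns 1 on success, 0 on failure.  A minimal, hypothetical usage
 * sketch (this mirrors the call in read_disk_sb() below):
 *
 *	if (!sync_page_io(rdev, 0, MD_SB_BYTES, rdev->sb_page,
 *			  REQ_OP_READ, 0, true))
 *		return -EIO;
 */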
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int op, int op_flags, bool metadata_op)
{
	struct bio *bio = md_bio_alloc_sync(rdev->mddev);
	int ret;

	if (metadata_op && rdev->meta_bdev)
		bio_set_dev(bio, rdev->meta_bdev);
	else
		bio_set_dev(bio, rdev->bdev);
	bio_set_op_attrs(bio, op, op_flags);
	if (metadata_op)
		bio->bi_iter.bi_sector = sector + rdev->sb_start;
	else if (rdev->mddev->reshape_position != MaxSector &&
		 (rdev->mddev->reshape_backwards ==
		  (sector >= rdev->mddev->reshape_position)))
		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
	else
		bio->bi_iter.bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);

	submit_bio_wait(bio);

	ret = !bio->bi_status;
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev *rdev, int size)
{
	char b[BDEVNAME_SIZE];

	if (rdev->sb_loaded)
		return 0;

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	pr_err("md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return sb1->set_uuid0 == sb2->set_uuid0 &&
	       sb1->set_uuid1 == sb2->set_uuid1 &&
	       sb1->set_uuid2 == sb2->set_uuid2 &&
	       sb1->set_uuid3 == sb2->set_uuid3;
}

static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}
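
/*
 * Fold a 32-bit sum to 16 bits, end-around-carry style; e.g.
 * 0x0001fffe folds as 0xfffe + 0x1 = 0xffff.  Two passes suffice
 * because the first pass can leave at most one carry bit above
 * bit 15.
 */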
static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

static unsigned int calc_sb_csum(mdp_super_t *sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}

/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *      The first time, mddev->raid_disks will be 0, and data from
 *      dev should be merged in.  Subsequent calls check that dev
 *      is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *      Update the superblock for rdev with data in mddev
 *      This does not write to disc.
 *
 */
struct super_type {
	char		*name;
	struct module	*owner;
	int		(*load_super)(struct md_rdev *rdev,
				      struct md_rdev *refdev,
				      int minor_version);
	int		(*validate_super)(struct mddev *mddev,
					  struct md_rdev *rdev);
	void		(*sync_super)(struct mddev *mddev,
				      struct md_rdev *rdev);
	unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
					       sector_t num_sectors);
	int		(*allow_new_offset)(struct md_rdev *rdev,
					    unsigned long long new_offset);
};

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	pr_warn("%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret)
		return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		pr_warn("md: invalid raid superblock magic on %s\n", b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		pr_warn("Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version, b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		pr_warn("md: invalid superblock checksum on %s\n", b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->new_data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;
	rdev->badblocks.shift = -1;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = page_address(refdev->sb_page);
		if (!md_uuid_equal(refsb, sb)) {
			pr_warn("md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!md_sb_equal(refsb, sb)) {
			pr_warn("md: %s has same UUID but different superblock to %s\n",
				b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * (not needed for Linear and RAID0 as metadata doesn't
	 * record this size)
	 */
	if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
	    sb->level >= 1)
		rdev->sectors = (sector_t)(2ULL << 32) - 2;

	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* bitmap can use 60 K after the 4K superblocks */
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
		mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
		mddev->reshape_backwards = 0;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
			if (mddev->delta_disks < 0)
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;
			mddev->bitmap_info.space =
				mddev->bitmap_info.default_space;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			(1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
			rdev->saved_raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
		if (desc->state & (1<<MD_DISK_FAILFAST))
			set_bit(FailFast, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_super_t *sb;
	struct md_rdev *rdev2;
	int next_spare = mddev->raid_disks;

	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync) {
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	rdev_for_each(rdev2, mddev) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
		if (test_bit(FailFast, &rdev2->flags))
			d->state |= (1<<MD_DISK_FAILFAST);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
	 */
	if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
	    rdev->mddev->level >= 1)
		num_sectors = (sector_t)(2ULL << 32) - 2;
	do {
		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
			       rdev->sb_page);
	} while (md_super_wait(rdev->mddev) < 0);
	return num_sectors;
}

static int
super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
{
	/* non-zero offset changes not possible with v0.90 */
	return new_offset == 0;
}

/*
 * version 1 superblock
 */
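
/*
 * Checksum the first 256 bytes plus two bytes per device role,
 * treating the superblock as little-endian 32-bit words (with one
 * trailing 16-bit word when max_dev is odd) and folding the 64-bit
 * sum to 32 bits.  sb_csum itself is zeroed for the calculation and
 * restored afterwards.
 */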
static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (; size >= 4; size -= 4)
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
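
/*
 * load_super for the v1.x formats.  minor_version selects where the
 * superblock lives: 0 = near the end of the device, 1 = at the start,
 * 2 = 4K from the start.  Magic, checksum, data_size and padding are
 * validated before any state is trusted, and the bad-block log (at
 * most one page) is loaded if the feature bit is set.  Returns 1 when
 * this device is newer than @refdev, 0 when compatible, -errno on
 * failure.
 */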
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	sector_t sectors;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;

	sb = page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		pr_warn("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		pr_warn("md: data_size too small on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (sb->pad0 ||
	    sb->pad3[0] ||
	    memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
		/* Some padding is non-zero, might be a new feature */
		return -EINVAL;

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	rdev->new_data_offset = rdev->data_offset;
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
	    (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
		rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;
	if (minor_version
	    && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
		if (!rdev->bb_page)
			return -ENOMEM;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
		 */
		s32 offset;
		sector_t bb_sector;
		u64 *bbp;
		int i;
		int sectors = le16_to_cpu(sb->bblog_size);
		if (sectors > (PAGE_SIZE / 512))
			return -EINVAL;
		offset = le32_to_cpu(sb->bblog_offset);
		if (offset == 0)
			return -EINVAL;
		bb_sector = (long long)offset;
		if (!sync_page_io(rdev, bb_sector, sectors << 9,
				  rdev->bb_page, REQ_OP_READ, 0, true))
			return -EIO;
		bbp = (u64 *)page_address(rdev->bb_page);
		rdev->badblocks.shift = sb->bblog_shift;
		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);
			int count = bb & (0x3ff);
			u64 sector = bb >> 10;
			sector <<= sb->bblog_shift;
			count <<= sb->bblog_shift;
			if (bb + 1 == 0)
				break;
			if (badblocks_set(&rdev->badblocks, sector, count, 1))
				return -EINVAL;
		}
	} else if (sb->bblog_offset != 0)
		rdev->badblocks.shift = 0;

	if ((le32_to_cpu(sb->feature_map) &
	    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) {
		rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
		rdev->ppl.size = le16_to_cpu(sb->ppl.size);
		rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
	}

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			pr_warn("md: %s has strangely different superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version) {
		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
		sectors -= rdev->data_offset;
	} else
		sectors = rdev->sb_start;
	if (sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	return ret;
}
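
/*
 * validate_super for v1.x: on first use (mddev->raid_disks == 0) the
 * array-wide fields are copied out of the superblock, including
 * bitmap, reshape, journal and PPL feature state; on later calls the
 * event counter is checked and only this device's role (spare,
 * faulty, journal or data disk) is applied to the rdev.
 */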
static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(Bitmap_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime);
		mddev->utime = le64_to_cpu(sb->utime);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.space = 0;
		/* Default location for bitmap is 1K after superblock
		 * using 3K - total of 4K
		 */
		mddev->bitmap_info.default_offset = 1024 >> 9;
		mddev->bitmap_info.default_space = (4096-1024) >> 9;
		mddev->reshape_backwards = 0;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks = (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL) {
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);
			/* Metadata doesn't record how much space is available.
			 * For 1.0, we assume we can use up to the superblock
			 * if before, else to 4K beyond superblock.
			 * For others, assume no change is possible.
			 */
			if (mddev->minor_version > 0)
				mddev->bitmap_info.space = 0;
			else if (mddev->bitmap_info.offset > 0)
				mddev->bitmap_info.space =
					8 - mddev->bitmap_info.offset;
			else
				mddev->bitmap_info.space =
					-mddev->bitmap_info.offset;
		}

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
			if (mddev->delta_disks < 0 ||
			    (mddev->delta_disks == 0 &&
			     (le32_to_cpu(sb->feature_map)
			      & MD_FEATURE_RESHAPE_BACKWARDS)))
				mddev->reshape_backwards = 1;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)
			set_bit(MD_HAS_JOURNAL, &mddev->flags);

		if (le32_to_cpu(sb->feature_map) &
		    (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) {
			if (le32_to_cpu(sb->feature_map) &
			    (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL))
				return -EINVAL;
			if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) &&
			    (le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_MULTIPLE_PPLS))
				return -EINVAL;
			set_bit(MD_HAS_PPL, &mddev->flags);
		}
	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
		     le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
		if (ev1 < mddev->events)
			set_bit(Bitmap_sync, &rdev->flags);
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = MD_DISK_ROLE_SPARE;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case MD_DISK_ROLE_SPARE: /* spare */
			break;
		case MD_DISK_ROLE_FAULTY: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		case MD_DISK_ROLE_JOURNAL: /* journal device */
			if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) {
				/* journal device without journal feature */
				pr_warn("md: journal device provided without journal feature, ignoring the device\n");
				return -EINVAL;
			}
			set_bit(Journal, &rdev->flags);
			rdev->journal_tail = le64_to_cpu(sb->journal_tail);
			rdev->raid_disk = 0;
			break;
		default:
			rdev->saved_raid_disk = role;
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET)) {
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
				if (!(le32_to_cpu(sb->feature_map) &
				      MD_FEATURE_RECOVERY_BITMAP))
					rdev->saved_raid_disk = -1;
			} else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
		if (sb->devflags & FailFast1)
			set_bit(FailFast, &rdev->flags);
		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
			set_bit(Replacement, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
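
/*
 * sync_super for v1.x: regenerate rdev's in-memory superblock from
 * current mddev and rdev state, including recovery/reshape progress,
 * the bad-block log (re-copied under bb->lock's seqlock if it changes
 * mid-copy), and a dev_roles entry for every member, before
 * recomputing the checksum.
 */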
static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb;
	struct md_rdev *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
		sb->resync_offset = cpu_to_le64(MaxSector);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	if (test_bit(FailFast, &rdev->flags))
		sb->devflags |= FailFast1;
	else
		sb->devflags &= ~FailFast1;

	if (test_bit(WriteMostly, &rdev->flags))
		sb->devflags |= WriteMostly1;
	else
		sb->devflags &= ~WriteMostly1;
	sb->data_offset = cpu_to_le64(rdev->data_offset);
	sb->data_size = cpu_to_le64(rdev->sectors);

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
		if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP);
	}
	/* Note: recovery_offset and journal_tail share space */
	if (test_bit(Journal, &rdev->flags))
		sb->journal_tail = cpu_to_le64(rdev->journal_tail);
	if (test_bit(Replacement, &rdev->flags))
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_REPLACEMENT);

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
		if (mddev->delta_disks == 0 &&
		    mddev->reshape_backwards)
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS);
		if (rdev->new_data_offset != rdev->data_offset) {
			sb->feature_map
				|= cpu_to_le32(MD_FEATURE_NEW_OFFSET);
			sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
							     - rdev->data_offset));
		}
	}

	if (mddev_is_clustered(mddev))
		sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED);

	if (rdev->badblocks.count == 0)
		/* Nothing to do for bad blocks */ ;
	else if (sb->bblog_offset == 0)
		/* Cannot record bad blocks on this device */
		md_error(mddev, rdev);
	else {
		struct badblocks *bb = &rdev->badblocks;
		u64 *bbp = (u64 *)page_address(rdev->bb_page);
		u64 *p = bb->page;
		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
		if (bb->changed) {
			unsigned seq;

retry:
			seq = read_seqbegin(&bb->lock);

			memset(bbp, 0xff, PAGE_SIZE);

			for (i = 0 ; i < bb->count ; i++) {
				u64 internal_bb = p[i];
				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
						| BB_LEN(internal_bb));
				bbp[i] = cpu_to_le64(store_bb);
			}
			bb->changed = 0;
			if (read_seqretry(&bb->lock, seq))
				goto retry;

			bb->sector = (rdev->sb_start +
				      (int)le32_to_cpu(sb->bblog_offset));
			bb->size = le16_to_cpu(sb->bblog_size);
		}
	}

	max_dev = 0;
	rdev_for_each(rdev2, mddev)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);

	if (test_bit(MD_HAS_JOURNAL, &mddev->flags))
		sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL);

	if (test_bit(MD_HAS_PPL, &mddev->flags)) {
		if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags))
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS);
		else
			sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL);
		sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
		sb->ppl.size = cpu_to_le16(rdev->ppl.size);
	}

	rdev_for_each(rdev2, mddev) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (test_bit(Journal, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}

static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->data_offset != rdev->new_data_offset)
		return 0; /* too confusing */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = cpu_to_le64(rdev->sb_start);
	sb->sb_csum = calc_sb_1_csum(sb);
	do {
		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
			       rdev->sb_page);
	} while (md_super_wait(rdev->mddev) < 0);
	return num_sectors;
}

static int
super_1_allow_new_offset(struct md_rdev *rdev,
			 unsigned long long new_offset)
{
	/* All necessary checks on new >= old have been done */
	struct bitmap *bitmap;
	if (new_offset >= rdev->data_offset)
		return 1;

	/* with 1.0 metadata, there is no metadata to tread on
	 * so we can always move back */
	if (rdev->mddev->minor_version == 0)
		return 1;

	/* otherwise we must be sure not to step on
	 * any metadata, so stay:
	 * 36K beyond start of superblock
	 * beyond end of badblocks
	 * beyond write-intent bitmap
	 */
	if (rdev->sb_start + (32+4)*2 > new_offset)
		return 0;
	bitmap = rdev->mddev->bitmap;
	if (bitmap && !rdev->mddev->bitmap_info.file &&
	    rdev->sb_start + rdev->mddev->bitmap_info.offset +
	    bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
		return 0;
	if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
		return 0;

	return 1;
}

static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	  = super_90_load,
		.validate_super	  = super_90_validate,
		.sync_super	  = super_90_sync,
		.rdev_size_change = super_90_rdev_size_change,
		.allow_new_offset = super_90_allow_new_offset,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	  = super_1_load,
		.validate_super	  = super_1_validate,
		.sync_super	  = super_1_sync,
		.rdev_size_change = super_1_rdev_size_change,
		.allow_new_offset = super_1_allow_new_offset,
	},
};

static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
{
	if (mddev->sync_super) {
		mddev->sync_super(mddev, rdev);
		return;
	}

	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));

	super_types[mddev->major_version].sync_super(mddev, rdev);
}
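
/*
 * Return 1 if the two arrays share a physical disk, i.e. some active
 * member of mddev1 and some active member of mddev2 sit on the same
 * whole device (bd_contains).  Faulty, journal and unassigned devices
 * are ignored.
 */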
static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
{
	struct md_rdev *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1) {
		if (test_bit(Faulty, &rdev->flags) ||
		    test_bit(Journal, &rdev->flags) ||
		    rdev->raid_disk == -1)
			continue;
		rdev_for_each_rcu(rdev2, mddev2) {
			if (test_bit(Faulty, &rdev2->flags) ||
			    test_bit(Journal, &rdev2->flags) ||
			    rdev2->raid_disk == -1)
				continue;
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
		}
	}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);

/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(struct mddev *mddev)
{
	struct md_rdev *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
		return 0; /* shouldn't register, or already is */
	rdev_for_each(rdev, mddev) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
					  rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	if (!reference || !bdev_get_integrity(reference->bdev))
		return 0;
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	blk_integrity_register(mddev->gendisk,
			       bdev_get_integrity(reference->bdev));
	pr_debug("md: data integrity enabled on %s\n", mdname(mddev));
	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
		pr_err("md: failed to create integrity pool for %s\n",
		       mdname(mddev));
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);

/*
 * Attempt to add an rdev, but only if it is consistent with the current
 * integrity profile
 */
int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	struct blk_integrity *bi_rdev;
	struct blk_integrity *bi_mddev;
	char name[BDEVNAME_SIZE];

	if (!mddev->gendisk)
		return 0;

	bi_rdev = bdev_get_integrity(rdev->bdev);
	bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return 0;

	if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
		pr_err("%s: incompatible integrity profile for %s\n",
		       mdname(mddev), bdevname(rdev->bdev, name));
		return -ENXIO;
	}

	return 0;
}
EXPORT_SYMBOL(md_integrity_add_rdev);
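
/*
 * Attach an rdev to an array: reject duplicates, read-only devices on
 * a running array, and devices smaller than the array's component
 * size (for levels above 0); pick or verify a unique desc_nr, then
 * link the rdev into mddev->disks and create its sysfs objects.  A
 * failure to create the "block" sysfs link is deliberately tolerated.
 */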
static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	int err;

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
	    mddev->pers)
		return -EROFS;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (!test_bit(Journal, &rdev->flags) &&
	    rdev->sectors &&
	    (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	rcu_read_lock();
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers)
			choice = mddev->raid_disks;
		while (md_find_rdev_nr_rcu(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
			rcu_read_unlock();
			return -EBUSY;
		}
	}
	rcu_read_unlock();
	if (!test_bit(Journal, &rdev->flags) &&
	    mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		pr_warn("md: %s: array is limited to %d devices\n",
			mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	strreplace(b, '/', '!');

	rdev->mddev = mddev;
	pr_debug("md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if (sysfs_create_link(&rdev->kobj, ko, "block"))
		/* failure here is OK */;
	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_link_disk_holder(rdev->bdev, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled++;

	return 0;

fail:
	pr_warn("md: failed to register dev-%s for %s\n",
		b, mdname(mddev));
	return err;
}

static void md_delayed_delete(struct work_struct *ws)
{
	struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];

	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	rdev->badblocks.count = 0;
	/* We need to delay this, otherwise we can deadlock when
	 * writing 'remove' to "dev/state".  We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	queue_work(md_misc_wq, &rdev->del_work);
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				 shared ? (struct md_rdev *)lock_rdev : rdev);
	if (IS_ERR(bdev)) {
		pr_warn("md: could not open %s.\n", __bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(struct md_rdev *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];

	pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
	md_rdev_clear(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

void md_kick_rdev_from_array(struct md_rdev *rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}
EXPORT_SYMBOL_GPL(md_kick_rdev_from_array);

static void export_array(struct mddev *mddev)
{
	struct md_rdev *rdev;

	while (!list_empty(&mddev->disks)) {
		rdev = list_first_entry(&mddev->disks, struct md_rdev,
					same_set);
		md_kick_rdev_from_array(rdev);
	}
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}
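
/*
 * Try to mark the array clean.  Called with mddev->lock held; the
 * lock is dropped while writes_pending is switched to atomic mode so
 * the counter can be read reliably, and sync_checkers guards against
 * a concurrent caller switching it back to percpu mode too early.
 * Returns the resulting in_sync state.
 */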
static bool set_in_sync(struct mddev *mddev)
{
	lockdep_assert_held(&mddev->lock);
	if (!mddev->in_sync) {
		mddev->sync_checkers++;
		spin_unlock(&mddev->lock);
		percpu_ref_switch_to_atomic_sync(&mddev->writes_pending);
		spin_lock(&mddev->lock);
		if (!mddev->in_sync &&
		    percpu_ref_is_zero(&mddev->writes_pending)) {
			mddev->in_sync = 1;
			/*
			 * Ensure ->in_sync is visible before we clear
			 * ->sync_checkers.
			 */
			smp_mb();
			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
			sysfs_notify_dirent_safe(mddev->sysfs_state);
		}
		if (--mddev->sync_checkers == 0)
			percpu_ref_switch_to_percpu(&mddev->writes_pending);
	}
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	return mddev->in_sync;
}

static void sync_sbs(struct mddev *mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	struct md_rdev *rdev;
	rdev_for_each(rdev, mddev) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
  2112. static bool does_sb_need_changing(struct mddev *mddev)
  2113. {
  2114. struct md_rdev *rdev;
  2115. struct mdp_superblock_1 *sb;
  2116. int role;
  2117. /* Find a good rdev */
  2118. rdev_for_each(rdev, mddev)
  2119. if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags))
  2120. break;
  2121. /* No good device found. */
  2122. if (!rdev)
  2123. return false;
  2124. sb = page_address(rdev->sb_page);
  2125. /* Check if a device has become faulty or a spare become active */
  2126. rdev_for_each(rdev, mddev) {
  2127. role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
  2128. /* Device activated? */
if (role == 0xffff && rdev->raid_disk >= 0 &&
  2130. !test_bit(Faulty, &rdev->flags))
  2131. return true;
  2132. /* Device turned faulty? */
  2133. if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
  2134. return true;
  2135. }
  2136. /* Check if any mddev parameters have changed */
  2137. if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
  2138. (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
  2139. (mddev->layout != le32_to_cpu(sb->layout)) ||
  2140. (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
  2141. (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
  2142. return true;
  2143. return false;
  2144. }
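/*
 * Sync the in-memory superblocks out to every member device.
 * The 'rewrite' label re-issues the writes when md_super_wait() reports
 * a failure, and the 'repeat' label restarts the whole update when the
 * sb_flags or the in_sync state changed while we were writing.
 */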
  2145. void md_update_sb(struct mddev *mddev, int force_change)
  2146. {
  2147. struct md_rdev *rdev;
  2148. int sync_req;
  2149. int nospares = 0;
  2150. int any_badblocks_changed = 0;
  2151. int ret = -1;
  2152. if (mddev->ro) {
  2153. if (force_change)
  2154. set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
  2155. return;
  2156. }
  2157. repeat:
  2158. if (mddev_is_clustered(mddev)) {
  2159. if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
  2160. force_change = 1;
  2161. if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
  2162. nospares = 1;
  2163. ret = md_cluster_ops->metadata_update_start(mddev);
/* Has someone else updated the sb? */
  2165. if (!does_sb_need_changing(mddev)) {
  2166. if (ret == 0)
  2167. md_cluster_ops->metadata_update_cancel(mddev);
  2168. bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
  2169. BIT(MD_SB_CHANGE_DEVS) |
  2170. BIT(MD_SB_CHANGE_CLEAN));
  2171. return;
  2172. }
  2173. }
/*
 * First make sure individual recovery_offsets are correct.
 * curr_resync_completed can only be used during recovery;
 * during reshape/resync it might use array addresses rather
 * than device addresses.
 */
  2180. rdev_for_each(rdev, mddev) {
  2181. if (rdev->raid_disk >= 0 &&
  2182. mddev->delta_disks >= 0 &&
  2183. test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
  2184. test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
  2185. !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
  2186. !test_bit(Journal, &rdev->flags) &&
  2187. !test_bit(In_sync, &rdev->flags) &&
  2188. mddev->curr_resync_completed > rdev->recovery_offset)
  2189. rdev->recovery_offset = mddev->curr_resync_completed;
  2190. }
  2191. if (!mddev->persistent) {
  2192. clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
  2193. clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
  2194. if (!mddev->external) {
  2195. clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
  2196. rdev_for_each(rdev, mddev) {
  2197. if (rdev->badblocks.changed) {
  2198. rdev->badblocks.changed = 0;
  2199. ack_all_badblocks(&rdev->badblocks);
  2200. md_error(mddev, rdev);
  2201. }
  2202. clear_bit(Blocked, &rdev->flags);
  2203. clear_bit(BlockedBadBlocks, &rdev->flags);
  2204. wake_up(&rdev->blocked_wait);
  2205. }
  2206. }
  2207. wake_up(&mddev->sb_wait);
  2208. return;
  2209. }
  2210. spin_lock(&mddev->lock);
  2211. mddev->utime = ktime_get_real_seconds();
  2212. if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags))
  2213. force_change = 1;
  2214. if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags))
/* just a clean <-> dirty transition; possibly leave spares alone,
 * though if the event count isn't the right even/odd, we will
 * have to update spares after all
 */
  2219. nospares = 1;
  2220. if (force_change)
  2221. nospares = 0;
  2222. if (mddev->degraded)
  2223. /* If the array is degraded, then skipping spares is both
  2224. * dangerous and fairly pointless.
  2225. * Dangerous because a device that was removed from the array
* might have an event_count that still looks up-to-date,
  2227. * so it can be re-added without a resync.
  2228. * Pointless because if there are any spares to skip,
  2229. * then a recovery will happen and soon that array won't
  2230. * be degraded any more and the spare can go back to sleep then.
  2231. */
  2232. nospares = 0;
  2233. sync_req = mddev->in_sync;
  2234. /* If this is just a dirty<->clean transition, and the array is clean
  2235. * and 'events' is odd, we can roll back to the previous clean state */
  2236. if (nospares
  2237. && (mddev->in_sync && mddev->recovery_cp == MaxSector)
  2238. && mddev->can_decrease_events
  2239. && mddev->events != 1) {
  2240. mddev->events--;
  2241. mddev->can_decrease_events = 0;
  2242. } else {
  2243. /* otherwise we have to go forward and ... */
mddev->events++;
  2245. mddev->can_decrease_events = nospares;
  2246. }
  2247. /*
  2248. * This 64-bit counter should never wrap.
  2249. * Either we are in around ~1 trillion A.C., assuming
  2250. * 1 reboot per second, or we have a bug...
  2251. */
  2252. WARN_ON(mddev->events == 0);
  2253. rdev_for_each(rdev, mddev) {
  2254. if (rdev->badblocks.changed)
  2255. any_badblocks_changed++;
  2256. if (test_bit(Faulty, &rdev->flags))
  2257. set_bit(FaultRecorded, &rdev->flags);
  2258. }
  2259. sync_sbs(mddev, nospares);
  2260. spin_unlock(&mddev->lock);
  2261. pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
  2262. mdname(mddev), mddev->in_sync);
  2263. if (mddev->queue)
  2264. blk_add_trace_msg(mddev->queue, "md md_update_sb");
  2265. rewrite:
  2266. bitmap_update_sb(mddev->bitmap);
  2267. rdev_for_each(rdev, mddev) {
  2268. char b[BDEVNAME_SIZE];
  2269. if (rdev->sb_loaded != 1)
  2270. continue; /* no noise on spare devices */
  2271. if (!test_bit(Faulty, &rdev->flags)) {
md_super_write(mddev, rdev,
  2273. rdev->sb_start, rdev->sb_size,
  2274. rdev->sb_page);
  2275. pr_debug("md: (write) %s's sb offset: %llu\n",
  2276. bdevname(rdev->bdev, b),
  2277. (unsigned long long)rdev->sb_start);
  2278. rdev->sb_events = mddev->events;
  2279. if (rdev->badblocks.size) {
  2280. md_super_write(mddev, rdev,
  2281. rdev->badblocks.sector,
  2282. rdev->badblocks.size << 9,
  2283. rdev->bb_page);
  2284. rdev->badblocks.size = 0;
  2285. }
  2286. } else
  2287. pr_debug("md: %s (skipping faulty)\n",
  2288. bdevname(rdev->bdev, b));
  2289. if (mddev->level == LEVEL_MULTIPATH)
  2290. /* only need to write one superblock... */
  2291. break;
  2292. }
  2293. if (md_super_wait(mddev) < 0)
  2294. goto rewrite;
  2295. /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */
  2296. if (mddev_is_clustered(mddev) && ret == 0)
  2297. md_cluster_ops->metadata_update_finish(mddev);
  2298. if (mddev->in_sync != sync_req ||
  2299. !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING),
  2300. BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
  2301. /* have to write it out again */
  2302. goto repeat;
  2303. wake_up(&mddev->sb_wait);
  2304. if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
  2305. sysfs_notify(&mddev->kobj, NULL, "sync_completed");
  2306. rdev_for_each(rdev, mddev) {
  2307. if (test_and_clear_bit(FaultRecorded, &rdev->flags))
  2308. clear_bit(Blocked, &rdev->flags);
  2309. if (any_badblocks_changed)
  2310. ack_all_badblocks(&rdev->badblocks);
  2311. clear_bit(BlockedBadBlocks, &rdev->flags);
  2312. wake_up(&rdev->blocked_wait);
  2313. }
  2314. }
  2315. EXPORT_SYMBOL(md_update_sb);
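/*
 * Make a device that is already bound to the array usable by the
 * personality. Journal devices, and personalities without
 * ->hot_remove_disk, get the disk hot-added here directly; on success
 * the recovery thread is woken to pick up the new device.
 */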
  2316. static int add_bound_rdev(struct md_rdev *rdev)
  2317. {
  2318. struct mddev *mddev = rdev->mddev;
  2319. int err = 0;
  2320. bool add_journal = test_bit(Journal, &rdev->flags);
  2321. if (!mddev->pers->hot_remove_disk || add_journal) {
/* If there is hot_add_disk but no hot_remove_disk,
 * then added disks are for geometry changes
 * and should be added immediately.
 */
  2326. super_types[mddev->major_version].
  2327. validate_super(mddev, rdev);
  2328. if (add_journal)
  2329. mddev_suspend(mddev);
  2330. err = mddev->pers->hot_add_disk(mddev, rdev);
  2331. if (add_journal)
  2332. mddev_resume(mddev);
  2333. if (err) {
  2334. md_kick_rdev_from_array(rdev);
  2335. return err;
  2336. }
  2337. }
  2338. sysfs_notify_dirent_safe(rdev->sysfs_state);
  2339. set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
  2340. if (mddev->degraded)
  2341. set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
  2342. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  2343. md_new_event(mddev);
  2344. md_wakeup_thread(mddev->thread);
  2345. return 0;
  2346. }
/* Words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either form. For this we use cmd_match.
 */
  2350. static int cmd_match(const char *cmd, const char *str)
  2351. {
  2352. /* See if cmd, written into a sysfs file, matches
  2353. * str. They must either be the same, or cmd can
  2354. * have a trailing newline
  2355. */
  2356. while (*cmd && *str && *cmd == *str) {
  2357. cmd++;
  2358. str++;
  2359. }
  2360. if (*cmd == '\n')
  2361. cmd++;
  2362. if (*str || *cmd)
  2363. return 0;
  2364. return 1;
  2365. }
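/* e.g. cmd_match("faulty\n", "faulty") == 1, but cmd_match("fault", "faulty") == 0 */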
  2366. struct rdev_sysfs_entry {
  2367. struct attribute attr;
  2368. ssize_t (*show)(struct md_rdev *, char *);
  2369. ssize_t (*store)(struct md_rdev *, const char *, size_t);
  2370. };
  2371. static ssize_t
  2372. state_show(struct md_rdev *rdev, char *page)
  2373. {
  2374. char *sep = ",";
  2375. size_t len = 0;
  2376. unsigned long flags = READ_ONCE(rdev->flags);
  2377. if (test_bit(Faulty, &flags) ||
  2378. (!test_bit(ExternalBbl, &flags) &&
  2379. rdev->badblocks.unacked_exist))
  2380. len += sprintf(page+len, "faulty%s", sep);
  2381. if (test_bit(In_sync, &flags))
  2382. len += sprintf(page+len, "in_sync%s", sep);
  2383. if (test_bit(Journal, &flags))
  2384. len += sprintf(page+len, "journal%s", sep);
  2385. if (test_bit(WriteMostly, &flags))
  2386. len += sprintf(page+len, "write_mostly%s", sep);
  2387. if (test_bit(Blocked, &flags) ||
  2388. (rdev->badblocks.unacked_exist
  2389. && !test_bit(Faulty, &flags)))
  2390. len += sprintf(page+len, "blocked%s", sep);
  2391. if (!test_bit(Faulty, &flags) &&
  2392. !test_bit(Journal, &flags) &&
  2393. !test_bit(In_sync, &flags))
  2394. len += sprintf(page+len, "spare%s", sep);
  2395. if (test_bit(WriteErrorSeen, &flags))
  2396. len += sprintf(page+len, "write_error%s", sep);
  2397. if (test_bit(WantReplacement, &flags))
  2398. len += sprintf(page+len, "want_replacement%s", sep);
  2399. if (test_bit(Replacement, &flags))
  2400. len += sprintf(page+len, "replacement%s", sep);
  2401. if (test_bit(ExternalBbl, &flags))
  2402. len += sprintf(page+len, "external_bbl%s", sep);
  2403. if (test_bit(FailFast, &flags))
  2404. len += sprintf(page+len, "failfast%s", sep);
  2405. if (len)
  2406. len -= strlen(sep);
  2407. return len+sprintf(page+len, "\n");
  2408. }
  2409. static ssize_t
  2410. state_store(struct md_rdev *rdev, const char *buf, size_t len)
  2411. {
  2412. /* can write
  2413. * faulty - simulates an error
  2414. * remove - disconnects the device
  2415. * writemostly - sets write_mostly
  2416. * -writemostly - clears write_mostly
* blocked - sets the Blocked flag
* -blocked - clears the Blocked flag and possibly simulates an error
* insync - sets In_sync provided the device isn't active
* -insync - clears In_sync for a device with a slot assigned,
* so that it gets rebuilt based on the bitmap
  2422. * write_error - sets WriteErrorSeen
  2423. * -write_error - clears WriteErrorSeen
  2424. * {,-}failfast - set/clear FailFast
  2425. */
  2426. int err = -EINVAL;
  2427. if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
  2428. md_error(rdev->mddev, rdev);
  2429. if (test_bit(Faulty, &rdev->flags))
  2430. err = 0;
  2431. else
  2432. err = -EBUSY;
  2433. } else if (cmd_match(buf, "remove")) {
  2434. if (rdev->mddev->pers) {
  2435. clear_bit(Blocked, &rdev->flags);
  2436. remove_and_add_spares(rdev->mddev, rdev);
  2437. }
  2438. if (rdev->raid_disk >= 0)
  2439. err = -EBUSY;
  2440. else {
  2441. struct mddev *mddev = rdev->mddev;
  2442. err = 0;
  2443. if (mddev_is_clustered(mddev))
  2444. err = md_cluster_ops->remove_disk(mddev, rdev);
  2445. if (err == 0) {
  2446. md_kick_rdev_from_array(rdev);
  2447. if (mddev->pers) {
  2448. set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
  2449. md_wakeup_thread(mddev->thread);
  2450. }
  2451. md_new_event(mddev);
  2452. }
  2453. }
  2454. } else if (cmd_match(buf, "writemostly")) {
  2455. set_bit(WriteMostly, &rdev->flags);
  2456. err = 0;
  2457. } else if (cmd_match(buf, "-writemostly")) {
  2458. clear_bit(WriteMostly, &rdev->flags);
  2459. err = 0;
  2460. } else if (cmd_match(buf, "blocked")) {
  2461. set_bit(Blocked, &rdev->flags);
  2462. err = 0;
  2463. } else if (cmd_match(buf, "-blocked")) {
  2464. if (!test_bit(Faulty, &rdev->flags) &&
  2465. !test_bit(ExternalBbl, &rdev->flags) &&
  2466. rdev->badblocks.unacked_exist) {
  2467. /* metadata handler doesn't understand badblocks,
  2468. * so we need to fail the device
  2469. */
  2470. md_error(rdev->mddev, rdev);
  2471. }
  2472. clear_bit(Blocked, &rdev->flags);
  2473. clear_bit(BlockedBadBlocks, &rdev->flags);
  2474. wake_up(&rdev->blocked_wait);
  2475. set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
  2476. md_wakeup_thread(rdev->mddev->thread);
  2477. err = 0;
  2478. } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
  2479. set_bit(In_sync, &rdev->flags);
  2480. err = 0;
  2481. } else if (cmd_match(buf, "failfast")) {
  2482. set_bit(FailFast, &rdev->flags);
  2483. err = 0;
  2484. } else if (cmd_match(buf, "-failfast")) {
  2485. clear_bit(FailFast, &rdev->flags);
  2486. err = 0;
  2487. } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
  2488. !test_bit(Journal, &rdev->flags)) {
  2489. if (rdev->mddev->pers == NULL) {
  2490. clear_bit(In_sync, &rdev->flags);
  2491. rdev->saved_raid_disk = rdev->raid_disk;
  2492. rdev->raid_disk = -1;
  2493. err = 0;
  2494. }
  2495. } else if (cmd_match(buf, "write_error")) {
  2496. set_bit(WriteErrorSeen, &rdev->flags);
  2497. err = 0;
  2498. } else if (cmd_match(buf, "-write_error")) {
  2499. clear_bit(WriteErrorSeen, &rdev->flags);
  2500. err = 0;
  2501. } else if (cmd_match(buf, "want_replacement")) {
  2502. /* Any non-spare device that is not a replacement can
  2503. * become want_replacement at any time, but we then need to
  2504. * check if recovery is needed.
  2505. */
  2506. if (rdev->raid_disk >= 0 &&
  2507. !test_bit(Journal, &rdev->flags) &&
  2508. !test_bit(Replacement, &rdev->flags))
  2509. set_bit(WantReplacement, &rdev->flags);
  2510. set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
  2511. md_wakeup_thread(rdev->mddev->thread);
  2512. err = 0;
  2513. } else if (cmd_match(buf, "-want_replacement")) {
/* Clearing 'want_replacement' is always allowed.
 * Once replacement starts it is too late though.
 */
  2517. err = 0;
  2518. clear_bit(WantReplacement, &rdev->flags);
  2519. } else if (cmd_match(buf, "replacement")) {
  2520. /* Can only set a device as a replacement when array has not
  2521. * yet been started. Once running, replacement is automatic
  2522. * from spares, or by assigning 'slot'.
  2523. */
  2524. if (rdev->mddev->pers)
  2525. err = -EBUSY;
  2526. else {
  2527. set_bit(Replacement, &rdev->flags);
  2528. err = 0;
  2529. }
  2530. } else if (cmd_match(buf, "-replacement")) {
  2531. /* Similarly, can only clear Replacement before start */
  2532. if (rdev->mddev->pers)
  2533. err = -EBUSY;
  2534. else {
  2535. clear_bit(Replacement, &rdev->flags);
  2536. err = 0;
  2537. }
  2538. } else if (cmd_match(buf, "re-add")) {
  2539. if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
  2540. /* clear_bit is performed _after_ all the devices
  2541. * have their local Faulty bit cleared. If any writes
  2542. * happen in the meantime in the local node, they
  2543. * will land in the local bitmap, which will be synced
  2544. * by this node eventually
  2545. */
  2546. if (!mddev_is_clustered(rdev->mddev) ||
  2547. (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
  2548. clear_bit(Faulty, &rdev->flags);
  2549. err = add_bound_rdev(rdev);
  2550. }
  2551. } else
  2552. err = -EBUSY;
  2553. } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
  2554. set_bit(ExternalBbl, &rdev->flags);
  2555. rdev->badblocks.shift = 0;
  2556. err = 0;
  2557. } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
  2558. clear_bit(ExternalBbl, &rdev->flags);
  2559. err = 0;
  2560. }
  2561. if (!err)
  2562. sysfs_notify_dirent_safe(rdev->sysfs_state);
  2563. return err ? err : len;
  2564. }
  2565. static struct rdev_sysfs_entry rdev_state =
  2566. __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store);
  2567. static ssize_t
  2568. errors_show(struct md_rdev *rdev, char *page)
  2569. {
  2570. return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
  2571. }
  2572. static ssize_t
  2573. errors_store(struct md_rdev *rdev, const char *buf, size_t len)
  2574. {
  2575. unsigned int n;
  2576. int rv;
  2577. rv = kstrtouint(buf, 10, &n);
  2578. if (rv < 0)
  2579. return rv;
  2580. atomic_set(&rdev->corrected_errors, n);
  2581. return len;
  2582. }
  2583. static struct rdev_sysfs_entry rdev_errors =
  2584. __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);
  2585. static ssize_t
  2586. slot_show(struct md_rdev *rdev, char *page)
  2587. {
  2588. if (test_bit(Journal, &rdev->flags))
  2589. return sprintf(page, "journal\n");
  2590. else if (rdev->raid_disk < 0)
  2591. return sprintf(page, "none\n");
  2592. else
  2593. return sprintf(page, "%d\n", rdev->raid_disk);
  2594. }
  2595. static ssize_t
  2596. slot_store(struct md_rdev *rdev, const char *buf, size_t len)
  2597. {
  2598. int slot;
  2599. int err;
  2600. if (test_bit(Journal, &rdev->flags))
  2601. return -EBUSY;
  2602. if (strncmp(buf, "none", 4)==0)
  2603. slot = -1;
  2604. else {
  2605. err = kstrtouint(buf, 10, (unsigned int *)&slot);
  2606. if (err < 0)
  2607. return err;
  2608. }
  2609. if (rdev->mddev->pers && slot == -1) {
  2610. /* Setting 'slot' on an active array requires also
  2611. * updating the 'rd%d' link, and communicating
  2612. * with the personality with ->hot_*_disk.
  2613. * For now we only support removing
  2614. * failed/spare devices. This normally happens automatically,
  2615. * but not when the metadata is externally managed.
  2616. */
  2617. if (rdev->raid_disk == -1)
  2618. return -EEXIST;
  2619. /* personality does all needed checks */
  2620. if (rdev->mddev->pers->hot_remove_disk == NULL)
  2621. return -EINVAL;
  2622. clear_bit(Blocked, &rdev->flags);
  2623. remove_and_add_spares(rdev->mddev, rdev);
  2624. if (rdev->raid_disk >= 0)
  2625. return -EBUSY;
  2626. set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
  2627. md_wakeup_thread(rdev->mddev->thread);
  2628. } else if (rdev->mddev->pers) {
  2629. /* Activating a spare .. or possibly reactivating
  2630. * if we ever get bitmaps working here.
  2631. */
  2632. int err;
  2633. if (rdev->raid_disk != -1)
  2634. return -EBUSY;
  2635. if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
  2636. return -EBUSY;
  2637. if (rdev->mddev->pers->hot_add_disk == NULL)
  2638. return -EINVAL;
  2639. if (slot >= rdev->mddev->raid_disks &&
  2640. slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
  2641. return -ENOSPC;
  2642. rdev->raid_disk = slot;
  2643. if (test_bit(In_sync, &rdev->flags))
  2644. rdev->saved_raid_disk = slot;
  2645. else
  2646. rdev->saved_raid_disk = -1;
  2647. clear_bit(In_sync, &rdev->flags);
  2648. clear_bit(Bitmap_sync, &rdev->flags);
  2649. err = rdev->mddev->pers->
  2650. hot_add_disk(rdev->mddev, rdev);
  2651. if (err) {
  2652. rdev->raid_disk = -1;
  2653. return err;
  2654. } else
  2655. sysfs_notify_dirent_safe(rdev->sysfs_state);
  2656. if (sysfs_link_rdev(rdev->mddev, rdev))
  2657. /* failure here is OK */;
  2658. /* don't wakeup anyone, leave that to userspace. */
  2659. } else {
  2660. if (slot >= rdev->mddev->raid_disks &&
  2661. slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
  2662. return -ENOSPC;
  2663. rdev->raid_disk = slot;
  2664. /* assume it is working */
  2665. clear_bit(Faulty, &rdev->flags);
  2666. clear_bit(WriteMostly, &rdev->flags);
  2667. set_bit(In_sync, &rdev->flags);
  2668. sysfs_notify_dirent_safe(rdev->sysfs_state);
  2669. }
  2670. return len;
  2671. }
  2672. static struct rdev_sysfs_entry rdev_slot =
  2673. __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
  2674. static ssize_t
  2675. offset_show(struct md_rdev *rdev, char *page)
  2676. {
  2677. return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
  2678. }
  2679. static ssize_t
  2680. offset_store(struct md_rdev *rdev, const char *buf, size_t len)
  2681. {
  2682. unsigned long long offset;
  2683. if (kstrtoull(buf, 10, &offset) < 0)
  2684. return -EINVAL;
  2685. if (rdev->mddev->pers && rdev->raid_disk >= 0)
  2686. return -EBUSY;
  2687. if (rdev->sectors && rdev->mddev->external)
  2688. /* Must set offset before size, so overlap checks
  2689. * can be sane */
  2690. return -EBUSY;
  2691. rdev->data_offset = offset;
  2692. rdev->new_data_offset = offset;
  2693. return len;
  2694. }
  2695. static struct rdev_sysfs_entry rdev_offset =
  2696. __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
  2697. static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
  2698. {
  2699. return sprintf(page, "%llu\n",
  2700. (unsigned long long)rdev->new_data_offset);
  2701. }
  2702. static ssize_t new_offset_store(struct md_rdev *rdev,
  2703. const char *buf, size_t len)
  2704. {
  2705. unsigned long long new_offset;
  2706. struct mddev *mddev = rdev->mddev;
  2707. if (kstrtoull(buf, 10, &new_offset) < 0)
  2708. return -EINVAL;
  2709. if (mddev->sync_thread ||
  2710. test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
  2711. return -EBUSY;
  2712. if (new_offset == rdev->data_offset)
  2713. /* reset is always permitted */
  2714. ;
  2715. else if (new_offset > rdev->data_offset) {
  2716. /* must not push array size beyond rdev_sectors */
  2717. if (new_offset - rdev->data_offset
  2718. + mddev->dev_sectors > rdev->sectors)
  2719. return -E2BIG;
  2720. }
  2721. /* Metadata worries about other space details. */
  2722. /* decreasing the offset is inconsistent with a backwards
  2723. * reshape.
  2724. */
  2725. if (new_offset < rdev->data_offset &&
  2726. mddev->reshape_backwards)
  2727. return -EINVAL;
  2728. /* Increasing offset is inconsistent with forwards
  2729. * reshape. reshape_direction should be set to
  2730. * 'backwards' first.
  2731. */
  2732. if (new_offset > rdev->data_offset &&
  2733. !mddev->reshape_backwards)
  2734. return -EINVAL;
  2735. if (mddev->pers && mddev->persistent &&
  2736. !super_types[mddev->major_version]
  2737. .allow_new_offset(rdev, new_offset))
  2738. return -E2BIG;
  2739. rdev->new_data_offset = new_offset;
  2740. if (new_offset > rdev->data_offset)
  2741. mddev->reshape_backwards = 1;
  2742. else if (new_offset < rdev->data_offset)
  2743. mddev->reshape_backwards = 0;
  2744. return len;
  2745. }
  2746. static struct rdev_sysfs_entry rdev_new_offset =
  2747. __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store);
  2748. static ssize_t
  2749. rdev_size_show(struct md_rdev *rdev, char *page)
  2750. {
  2751. return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
  2752. }
  2753. static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
  2754. {
  2755. /* check if two start/length pairs overlap */
  2756. if (s1+l1 <= s2)
  2757. return 0;
  2758. if (s2+l2 <= s1)
  2759. return 0;
  2760. return 1;
  2761. }
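/* e.g. overlaps(0, 100, 50, 100) == 1, while overlaps(0, 50, 50, 100) == 0 */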
  2762. static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
  2763. {
  2764. unsigned long long blocks;
  2765. sector_t new;
  2766. if (kstrtoull(buf, 10, &blocks) < 0)
  2767. return -EINVAL;
  2768. if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
  2769. return -EINVAL; /* sector conversion overflow */
  2770. new = blocks * 2;
  2771. if (new != blocks * 2)
  2772. return -EINVAL; /* unsigned long long to sector_t overflow */
  2773. *sectors = new;
  2774. return 0;
  2775. }
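/* strict_blocks_to_sectors() above doubles a count of 1K blocks into
 * 512-byte sectors, rejecting input whose doubling would overflow
 * either unsigned long long or sector_t.
 */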
  2776. static ssize_t
  2777. rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
  2778. {
  2779. struct mddev *my_mddev = rdev->mddev;
  2780. sector_t oldsectors = rdev->sectors;
  2781. sector_t sectors;
  2782. if (test_bit(Journal, &rdev->flags))
  2783. return -EBUSY;
  2784. if (strict_blocks_to_sectors(buf, &sectors) < 0)
  2785. return -EINVAL;
  2786. if (rdev->data_offset != rdev->new_data_offset)
  2787. return -EINVAL; /* too confusing */
  2788. if (my_mddev->pers && rdev->raid_disk >= 0) {
  2789. if (my_mddev->persistent) {
  2790. sectors = super_types[my_mddev->major_version].
  2791. rdev_size_change(rdev, sectors);
  2792. if (!sectors)
  2793. return -EBUSY;
  2794. } else if (!sectors)
  2795. sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
  2796. rdev->data_offset;
  2797. if (!my_mddev->pers->resize)
  2798. /* Cannot change size for RAID0 or Linear etc */
  2799. return -EINVAL;
  2800. }
  2801. if (sectors < my_mddev->dev_sectors)
  2802. return -EINVAL; /* component must fit device */
  2803. rdev->sectors = sectors;
  2804. if (sectors > oldsectors && my_mddev->external) {
  2805. /* Need to check that all other rdevs with the same
  2806. * ->bdev do not overlap. 'rcu' is sufficient to walk
  2807. * the rdev lists safely.
  2808. * This check does not provide a hard guarantee, it
  2809. * just helps avoid dangerous mistakes.
  2810. */
  2811. struct mddev *mddev;
  2812. int overlap = 0;
  2813. struct list_head *tmp;
  2814. rcu_read_lock();
  2815. for_each_mddev(mddev, tmp) {
  2816. struct md_rdev *rdev2;
  2817. rdev_for_each(rdev2, mddev)
  2818. if (rdev->bdev == rdev2->bdev &&
  2819. rdev != rdev2 &&
  2820. overlaps(rdev->data_offset, rdev->sectors,
  2821. rdev2->data_offset,
  2822. rdev2->sectors)) {
  2823. overlap = 1;
  2824. break;
  2825. }
  2826. if (overlap) {
  2827. mddev_put(mddev);
  2828. break;
  2829. }
  2830. }
  2831. rcu_read_unlock();
  2832. if (overlap) {
  2833. /* Someone else could have slipped in a size
  2834. * change here, but doing so is just silly.
  2835. * We put oldsectors back because we *know* it is
  2836. * safe, and trust userspace not to race with
  2837. * itself
  2838. */
  2839. rdev->sectors = oldsectors;
  2840. return -EBUSY;
  2841. }
  2842. }
  2843. return len;
  2844. }
  2845. static struct rdev_sysfs_entry rdev_size =
  2846. __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);
  2847. static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
  2848. {
  2849. unsigned long long recovery_start = rdev->recovery_offset;
  2850. if (test_bit(In_sync, &rdev->flags) ||
  2851. recovery_start == MaxSector)
  2852. return sprintf(page, "none\n");
  2853. return sprintf(page, "%llu\n", recovery_start);
  2854. }
  2855. static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
  2856. {
  2857. unsigned long long recovery_start;
  2858. if (cmd_match(buf, "none"))
  2859. recovery_start = MaxSector;
  2860. else if (kstrtoull(buf, 10, &recovery_start))
  2861. return -EINVAL;
  2862. if (rdev->mddev->pers &&
  2863. rdev->raid_disk >= 0)
  2864. return -EBUSY;
  2865. rdev->recovery_offset = recovery_start;
  2866. if (recovery_start == MaxSector)
  2867. set_bit(In_sync, &rdev->flags);
  2868. else
  2869. clear_bit(In_sync, &rdev->flags);
  2870. return len;
  2871. }
  2872. static struct rdev_sysfs_entry rdev_recovery_start =
  2873. __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
  2874. /* sysfs access to bad-blocks list.
  2875. * We present two files.
  2876. * 'bad-blocks' lists sector numbers and lengths of ranges that
  2877. * are recorded as bad. The list is truncated to fit within
  2878. * the one-page limit of sysfs.
* Writing "sector length" to this file adds an acknowledged
* bad block range.
  2881. * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
  2882. * been acknowledged. Writing to this file adds bad blocks
  2883. * without acknowledging them. This is largely for testing.
  2884. */
  2885. static ssize_t bb_show(struct md_rdev *rdev, char *page)
  2886. {
  2887. return badblocks_show(&rdev->badblocks, page, 0);
  2888. }
  2889. static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
  2890. {
  2891. int rv = badblocks_store(&rdev->badblocks, page, len, 0);
  2892. /* Maybe that ack was all we needed */
  2893. if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
  2894. wake_up(&rdev->blocked_wait);
  2895. return rv;
  2896. }
  2897. static struct rdev_sysfs_entry rdev_bad_blocks =
  2898. __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
  2899. static ssize_t ubb_show(struct md_rdev *rdev, char *page)
  2900. {
  2901. return badblocks_show(&rdev->badblocks, page, 1);
  2902. }
  2903. static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
  2904. {
  2905. return badblocks_store(&rdev->badblocks, page, len, 1);
  2906. }
  2907. static struct rdev_sysfs_entry rdev_unack_bad_blocks =
  2908. __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);
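/*
 * 'ppl_sector' and 'ppl_size' describe the location and size of the
 * Partial Parity Log (PPL), raid5's mechanism for closing the write
 * hole. Neither can be changed while the log is in active use.
 */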
  2909. static ssize_t
  2910. ppl_sector_show(struct md_rdev *rdev, char *page)
  2911. {
  2912. return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
  2913. }
  2914. static ssize_t
  2915. ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
  2916. {
  2917. unsigned long long sector;
  2918. if (kstrtoull(buf, 10, &sector) < 0)
  2919. return -EINVAL;
  2920. if (sector != (sector_t)sector)
  2921. return -EINVAL;
  2922. if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
  2923. rdev->raid_disk >= 0)
  2924. return -EBUSY;
  2925. if (rdev->mddev->persistent) {
  2926. if (rdev->mddev->major_version == 0)
  2927. return -EINVAL;
  2928. if ((sector > rdev->sb_start &&
  2929. sector - rdev->sb_start > S16_MAX) ||
  2930. (sector < rdev->sb_start &&
  2931. rdev->sb_start - sector > -S16_MIN))
  2932. return -EINVAL;
  2933. rdev->ppl.offset = sector - rdev->sb_start;
  2934. } else if (!rdev->mddev->external) {
  2935. return -EBUSY;
  2936. }
  2937. rdev->ppl.sector = sector;
  2938. return len;
  2939. }
  2940. static struct rdev_sysfs_entry rdev_ppl_sector =
  2941. __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store);
  2942. static ssize_t
  2943. ppl_size_show(struct md_rdev *rdev, char *page)
  2944. {
  2945. return sprintf(page, "%u\n", rdev->ppl.size);
  2946. }
  2947. static ssize_t
  2948. ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
  2949. {
  2950. unsigned int size;
  2951. if (kstrtouint(buf, 10, &size) < 0)
  2952. return -EINVAL;
  2953. if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
  2954. rdev->raid_disk >= 0)
  2955. return -EBUSY;
  2956. if (rdev->mddev->persistent) {
  2957. if (rdev->mddev->major_version == 0)
  2958. return -EINVAL;
  2959. if (size > U16_MAX)
  2960. return -EINVAL;
  2961. } else if (!rdev->mddev->external) {
  2962. return -EBUSY;
  2963. }
  2964. rdev->ppl.size = size;
  2965. return len;
  2966. }
  2967. static struct rdev_sysfs_entry rdev_ppl_size =
  2968. __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store);
  2969. static struct attribute *rdev_default_attrs[] = {
  2970. &rdev_state.attr,
  2971. &rdev_errors.attr,
  2972. &rdev_slot.attr,
  2973. &rdev_offset.attr,
  2974. &rdev_new_offset.attr,
  2975. &rdev_size.attr,
  2976. &rdev_recovery_start.attr,
  2977. &rdev_bad_blocks.attr,
  2978. &rdev_unack_bad_blocks.attr,
  2979. &rdev_ppl_sector.attr,
  2980. &rdev_ppl_size.attr,
  2981. NULL,
  2982. };
  2983. static ssize_t
  2984. rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
  2985. {
  2986. struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
  2987. struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
  2988. if (!entry->show)
  2989. return -EIO;
  2990. if (!rdev->mddev)
  2991. return -EBUSY;
  2992. return entry->show(rdev, page);
  2993. }
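/*
 * Stores take mddev_lock() and then re-check rdev->mddev: the rdev may
 * have been detached from the array while we waited for the lock, in
 * which case the write is refused with -EBUSY.
 */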
  2994. static ssize_t
  2995. rdev_attr_store(struct kobject *kobj, struct attribute *attr,
  2996. const char *page, size_t length)
  2997. {
  2998. struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
  2999. struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
  3000. ssize_t rv;
  3001. struct mddev *mddev = rdev->mddev;
  3002. if (!entry->store)
  3003. return -EIO;
  3004. if (!capable(CAP_SYS_ADMIN))
  3005. return -EACCES;
rv = mddev ? mddev_lock(mddev) : -EBUSY;
  3007. if (!rv) {
  3008. if (rdev->mddev == NULL)
  3009. rv = -EBUSY;
  3010. else
  3011. rv = entry->store(rdev, page, length);
  3012. mddev_unlock(mddev);
  3013. }
  3014. return rv;
  3015. }
  3016. static void rdev_free(struct kobject *ko)
  3017. {
  3018. struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
  3019. kfree(rdev);
  3020. }
  3021. static const struct sysfs_ops rdev_sysfs_ops = {
  3022. .show = rdev_attr_show,
  3023. .store = rdev_attr_store,
  3024. };
  3025. static struct kobj_type rdev_ktype = {
  3026. .release = rdev_free,
  3027. .sysfs_ops = &rdev_sysfs_ops,
  3028. .default_attrs = rdev_default_attrs,
  3029. };
  3030. int md_rdev_init(struct md_rdev *rdev)
  3031. {
  3032. rdev->desc_nr = -1;
  3033. rdev->saved_raid_disk = -1;
  3034. rdev->raid_disk = -1;
  3035. rdev->flags = 0;
  3036. rdev->data_offset = 0;
  3037. rdev->new_data_offset = 0;
  3038. rdev->sb_events = 0;
  3039. rdev->last_read_error = 0;
  3040. rdev->sb_loaded = 0;
  3041. rdev->bb_page = NULL;
  3042. atomic_set(&rdev->nr_pending, 0);
  3043. atomic_set(&rdev->read_errors, 0);
  3044. atomic_set(&rdev->corrected_errors, 0);
  3045. INIT_LIST_HEAD(&rdev->same_set);
  3046. init_waitqueue_head(&rdev->blocked_wait);
  3047. /* Add space to store bad block list.
  3048. * This reserves the space even on arrays where it cannot
  3049. * be used - I wonder if that matters
  3050. */
  3051. return badblocks_init(&rdev->badblocks, 0);
  3052. }
  3053. EXPORT_SYMBOL_GPL(md_rdev_init);
  3054. /*
  3055. * Import a device. If 'super_format' >= 0, then sanity check the superblock
  3056. *
  3057. * mark the device faulty if:
  3058. *
  3059. * - the device is nonexistent (zero size)
  3060. * - the device has no valid superblock
  3061. *
  3062. * a faulty rdev _never_ has rdev->sb set.
  3063. */
  3064. static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
  3065. {
  3066. char b[BDEVNAME_SIZE];
  3067. int err;
  3068. struct md_rdev *rdev;
  3069. sector_t size;
  3070. rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
  3071. if (!rdev)
  3072. return ERR_PTR(-ENOMEM);
  3073. err = md_rdev_init(rdev);
  3074. if (err)
  3075. goto abort_free;
  3076. err = alloc_disk_sb(rdev);
  3077. if (err)
  3078. goto abort_free;
  3079. err = lock_rdev(rdev, newdev, super_format == -2);
  3080. if (err)
  3081. goto abort_free;
  3082. kobject_init(&rdev->kobj, &rdev_ktype);
  3083. size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
  3084. if (!size) {
  3085. pr_warn("md: %s has zero or unknown size, marking faulty!\n",
  3086. bdevname(rdev->bdev,b));
  3087. err = -EINVAL;
  3088. goto abort_free;
  3089. }
  3090. if (super_format >= 0) {
  3091. err = super_types[super_format].
  3092. load_super(rdev, NULL, super_minor);
  3093. if (err == -EINVAL) {
  3094. pr_warn("md: %s does not have a valid v%d.%d superblock, not importing!\n",
  3095. bdevname(rdev->bdev,b),
  3096. super_format, super_minor);
  3097. goto abort_free;
  3098. }
  3099. if (err < 0) {
  3100. pr_warn("md: could not read %s's sb, not importing!\n",
  3101. bdevname(rdev->bdev,b));
  3102. goto abort_free;
  3103. }
  3104. }
  3105. return rdev;
  3106. abort_free:
  3107. if (rdev->bdev)
  3108. unlock_rdev(rdev);
  3109. md_rdev_clear(rdev);
  3110. kfree(rdev);
  3111. return ERR_PTR(err);
  3112. }
  3113. /*
  3114. * Check a full RAID array for plausibility
  3115. */
  3116. static void analyze_sbs(struct mddev *mddev)
  3117. {
  3118. int i;
  3119. struct md_rdev *rdev, *freshest, *tmp;
  3120. char b[BDEVNAME_SIZE];
  3121. freshest = NULL;
  3122. rdev_for_each_safe(rdev, tmp, mddev)
  3123. switch (super_types[mddev->major_version].
  3124. load_super(rdev, freshest, mddev->minor_version)) {
  3125. case 1:
  3126. freshest = rdev;
  3127. break;
  3128. case 0:
  3129. break;
  3130. default:
  3131. pr_warn("md: fatal superblock inconsistency in %s -- removing from array\n",
  3132. bdevname(rdev->bdev,b));
  3133. md_kick_rdev_from_array(rdev);
  3134. }
  3135. super_types[mddev->major_version].
  3136. validate_super(mddev, freshest);
  3137. i = 0;
  3138. rdev_for_each_safe(rdev, tmp, mddev) {
  3139. if (mddev->max_disks &&
  3140. (rdev->desc_nr >= mddev->max_disks ||
  3141. i > mddev->max_disks)) {
  3142. pr_warn("md: %s: %s: only %d devices permitted\n",
  3143. mdname(mddev), bdevname(rdev->bdev, b),
  3144. mddev->max_disks);
  3145. md_kick_rdev_from_array(rdev);
  3146. continue;
  3147. }
  3148. if (rdev != freshest) {
  3149. if (super_types[mddev->major_version].
  3150. validate_super(mddev, rdev)) {
  3151. pr_warn("md: kicking non-fresh %s from array!\n",
  3152. bdevname(rdev->bdev,b));
  3153. md_kick_rdev_from_array(rdev);
  3154. continue;
  3155. }
  3156. }
  3157. if (mddev->level == LEVEL_MULTIPATH) {
  3158. rdev->desc_nr = i++;
  3159. rdev->raid_disk = rdev->desc_nr;
  3160. set_bit(In_sync, &rdev->flags);
  3161. } else if (rdev->raid_disk >=
  3162. (mddev->raid_disks - min(0, mddev->delta_disks)) &&
  3163. !test_bit(Journal, &rdev->flags)) {
  3164. rdev->raid_disk = -1;
  3165. clear_bit(In_sync, &rdev->flags);
  3166. }
  3167. }
  3168. }
  3169. /* Read a fixed-point number.
  3170. * Numbers in sysfs attributes should be in "standard" units where
  3171. * possible, so time should be in seconds.
* However, we internally use a much smaller unit such as
* milliseconds or jiffies.
* This function takes a decimal number with a possible fractional
* component, and produces an integer which is the result of
* multiplying that number by 10^'scale',
* all without any floating-point arithmetic.
  3178. */
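/* For example, with scale == 3, "5.3" parses to 5300 and "0.005" to 5. */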
  3179. int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
  3180. {
  3181. unsigned long result = 0;
  3182. long decimals = -1;
  3183. while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
  3184. if (*cp == '.')
  3185. decimals = 0;
  3186. else if (decimals < scale) {
  3187. unsigned int value;
  3188. value = *cp - '0';
  3189. result = result * 10 + value;
  3190. if (decimals >= 0)
  3191. decimals++;
  3192. }
  3193. cp++;
  3194. }
  3195. if (*cp == '\n')
  3196. cp++;
  3197. if (*cp)
  3198. return -EINVAL;
  3199. if (decimals < 0)
  3200. decimals = 0;
  3201. while (decimals < scale) {
  3202. result *= 10;
decimals++;
  3204. }
  3205. *res = result;
  3206. return 0;
  3207. }
  3208. static ssize_t
  3209. safe_delay_show(struct mddev *mddev, char *page)
  3210. {
  3211. int msec = (mddev->safemode_delay*1000)/HZ;
  3212. return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
  3213. }
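/*
 * 'safe_mode_delay' is presented in seconds with millisecond resolution;
 * internally the delay is kept in jiffies, hence the scale-3 parse and
 * the msec -> jiffies conversion below.
 */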
  3214. static ssize_t
  3215. safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
  3216. {
  3217. unsigned long msec;
  3218. if (mddev_is_clustered(mddev)) {
  3219. pr_warn("md: Safemode is disabled for clustered mode\n");
  3220. return -EINVAL;
  3221. }
  3222. if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
  3223. return -EINVAL;
  3224. if (msec == 0)
  3225. mddev->safemode_delay = 0;
  3226. else {
  3227. unsigned long old_delay = mddev->safemode_delay;
  3228. unsigned long new_delay = (msec*HZ)/1000;
  3229. if (new_delay == 0)
  3230. new_delay = 1;
  3231. mddev->safemode_delay = new_delay;
  3232. if (new_delay < old_delay || old_delay == 0)
  3233. mod_timer(&mddev->safemode_timer, jiffies+1);
  3234. }
  3235. return len;
  3236. }
  3237. static struct md_sysfs_entry md_safe_delay =
  3238. __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store);
  3239. static ssize_t
  3240. level_show(struct mddev *mddev, char *page)
  3241. {
  3242. struct md_personality *p;
  3243. int ret;
  3244. spin_lock(&mddev->lock);
  3245. p = mddev->pers;
  3246. if (p)
  3247. ret = sprintf(page, "%s\n", p->name);
  3248. else if (mddev->clevel[0])
  3249. ret = sprintf(page, "%s\n", mddev->clevel);
  3250. else if (mddev->level != LEVEL_NONE)
  3251. ret = sprintf(page, "%d\n", mddev->level);
  3252. else
  3253. ret = 0;
  3254. spin_unlock(&mddev->lock);
  3255. return ret;
  3256. }
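/*
 * Set the RAID level. On an inactive array this just records the
 * requested level; on a running array it attempts a live takeover
 * (e.g. between raid5 and raid6) via the new personality's ->takeover,
 * suspending the array around the switch.
 */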
  3257. static ssize_t
  3258. level_store(struct mddev *mddev, const char *buf, size_t len)
  3259. {
  3260. char clevel[16];
  3261. ssize_t rv;
  3262. size_t slen = len;
  3263. struct md_personality *pers, *oldpers;
  3264. long level;
  3265. void *priv, *oldpriv;
  3266. struct md_rdev *rdev;
  3267. if (slen == 0 || slen >= sizeof(clevel))
  3268. return -EINVAL;
  3269. rv = mddev_lock(mddev);
  3270. if (rv)
  3271. return rv;
  3272. if (mddev->pers == NULL) {
  3273. strncpy(mddev->clevel, buf, slen);
  3274. if (mddev->clevel[slen-1] == '\n')
  3275. slen--;
  3276. mddev->clevel[slen] = 0;
  3277. mddev->level = LEVEL_NONE;
  3278. rv = len;
  3279. goto out_unlock;
  3280. }
  3281. rv = -EROFS;
  3282. if (mddev->ro)
  3283. goto out_unlock;
  3284. /* request to change the personality. Need to ensure:
  3285. * - array is not engaged in resync/recovery/reshape
  3286. * - old personality can be suspended
* - new personality can take over the array.
  3288. */
  3289. rv = -EBUSY;
  3290. if (mddev->sync_thread ||
  3291. test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
  3292. mddev->reshape_position != MaxSector ||
  3293. mddev->sysfs_active)
  3294. goto out_unlock;
  3295. rv = -EINVAL;
  3296. if (!mddev->pers->quiesce) {
  3297. pr_warn("md: %s: %s does not support online personality change\n",
  3298. mdname(mddev), mddev->pers->name);
  3299. goto out_unlock;
  3300. }
  3301. /* Now find the new personality */
  3302. strncpy(clevel, buf, slen);
  3303. if (clevel[slen-1] == '\n')
  3304. slen--;
  3305. clevel[slen] = 0;
  3306. if (kstrtol(clevel, 10, &level))
  3307. level = LEVEL_NONE;
  3308. if (request_module("md-%s", clevel) != 0)
  3309. request_module("md-level-%s", clevel);
  3310. spin_lock(&pers_lock);
  3311. pers = find_pers(level, clevel);
  3312. if (!pers || !try_module_get(pers->owner)) {
  3313. spin_unlock(&pers_lock);
  3314. pr_warn("md: personality %s not loaded\n", clevel);
  3315. rv = -EINVAL;
  3316. goto out_unlock;
  3317. }
  3318. spin_unlock(&pers_lock);
  3319. if (pers == mddev->pers) {
  3320. /* Nothing to do! */
  3321. module_put(pers->owner);
  3322. rv = len;
  3323. goto out_unlock;
  3324. }
  3325. if (!pers->takeover) {
  3326. module_put(pers->owner);
  3327. pr_warn("md: %s: %s does not support personality takeover\n",
  3328. mdname(mddev), clevel);
  3329. rv = -EINVAL;
  3330. goto out_unlock;
  3331. }
  3332. rdev_for_each(rdev, mddev)
  3333. rdev->new_raid_disk = rdev->raid_disk;
  3334. /* ->takeover must set new_* and/or delta_disks
  3335. * if it succeeds, and may set them when it fails.
  3336. */
  3337. priv = pers->takeover(mddev);
  3338. if (IS_ERR(priv)) {
  3339. mddev->new_level = mddev->level;
  3340. mddev->new_layout = mddev->layout;
  3341. mddev->new_chunk_sectors = mddev->chunk_sectors;
  3342. mddev->raid_disks -= mddev->delta_disks;
  3343. mddev->delta_disks = 0;
  3344. mddev->reshape_backwards = 0;
  3345. module_put(pers->owner);
  3346. pr_warn("md: %s: %s would not accept array\n",
  3347. mdname(mddev), clevel);
  3348. rv = PTR_ERR(priv);
  3349. goto out_unlock;
  3350. }
  3351. /* Looks like we have a winner */
  3352. mddev_suspend(mddev);
  3353. mddev_detach(mddev);
  3354. spin_lock(&mddev->lock);
  3355. oldpers = mddev->pers;
  3356. oldpriv = mddev->private;
  3357. mddev->pers = pers;
  3358. mddev->private = priv;
  3359. strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
  3360. mddev->level = mddev->new_level;
  3361. mddev->layout = mddev->new_layout;
  3362. mddev->chunk_sectors = mddev->new_chunk_sectors;
  3363. mddev->delta_disks = 0;
  3364. mddev->reshape_backwards = 0;
  3365. mddev->degraded = 0;
  3366. spin_unlock(&mddev->lock);
  3367. if (oldpers->sync_request == NULL &&
  3368. mddev->external) {
  3369. /* We are converting from a no-redundancy array
  3370. * to a redundancy array and metadata is managed
  3371. * externally so we need to be sure that writes
  3372. * won't block due to a need to transition
  3373. * clean->dirty
  3374. * until external management is started.
  3375. */
  3376. mddev->in_sync = 0;
  3377. mddev->safemode_delay = 0;
  3378. mddev->safemode = 0;
  3379. }
  3380. oldpers->free(mddev, oldpriv);
  3381. if (oldpers->sync_request == NULL &&
  3382. pers->sync_request != NULL) {
  3383. /* need to add the md_redundancy_group */
  3384. if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
  3385. pr_warn("md: cannot register extra attributes for %s\n",
  3386. mdname(mddev));
  3387. mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action");
  3388. }
  3389. if (oldpers->sync_request != NULL &&
  3390. pers->sync_request == NULL) {
  3391. /* need to remove the md_redundancy_group */
  3392. if (mddev->to_remove == NULL)
  3393. mddev->to_remove = &md_redundancy_group;
  3394. }
  3395. module_put(oldpers->owner);
  3396. rdev_for_each(rdev, mddev) {
  3397. if (rdev->raid_disk < 0)
  3398. continue;
  3399. if (rdev->new_raid_disk >= mddev->raid_disks)
  3400. rdev->new_raid_disk = -1;
  3401. if (rdev->new_raid_disk == rdev->raid_disk)
  3402. continue;
  3403. sysfs_unlink_rdev(mddev, rdev);
  3404. }
  3405. rdev_for_each(rdev, mddev) {
  3406. if (rdev->raid_disk < 0)
  3407. continue;
  3408. if (rdev->new_raid_disk == rdev->raid_disk)
  3409. continue;
  3410. rdev->raid_disk = rdev->new_raid_disk;
  3411. if (rdev->raid_disk < 0)
  3412. clear_bit(In_sync, &rdev->flags);
  3413. else {
  3414. if (sysfs_link_rdev(mddev, rdev))
  3415. pr_warn("md: cannot register rd%d for %s after level change\n",
  3416. rdev->raid_disk, mdname(mddev));
  3417. }
  3418. }
  3419. if (pers->sync_request == NULL) {
  3420. /* this is now an array without redundancy, so
  3421. * it must always be in_sync
  3422. */
  3423. mddev->in_sync = 1;
  3424. del_timer_sync(&mddev->safemode_timer);
  3425. }
  3426. blk_set_stacking_limits(&mddev->queue->limits);
  3427. pers->run(mddev);
  3428. set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
  3429. mddev_resume(mddev);
  3430. if (!mddev->thread)
  3431. md_update_sb(mddev, 1);
  3432. sysfs_notify(&mddev->kobj, NULL, "level");
  3433. md_new_event(mddev);
  3434. rv = len;
  3435. out_unlock:
  3436. mddev_unlock(mddev);
  3437. return rv;
  3438. }
  3439. static struct md_sysfs_entry md_level =
  3440. __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
  3441. static ssize_t
  3442. layout_show(struct mddev *mddev, char *page)
  3443. {
  3444. /* just a number, not meaningful for all levels */
  3445. if (mddev->reshape_position != MaxSector &&
  3446. mddev->layout != mddev->new_layout)
  3447. return sprintf(page, "%d (%d)\n",
  3448. mddev->new_layout, mddev->layout);
  3449. return sprintf(page, "%d\n", mddev->layout);
  3450. }
  3451. static ssize_t
  3452. layout_store(struct mddev *mddev, const char *buf, size_t len)
  3453. {
  3454. unsigned int n;
  3455. int err;
  3456. err = kstrtouint(buf, 10, &n);
  3457. if (err < 0)
  3458. return err;
  3459. err = mddev_lock(mddev);
  3460. if (err)
  3461. return err;
  3462. if (mddev->pers) {
  3463. if (mddev->pers->check_reshape == NULL)
  3464. err = -EBUSY;
  3465. else if (mddev->ro)
  3466. err = -EROFS;
  3467. else {
  3468. mddev->new_layout = n;
  3469. err = mddev->pers->check_reshape(mddev);
  3470. if (err)
  3471. mddev->new_layout = mddev->layout;
  3472. }
  3473. } else {
  3474. mddev->new_layout = n;
  3475. if (mddev->reshape_position == MaxSector)
  3476. mddev->layout = n;
  3477. }
  3478. mddev_unlock(mddev);
  3479. return err ?: len;
  3480. }
  3481. static struct md_sysfs_entry md_layout =
  3482. __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store);
  3483. static ssize_t
  3484. raid_disks_show(struct mddev *mddev, char *page)
  3485. {
  3486. if (mddev->raid_disks == 0)
  3487. return 0;
  3488. if (mddev->reshape_position != MaxSector &&
  3489. mddev->delta_disks != 0)
  3490. return sprintf(page, "%d (%d)\n", mddev->raid_disks,
  3491. mddev->raid_disks - mddev->delta_disks);
  3492. return sprintf(page, "%d\n", mddev->raid_disks);
  3493. }
  3494. static int update_raid_disks(struct mddev *mddev, int raid_disks);
  3495. static ssize_t
  3496. raid_disks_store(struct mddev *mddev, const char *buf, size_t len)
  3497. {
  3498. unsigned int n;
  3499. int err;
  3500. err = kstrtouint(buf, 10, &n);
  3501. if (err < 0)
  3502. return err;
  3503. err = mddev_lock(mddev);
  3504. if (err)
  3505. return err;
  3506. if (mddev->pers)
  3507. err = update_raid_disks(mddev, n);
  3508. else if (mddev->reshape_position != MaxSector) {
  3509. struct md_rdev *rdev;
  3510. int olddisks = mddev->raid_disks - mddev->delta_disks;
  3511. err = -EINVAL;
  3512. rdev_for_each(rdev, mddev) {
  3513. if (olddisks < n &&
  3514. rdev->data_offset < rdev->new_data_offset)
  3515. goto out_unlock;
  3516. if (olddisks > n &&
  3517. rdev->data_offset > rdev->new_data_offset)
  3518. goto out_unlock;
  3519. }
  3520. err = 0;
  3521. mddev->delta_disks = n - olddisks;
  3522. mddev->raid_disks = n;
  3523. mddev->reshape_backwards = (mddev->delta_disks < 0);
  3524. } else
  3525. mddev->raid_disks = n;
  3526. out_unlock:
  3527. mddev_unlock(mddev);
  3528. return err ? err : len;
  3529. }
  3530. static struct md_sysfs_entry md_raid_disks =
  3531. __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);
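/*
 * 'chunk_size' is exposed in bytes while mddev->chunk_sectors is kept
 * in 512-byte sectors, hence the << 9 / >> 9 conversions below.
 */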
  3532. static ssize_t
  3533. chunk_size_show(struct mddev *mddev, char *page)
  3534. {
  3535. if (mddev->reshape_position != MaxSector &&
  3536. mddev->chunk_sectors != mddev->new_chunk_sectors)
  3537. return sprintf(page, "%d (%d)\n",
  3538. mddev->new_chunk_sectors << 9,
  3539. mddev->chunk_sectors << 9);
  3540. return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
  3541. }
  3542. static ssize_t
  3543. chunk_size_store(struct mddev *mddev, const char *buf, size_t len)
  3544. {
  3545. unsigned long n;
  3546. int err;
  3547. err = kstrtoul(buf, 10, &n);
  3548. if (err < 0)
  3549. return err;
  3550. err = mddev_lock(mddev);
  3551. if (err)
  3552. return err;
  3553. if (mddev->pers) {
  3554. if (mddev->pers->check_reshape == NULL)
  3555. err = -EBUSY;
  3556. else if (mddev->ro)
  3557. err = -EROFS;
  3558. else {
  3559. mddev->new_chunk_sectors = n >> 9;
  3560. err = mddev->pers->check_reshape(mddev);
  3561. if (err)
  3562. mddev->new_chunk_sectors = mddev->chunk_sectors;
  3563. }
  3564. } else {
  3565. mddev->new_chunk_sectors = n >> 9;
  3566. if (mddev->reshape_position == MaxSector)
  3567. mddev->chunk_sectors = n >> 9;
  3568. }
  3569. mddev_unlock(mddev);
  3570. return err ?: len;
  3571. }
  3572. static struct md_sysfs_entry md_chunk_size =
  3573. __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
  3574. static ssize_t
  3575. resync_start_show(struct mddev *mddev, char *page)
  3576. {
  3577. if (mddev->recovery_cp == MaxSector)
  3578. return sprintf(page, "none\n");
  3579. return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
  3580. }
  3581. static ssize_t
  3582. resync_start_store(struct mddev *mddev, const char *buf, size_t len)
  3583. {
  3584. unsigned long long n;
  3585. int err;
  3586. if (cmd_match(buf, "none"))
  3587. n = MaxSector;
  3588. else {
  3589. err = kstrtoull(buf, 10, &n);
  3590. if (err < 0)
  3591. return err;
  3592. if (n != (sector_t)n)
  3593. return -EINVAL;
  3594. }
  3595. err = mddev_lock(mddev);
  3596. if (err)
  3597. return err;
  3598. if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
  3599. err = -EBUSY;
  3600. if (!err) {
  3601. mddev->recovery_cp = n;
  3602. if (mddev->pers)
  3603. set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
  3604. }
  3605. mddev_unlock(mddev);
  3606. return err ?: len;
  3607. }
  3608. static struct md_sysfs_entry md_resync_start =
  3609. __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR,
  3610. resync_start_show, resync_start_store);
  3611. /*
  3612. * The array state can be:
  3613. *
  3614. * clear
  3615. * No devices, no size, no level
  3616. * Equivalent to STOP_ARRAY ioctl
  3617. * inactive
  3618. * May have some settings, but array is not active
  3619. * all IO results in error
  3620. * When written, doesn't tear down array, but just stops it
  3621. * suspended (not supported yet)
  3622. * All IO requests will block. The array can be reconfigured.
  3623. * Writing this, if accepted, will block until array is quiescent
  3624. * readonly
  3625. * no resync can happen. no superblocks get written.
  3626. * write requests fail
  3627. * read-auto
  3628. * like readonly, but behaves like 'clean' on a write request.
  3629. *
  3630. * clean - no pending writes, but otherwise active.
  3631. * When written to inactive array, starts without resync
  3632. * If a write request arrives then
  3633. * if metadata is known, mark 'dirty' and switch to 'active'.
  3634. * if not known, block and switch to write-pending
  3635. * If written to an active array that has pending writes, then fails.
  3636. * active
  3637. * fully active: IO and resync can be happening.
  3638. * When written to inactive array, starts with resync
  3639. *
  3640. * write-pending
  3641. * clean, but writes are blocked waiting for 'active' to be written.
  3642. *
  3643. * active-idle
  3644. * like active, but no writes have been seen for a while (100msec).
  3645. *
  3646. */
  3647. enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
  3648. write_pending, active_idle, bad_word};
  3649. static char *array_states[] = {
  3650. "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
  3651. "write-pending", "active-idle", NULL };
static int match_word(const char *word, char **list)
{
	int n;
	for (n=0; list[n]; n++)
		if (cmd_match(word, list[n]))
			break;
	return n;
}

static ssize_t
array_state_show(struct mddev *mddev, char *page)
{
	enum array_state st = inactive;

	if (mddev->pers)
		switch(mddev->ro) {
		case 1:
			st = readonly;
			break;
		case 2:
			st = read_auto;
			break;
		case 0:
			spin_lock(&mddev->lock);
			if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
				st = write_pending;
			else if (mddev->in_sync)
				st = clean;
			else if (mddev->safemode)
				st = active_idle;
			else
				st = active;
			spin_unlock(&mddev->lock);
		}
	else {
		if (list_empty(&mddev->disks) &&
		    mddev->raid_disks == 0 &&
		    mddev->dev_sectors == 0)
			st = clear;
		else
			st = inactive;
	}
	return sprintf(page, "%s\n", array_states[st]);
}

static int do_md_stop(struct mddev *mddev, int ro, struct block_device *bdev);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev);
static int do_md_run(struct mddev *mddev);
static int restart_array(struct mddev *mddev);

static ssize_t
array_state_store(struct mddev *mddev, const char *buf, size_t len)
{
	int err = 0;
	enum array_state st = match_word(buf, array_states);

	if (mddev->pers && (st == active || st == clean) && mddev->ro != 1) {
		/* don't take reconfig_mutex when toggling between
		 * clean and active
		 */
		spin_lock(&mddev->lock);
		if (st == active) {
			restart_array(mddev);
			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			md_wakeup_thread(mddev->thread);
			wake_up(&mddev->sb_wait);
		} else /* st == clean */ {
			restart_array(mddev);
			if (!set_in_sync(mddev))
				err = -EBUSY;
		}
		if (!err)
			sysfs_notify_dirent_safe(mddev->sysfs_state);
		spin_unlock(&mddev->lock);
		return err ?: len;
	}
	err = mddev_lock(mddev);
	if (err)
		return err;
	err = -EINVAL;
	switch(st) {
	case bad_word:
		break;
	case clear:
		/* stopping an active array */
		err = do_md_stop(mddev, 0, NULL);
		break;
	case inactive:
		/* stopping an active array */
		if (mddev->pers)
			err = do_md_stop(mddev, 2, NULL);
		else
			err = 0; /* already inactive */
		break;
	case suspended:
		break; /* not supported yet */
	case readonly:
		if (mddev->pers)
			err = md_set_readonly(mddev, NULL);
		else {
			mddev->ro = 1;
			set_disk_ro(mddev->gendisk, 1);
			err = do_md_run(mddev);
		}
		break;
	case read_auto:
		if (mddev->pers) {
			if (mddev->ro == 0)
				err = md_set_readonly(mddev, NULL);
			else if (mddev->ro == 1)
				err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				set_disk_ro(mddev->gendisk, 0);
			}
		} else {
			mddev->ro = 2;
			err = do_md_run(mddev);
		}
		break;
	case clean:
		if (mddev->pers) {
			err = restart_array(mddev);
			if (err)
				break;
			spin_lock(&mddev->lock);
			if (!set_in_sync(mddev))
				err = -EBUSY;
			spin_unlock(&mddev->lock);
		} else
			err = -EINVAL;
		break;
	case active:
		if (mddev->pers) {
			err = restart_array(mddev);
			if (err)
				break;
			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			set_disk_ro(mddev->gendisk, 0);
			err = do_md_run(mddev);
		}
		break;
	case write_pending:
	case active_idle:
		/* these cannot be set */
		break;
	}

	if (!err) {
		if (mddev->hold_active == UNTIL_IOCTL)
			mddev->hold_active = 0;
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	}
	mddev_unlock(mddev);
	return err ?: len;
}
static struct md_sysfs_entry md_array_state =
__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);
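/*
 * Example: "echo readonly > /sys/block/md0/md/array_state" takes a
 * running array read-only, while "echo clean" / "echo active" toggle
 * the in-sync state without taking the reconfig mutex (the fast path
 * at the top of array_state_store).  "write-pending" and "active-idle"
 * are report-only and cannot be stored.
 */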
static ssize_t
max_corrected_read_errors_show(struct mddev *mddev, char *page) {
	return sprintf(page, "%d\n",
		       atomic_read(&mddev->max_corr_read_errors));
}

static ssize_t
max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned int n;
	int rv;

	rv = kstrtouint(buf, 10, &n);
	if (rv < 0)
		return rv;
	atomic_set(&mddev->max_corr_read_errors, n);
	return len;
}

static struct md_sysfs_entry max_corr_read_errors =
__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
	max_corrected_read_errors_store);

static ssize_t
null_show(struct mddev *mddev, char *page)
{
	return -EINVAL;
}

static ssize_t
new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* buf must be "<major>:<minor>", optionally followed by a newline.
	 * The new device is added to the array.
	 * If the array has a persistent superblock, we read the
	 * superblock to initialise info and check validity.
	 * Otherwise, only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	struct md_rdev *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;

	flush_workqueue(md_misc_wq);

	err = mddev_lock(mddev);
	if (err)
		return err;
	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			struct md_rdev *rdev0
				= list_entry(mddev->disks.next,
					     struct md_rdev, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else if (mddev->external)
		rdev = md_import_device(dev, -2, -1);
	else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev)) {
		mddev_unlock(mddev);
		return PTR_ERR(rdev);
	}
	err = bind_rdev_to_array(rdev, mddev);
out:
	if (err)
		export_rdev(rdev);
	mddev_unlock(mddev);
	if (!err)
		md_new_event(mddev);
	return err ? err : len;
}
static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
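/*
 * Example: "echo 8:16 > /sys/block/md0/md/new_dev" imports the block
 * device with major 8, minor 16 (typically /dev/sdb) and binds it to
 * the array; the -EOVERFLOW check above rejects numbers that do not
 * survive the round trip through MKDEV().
 */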
static ssize_t
bitmap_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *end;
	unsigned long chunk, end_chunk;
	int err;

	err = mddev_lock(mddev);
	if (err)
		return err;
	if (!mddev->bitmap)
		goto out;
	/* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */
	while (*buf) {
		chunk = end_chunk = simple_strtoul(buf, &end, 0);
		if (buf == end) break;
		if (*end == '-') { /* range */
			buf = end + 1;
			end_chunk = simple_strtoul(buf, &end, 0);
			if (buf == end) break;
		}
		if (*end && !isspace(*end)) break;
		bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
		buf = skip_spaces(end);
	}
	bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
out:
	mddev_unlock(mddev);
	return len;
}

static struct md_sysfs_entry md_bitmap =
__ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
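/*
 * Example: "echo 0-100 200 > /sys/block/md0/md/bitmap_set_bits" marks
 * bitmap chunks 0 through 100 plus chunk 200 dirty, forcing those
 * regions to be resynced.  Numbers are parsed with base 0, so hex
 * values with an 0x prefix work too.
 */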
static ssize_t
size_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)mddev->dev_sectors / 2);
}

static int update_size(struct mddev *mddev, sector_t num_sectors);

static ssize_t
size_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* If array is inactive, we can reduce the component size, but
	 * not increase it (except from 0).
	 * If array is active, we can try an on-line resize
	 */
	sector_t sectors;
	int err = strict_blocks_to_sectors(buf, &sectors);

	if (err < 0)
		return err;
	err = mddev_lock(mddev);
	if (err)
		return err;
	if (mddev->pers) {
		err = update_size(mddev, sectors);
		if (err == 0)
			md_update_sb(mddev, 1);
	} else {
		if (mddev->dev_sectors == 0 ||
		    mddev->dev_sectors > sectors)
			mddev->dev_sectors = sectors;
		else
			err = -ENOSPC;
	}
	mddev_unlock(mddev);
	return err ? err : len;
}

static struct md_sysfs_entry md_size =
__ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
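/*
 * Note on units: dev_sectors counts 512-byte sectors, so the value
 * shown and stored through component_size is in 1K blocks (hence the
 * "/ 2" above and strict_blocks_to_sectors() on the store side).
 */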
/* Metadata version.
 * This is one of
 *   'none' for arrays with no metadata (good luck...)
 *   'external' for arrays with externally managed metadata,
 * or N.M for internally known formats
 */
static ssize_t
metadata_show(struct mddev *mddev, char *page)
{
	if (mddev->persistent)
		return sprintf(page, "%d.%d\n",
			       mddev->major_version, mddev->minor_version);
	else if (mddev->external)
		return sprintf(page, "external:%s\n", mddev->metadata_type);
	else
		return sprintf(page, "none\n");
}

static ssize_t
metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	int major, minor;
	char *e;
	int err;
	/* Changing the details of 'external' metadata is
	 * always permitted.  Otherwise there must be
	 * no devices attached to the array.
	 */

	err = mddev_lock(mddev);
	if (err)
		return err;
	err = -EBUSY;
	if (mddev->external && strncmp(buf, "external:", 9) == 0)
		;
	else if (!list_empty(&mddev->disks))
		goto out_unlock;

	err = 0;
	if (cmd_match(buf, "none")) {
		mddev->persistent = 0;
		mddev->external = 0;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		goto out_unlock;
	}
	if (strncmp(buf, "external:", 9) == 0) {
		size_t namelen = len-9;
		if (namelen >= sizeof(mddev->metadata_type))
			namelen = sizeof(mddev->metadata_type)-1;
		strncpy(mddev->metadata_type, buf+9, namelen);
		mddev->metadata_type[namelen] = 0;
		if (namelen && mddev->metadata_type[namelen-1] == '\n')
			mddev->metadata_type[--namelen] = 0;
		mddev->persistent = 0;
		mddev->external = 1;
		mddev->major_version = 0;
		mddev->minor_version = 90;
		goto out_unlock;
	}
	major = simple_strtoul(buf, &e, 10);
	err = -EINVAL;
	if (e==buf || *e != '.')
		goto out_unlock;
	buf = e+1;
	minor = simple_strtoul(buf, &e, 10);
	if (e==buf || (*e && *e != '\n') )
		goto out_unlock;
	err = -ENOENT;
	if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
		goto out_unlock;
	mddev->major_version = major;
	mddev->minor_version = minor;
	mddev->persistent = 1;
	mddev->external = 0;
	err = 0;
out_unlock:
	mddev_unlock(mddev);
	return err ?: len;
}

static struct md_sysfs_entry md_metadata =
__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
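/*
 * Example values for metadata_version: "0.90" or "1.2" select an
 * in-kernel superblock format, "external:imsm" (the name after the
 * colon is chosen by the userspace metadata manager) marks the
 * metadata as externally managed, and "none" clears both.  Switching
 * format requires an empty array unless the metadata is already
 * external, per the -EBUSY check above.
 */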
static ssize_t
action_show(struct mddev *mddev, char *page)
{
	char *type = "idle";
	unsigned long recovery = mddev->recovery;
	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
		type = "frozen";
	else if (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
			type = "reshape";
		else if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
				type = "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &recovery))
				type = "check";
			else
				type = "repair";
		} else if (test_bit(MD_RECOVERY_RECOVER, &recovery))
			type = "recover";
		else if (mddev->reshape_position != MaxSector)
			type = "reshape";
	}
	return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
		if (cmd_match(page, "frozen"))
			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		else
			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    mddev_lock(mddev) == 0) {
			flush_workqueue(md_misc_wq);
			if (mddev->sync_thread) {
				set_bit(MD_RECOVERY_INTR, &mddev->recovery);
				md_reap_sync_thread(mddev);
			}
			mddev_unlock(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync"))
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else if (cmd_match(page, "recover")) {
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	} else if (cmd_match(page, "reshape")) {
		int err;
		if (mddev->pers->start_reshape == NULL)
			return -EINVAL;
		err = mddev_lock(mddev);
		if (!err) {
			if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
				err = -EBUSY;
			else {
				clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
				err = mddev->pers->start_reshape(mddev);
			}
			mddev_unlock(mddev);
		}
		if (err)
			return err;
		sysfs_notify(&mddev->kobj, NULL, "degraded");
	} else {
		if (cmd_match(page, "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (!cmd_match(page, "repair"))
			return -EINVAL;
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	return len;
}

static struct md_sysfs_entry md_scan_mode =
__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store);
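/*
 * Example: a periodic scrub is typically started with
 * "echo check > /sys/block/md0/md/sync_action"; "repair" also rewrites
 * mismatched stripes, "idle" interrupts a running scan, and "frozen"
 * keeps a new one from starting.
 */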
static ssize_t
last_sync_action_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%s\n", mddev->last_sync_action);
}

static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action);

static ssize_t
mismatch_cnt_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)
		       atomic64_read(&mddev->resync_mismatches));
}

static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt);

static ssize_t
sync_min_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_min(mddev),
		       mddev->sync_speed_min ? "local": "system");
}

static ssize_t
sync_min_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned int min;
	int rv;

	if (strncmp(buf, "system", 6)==0) {
		min = 0;
	} else {
		rv = kstrtouint(buf, 10, &min);
		if (rv < 0)
			return rv;
		if (min == 0)
			return -EINVAL;
	}
	mddev->sync_speed_min = min;
	return len;
}

static struct md_sysfs_entry md_sync_min =
__ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store);

static ssize_t
sync_max_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d (%s)\n", speed_max(mddev),
		       mddev->sync_speed_max ? "local": "system");
}

static ssize_t
sync_max_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned int max;
	int rv;

	if (strncmp(buf, "system", 6)==0) {
		max = 0;
	} else {
		rv = kstrtouint(buf, 10, &max);
		if (rv < 0)
			return rv;
		if (max == 0)
			return -EINVAL;
	}
	mddev->sync_speed_max = max;
	return len;
}

static struct md_sysfs_entry md_sync_max =
__ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store);

static ssize_t
degraded_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->degraded);
}
static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded);

static ssize_t
sync_force_parallel_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%d\n", mddev->parallel_resync);
}

static ssize_t
sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len)
{
	long n;

	if (kstrtol(buf, 10, &n))
		return -EINVAL;

	if (n != 0 && n != 1)
		return -EINVAL;

	mddev->parallel_resync = n;

	if (mddev->sync_thread)
		wake_up(&resync_wait);

	return len;
}

/* force parallel resync, even with shared block devices */
static struct md_sysfs_entry md_sync_force_parallel =
__ATTR(sync_force_parallel, S_IRUGO|S_IWUSR,
       sync_force_parallel_show, sync_force_parallel_store);

static ssize_t
sync_speed_show(struct mddev *mddev, char *page)
{
	unsigned long resync, dt, db;

	if (mddev->curr_resync == 0)
		return sprintf(page, "none\n");
	resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active);
	dt = (jiffies - mddev->resync_mark) / HZ;
	if (!dt) dt++;
	db = resync - mddev->resync_mark_cnt;
	return sprintf(page, "%lu\n", db/dt/2); /* K/sec */
}

static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed);

static ssize_t
sync_completed_show(struct mddev *mddev, char *page)
{
	unsigned long long max_sectors, resync;

	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return sprintf(page, "none\n");

	if (mddev->curr_resync == 1 ||
	    mddev->curr_resync == 2)
		return sprintf(page, "delayed\n");

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else
		max_sectors = mddev->dev_sectors;

	resync = mddev->curr_resync_completed;
	return sprintf(page, "%llu / %llu\n", resync, max_sectors);
}

static struct md_sysfs_entry md_sync_completed =
	__ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL);

static ssize_t
min_sync_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)mddev->resync_min);
}
static ssize_t
min_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long min;
	int err;

	if (kstrtoull(buf, 10, &min))
		return -EINVAL;

	spin_lock(&mddev->lock);
	err = -EINVAL;
	if (min > mddev->resync_max)
		goto out_unlock;

	err = -EBUSY;
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		goto out_unlock;

	/* Round down to multiple of 4K for safety */
	mddev->resync_min = round_down(min, 8);
	err = 0;

out_unlock:
	spin_unlock(&mddev->lock);
	return err ?: len;
}

static struct md_sysfs_entry md_min_sync =
__ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store);

static ssize_t
max_sync_show(struct mddev *mddev, char *page)
{
	if (mddev->resync_max == MaxSector)
		return sprintf(page, "max\n");
	else
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->resync_max);
}
static ssize_t
max_sync_store(struct mddev *mddev, const char *buf, size_t len)
{
	int err;
	spin_lock(&mddev->lock);
	if (strncmp(buf, "max", 3) == 0)
		mddev->resync_max = MaxSector;
	else {
		unsigned long long max;
		int chunk;

		err = -EINVAL;
		if (kstrtoull(buf, 10, &max))
			goto out_unlock;
		if (max < mddev->resync_min)
			goto out_unlock;

		err = -EBUSY;
		if (max < mddev->resync_max &&
		    mddev->ro == 0 &&
		    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			goto out_unlock;

		/* Must be a multiple of chunk_size */
		chunk = mddev->chunk_sectors;
		if (chunk) {
			sector_t temp = max;

			err = -EINVAL;
			if (sector_div(temp, chunk))
				goto out_unlock;
		}
		mddev->resync_max = max;
	}
	wake_up(&mddev->recovery_wait);
	err = 0;
out_unlock:
	spin_unlock(&mddev->lock);
	return err ?: len;
}

static struct md_sysfs_entry md_max_sync =
__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
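/*
 * sync_min and sync_max bound the sector range a resync/check will
 * cover, so e.g. "echo 0 > sync_min; echo 4194304 > sync_max" (in
 * /sys/block/md0/md/) restricts a scrub to the first 2GiB of each
 * component.  sync_min is rounded down to a 4K multiple and sync_max
 * must be a multiple of the chunk size, per the checks above.
 */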
static ssize_t
suspend_lo_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
}

static ssize_t
suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long new;
	int err;

	err = kstrtoull(buf, 10, &new);
	if (err < 0)
		return err;
	if (new != (sector_t)new)
		return -EINVAL;

	err = mddev_lock(mddev);
	if (err)
		return err;
	err = -EINVAL;
	if (mddev->pers == NULL ||
	    mddev->pers->quiesce == NULL)
		goto unlock;
	mddev_suspend(mddev);
	mddev->suspend_lo = new;
	mddev_resume(mddev);

	err = 0;
unlock:
	mddev_unlock(mddev);
	return err ?: len;
}
static struct md_sysfs_entry md_suspend_lo =
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store);

static ssize_t
suspend_hi_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
}

static ssize_t
suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long long new;
	int err;

	err = kstrtoull(buf, 10, &new);
	if (err < 0)
		return err;
	if (new != (sector_t)new)
		return -EINVAL;

	err = mddev_lock(mddev);
	if (err)
		return err;
	err = -EINVAL;
	if (mddev->pers == NULL)
		goto unlock;
	mddev_suspend(mddev);
	mddev->suspend_hi = new;
	mddev_resume(mddev);

	err = 0;
unlock:
	mddev_unlock(mddev);
	return err ?: len;
}
static struct md_sysfs_entry md_suspend_hi =
__ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
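/*
 * suspend_lo and suspend_hi together describe the sector range
 * [suspend_lo, suspend_hi) in which the personality will hold off I/O;
 * each store quiesces the whole array (mddev_suspend/mddev_resume)
 * around the update so the new boundary takes effect atomically.
 */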
static ssize_t
reshape_position_show(struct mddev *mddev, char *page)
{
	if (mddev->reshape_position != MaxSector)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->reshape_position);
	strcpy(page, "none\n");
	return 5;
}

static ssize_t
reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
{
	struct md_rdev *rdev;
	unsigned long long new;
	int err;

	err = kstrtoull(buf, 10, &new);
	if (err < 0)
		return err;
	if (new != (sector_t)new)
		return -EINVAL;
	err = mddev_lock(mddev);
	if (err)
		return err;
	err = -EBUSY;
	if (mddev->pers)
		goto unlock;
	mddev->reshape_position = new;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	rdev_for_each(rdev, mddev)
		rdev->new_data_offset = rdev->data_offset;
	err = 0;
unlock:
	mddev_unlock(mddev);
	return err ?: len;
}

static struct md_sysfs_entry md_reshape_position =
__ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
       reshape_position_store);

static ssize_t
reshape_direction_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%s\n",
		       mddev->reshape_backwards ? "backwards" : "forwards");
}

static ssize_t
reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
{
	int backwards = 0;
	int err;

	if (cmd_match(buf, "forwards"))
		backwards = 0;
	else if (cmd_match(buf, "backwards"))
		backwards = 1;
	else
		return -EINVAL;
	if (mddev->reshape_backwards == backwards)
		return len;

	err = mddev_lock(mddev);
	if (err)
		return err;
	/* check if we are allowed to change */
	if (mddev->delta_disks)
		err = -EBUSY;
	else if (mddev->persistent &&
	    mddev->major_version == 0)
		err = -EINVAL;
	else
		mddev->reshape_backwards = backwards;
	mddev_unlock(mddev);
	return err ?: len;
}

static struct md_sysfs_entry md_reshape_direction =
__ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
       reshape_direction_store);

static ssize_t
array_size_show(struct mddev *mddev, char *page)
{
	if (mddev->external_size)
		return sprintf(page, "%llu\n",
			       (unsigned long long)mddev->array_sectors/2);
	else
		return sprintf(page, "default\n");
}

static ssize_t
array_size_store(struct mddev *mddev, const char *buf, size_t len)
{
	sector_t sectors;
	int err;

	err = mddev_lock(mddev);
	if (err)
		return err;

	/* cluster raid doesn't support change array_sectors */
	if (mddev_is_clustered(mddev)) {
		mddev_unlock(mddev);
		return -EINVAL;
	}

	if (strncmp(buf, "default", 7) == 0) {
		if (mddev->pers)
			sectors = mddev->pers->size(mddev, 0, 0);
		else
			sectors = mddev->array_sectors;

		mddev->external_size = 0;
	} else {
		if (strict_blocks_to_sectors(buf, &sectors) < 0)
			err = -EINVAL;
		else if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
			err = -E2BIG;
		else
			mddev->external_size = 1;
	}

	if (!err) {
		mddev->array_sectors = sectors;
		if (mddev->pers) {
			set_capacity(mddev->gendisk, mddev->array_sectors);
			revalidate_disk(mddev->gendisk);
		}
	}
	mddev_unlock(mddev);
	return err ?: len;
}

static struct md_sysfs_entry md_array_size =
__ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
       array_size_store);

static ssize_t
consistency_policy_show(struct mddev *mddev, char *page)
{
	int ret;

	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
		ret = sprintf(page, "journal\n");
	} else if (test_bit(MD_HAS_PPL, &mddev->flags)) {
		ret = sprintf(page, "ppl\n");
	} else if (mddev->bitmap) {
		ret = sprintf(page, "bitmap\n");
	} else if (mddev->pers) {
		if (mddev->pers->sync_request)
			ret = sprintf(page, "resync\n");
		else
			ret = sprintf(page, "none\n");
	} else {
		ret = sprintf(page, "unknown\n");
	}

	return ret;
}

static ssize_t
consistency_policy_store(struct mddev *mddev, const char *buf, size_t len)
{
	int err = 0;

	if (mddev->pers) {
		if (mddev->pers->change_consistency_policy)
			err = mddev->pers->change_consistency_policy(mddev, buf);
		else
			err = -EBUSY;
	} else if (mddev->external && strncmp(buf, "ppl", 3) == 0) {
		set_bit(MD_HAS_PPL, &mddev->flags);
	} else {
		err = -EINVAL;
	}

	return err ? err : len;
}

static struct md_sysfs_entry md_consistency_policy =
__ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show,
       consistency_policy_store);

static struct attribute *md_default_attrs[] = {
	&md_level.attr,
	&md_layout.attr,
	&md_raid_disks.attr,
	&md_chunk_size.attr,
	&md_size.attr,
	&md_resync_start.attr,
	&md_metadata.attr,
	&md_new_device.attr,
	&md_safe_delay.attr,
	&md_array_state.attr,
	&md_reshape_position.attr,
	&md_reshape_direction.attr,
	&md_array_size.attr,
	&max_corr_read_errors.attr,
	&md_consistency_policy.attr,
	NULL,
};

static struct attribute *md_redundancy_attrs[] = {
	&md_scan_mode.attr,
	&md_last_scan_mode.attr,
	&md_mismatches.attr,
	&md_sync_min.attr,
	&md_sync_max.attr,
	&md_sync_speed.attr,
	&md_sync_force_parallel.attr,
	&md_sync_completed.attr,
	&md_min_sync.attr,
	&md_max_sync.attr,
	&md_suspend_lo.attr,
	&md_suspend_hi.attr,
	&md_bitmap.attr,
	&md_degraded.attr,
	NULL,
};
static struct attribute_group md_redundancy_group = {
	.name = NULL,
	.attrs = md_redundancy_attrs,
};

static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->show)
		return -EIO;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);

	rv = entry->show(mddev, page);
	mddev_put(mddev);
	return rv;
}

static ssize_t
md_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
	struct mddev *mddev = container_of(kobj, struct mddev, kobj);
	ssize_t rv;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	spin_lock(&all_mddevs_lock);
	if (list_empty(&mddev->all_mddevs)) {
		spin_unlock(&all_mddevs_lock);
		return -EBUSY;
	}
	mddev_get(mddev);
	spin_unlock(&all_mddevs_lock);
	rv = entry->store(mddev, page, length);
	mddev_put(mddev);
	return rv;
}

static void md_free(struct kobject *ko)
{
	struct mddev *mddev = container_of(ko, struct mddev, kobj);

	if (mddev->sysfs_state)
		sysfs_put(mddev->sysfs_state);

	if (mddev->queue)
		blk_cleanup_queue(mddev->queue);
	if (mddev->gendisk) {
		del_gendisk(mddev->gendisk);
		put_disk(mddev->gendisk);
	}
	percpu_ref_exit(&mddev->writes_pending);

	kfree(mddev);
}

static const struct sysfs_ops md_sysfs_ops = {
	.show	= md_attr_show,
	.store	= md_attr_store,
};
static struct kobj_type md_ktype = {
	.release	= md_free,
	.sysfs_ops	= &md_sysfs_ops,
	.default_attrs	= md_default_attrs,
};

int mdp_major = 0;

static void mddev_delayed_delete(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
	kobject_del(&mddev->kobj);
	kobject_put(&mddev->kobj);
}

static void no_op(struct percpu_ref *r) {}

int mddev_init_writes_pending(struct mddev *mddev)
{
	if (mddev->writes_pending.percpu_count_ptr)
		return 0;
	if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
		return -ENOMEM;
	/* We want to start with the refcount at zero */
	percpu_ref_put(&mddev->writes_pending);
	return 0;
}
EXPORT_SYMBOL_GPL(mddev_init_writes_pending);

static int md_alloc(dev_t dev, char *name)
{
	/*
	 * If dev is zero, name is the name of a device to allocate with
	 * an arbitrary minor number.  It will be "md_???"
	 * If dev is non-zero it must be a device number with a MAJOR of
	 * MD_MAJOR or mdp_major.  In this case, if "name" is NULL, then
	 * the device is being created by opening a node in /dev.
	 * If "name" is not NULL, the device is being created by
	 * writing to /sys/module/md_mod/parameters/new_array.
	 */
	static DEFINE_MUTEX(disks_mutex);
	struct mddev *mddev = mddev_find(dev);
	struct gendisk *disk;
	int partitioned;
	int shift;
	int unit;
	int error;

	if (!mddev)
		return -ENODEV;

	partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
	shift = partitioned ? MdpMinorShift : 0;
	unit = MINOR(mddev->unit) >> shift;

	/* wait for any previous instance of this device to be
	 * completely removed (mddev_delayed_delete).
	 */
	flush_workqueue(md_misc_wq);

	mutex_lock(&disks_mutex);
	error = -EEXIST;
	if (mddev->gendisk)
		goto abort;

	if (name && !dev) {
		/* Need to ensure that 'name' is not a duplicate.
		 */
		struct mddev *mddev2;
		spin_lock(&all_mddevs_lock);

		list_for_each_entry(mddev2, &all_mddevs, all_mddevs)
			if (mddev2->gendisk &&
			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
				spin_unlock(&all_mddevs_lock);
				goto abort;
			}
		spin_unlock(&all_mddevs_lock);
	}
	if (name && dev)
		/*
		 * Creating /dev/mdNNN via "new_array", so adjust hold_active.
		 */
		mddev->hold_active = UNTIL_STOP;

	error = -ENOMEM;
	mddev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mddev->queue)
		goto abort;
	mddev->queue->queuedata = mddev;

	blk_queue_make_request(mddev->queue, md_make_request);
	blk_set_stacking_limits(&mddev->queue->limits);

	disk = alloc_disk(1 << shift);
	if (!disk) {
		blk_cleanup_queue(mddev->queue);
		mddev->queue = NULL;
		goto abort;
	}
	disk->major = MAJOR(mddev->unit);
	disk->first_minor = unit << shift;
	if (name)
		strcpy(disk->disk_name, name);
	else if (partitioned)
		sprintf(disk->disk_name, "md_d%d", unit);
	else
		sprintf(disk->disk_name, "md%d", unit);
	disk->fops = &md_fops;
	disk->private_data = mddev;
	disk->queue = mddev->queue;
	blk_queue_write_cache(mddev->queue, true, true);
	/* Allow extended partitions.  This makes the
	 * 'mdp' device redundant, but we can't really
	 * remove it now.
	 */
	disk->flags |= GENHD_FL_EXT_DEVT;
	mddev->gendisk = disk;
	/* As soon as we call add_disk(), another thread could get
	 * through to md_open, so make sure it doesn't get too far
	 */
	mutex_lock(&mddev->open_mutex);
	add_disk(disk);

	error = kobject_init_and_add(&mddev->kobj, &md_ktype,
				     &disk_to_dev(disk)->kobj, "%s", "md");
	if (error) {
		/* This isn't possible, but as kobject_init_and_add is marked
		 * __must_check, we must do something with the result
		 */
		pr_debug("md: cannot register %s/md - name in use\n",
			 disk->disk_name);
		error = 0;
	}
	if (mddev->kobj.sd &&
	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
		pr_debug("pointless warning\n");
	mutex_unlock(&mddev->open_mutex);
abort:
	mutex_unlock(&disks_mutex);
	if (!error && mddev->kobj.sd) {
		kobject_uevent(&mddev->kobj, KOBJ_ADD);
		mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
	}
	mddev_put(mddev);
	return error;
}

static struct kobject *md_probe(dev_t dev, int *part, void *data)
{
	if (create_on_open)
		md_alloc(dev, NULL);
	return NULL;
}

static int add_named_array(const char *val, const struct kernel_param *kp)
{
	/*
	 * val must be "md_*" or "mdNNN".
	 * For "md_*" we allocate an array with a large free minor number, and
	 * set the name to val.  val must not already be an active name.
	 * For "mdNNN" we allocate an array with the minor number NNN
	 * which must not already be in use.
	 */
	int len = strlen(val);
	char buf[DISK_NAME_LEN];
	unsigned long devnum;

	while (len && val[len-1] == '\n')
		len--;
	if (len >= DISK_NAME_LEN)
		return -E2BIG;
	strlcpy(buf, val, len+1);
	if (strncmp(buf, "md_", 3) == 0)
		return md_alloc(0, buf);
	if (strncmp(buf, "md", 2) == 0 &&
	    isdigit(buf[2]) &&
	    kstrtoul(buf+2, 10, &devnum) == 0 &&
	    devnum <= MINORMASK)
		return md_alloc(MKDEV(MD_MAJOR, devnum), NULL);

	return -EINVAL;
}
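/*
 * Example: "echo md_home > /sys/module/md_mod/parameters/new_array"
 * creates a named array (an arbitrary free minor is picked), while
 * "echo md127" claims minor 127 explicitly; the trailing newline from
 * echo is stripped by the loop above.
 */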
static void md_safemode_timeout(unsigned long data)
{
	struct mddev *mddev = (struct mddev *) data;

	mddev->safemode = 1;
	if (mddev->external)
		sysfs_notify_dirent_safe(mddev->sysfs_state);

	md_wakeup_thread(mddev->thread);
}

static int start_dirty_degraded;
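/*
 * md_run() brings an assembled array to life.  In outline it:
 * validates that there are devices and no personality is active yet,
 * analyzes superblocks if needed, loads and binds the personality
 * module, cross-checks data/metadata layout on every rdev, calls
 * pers->run(), optionally creates the bitmap, tunes the request
 * queue, and finally registers the redundancy sysfs attributes.
 */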
int md_run(struct mddev *mddev)
{
	int err;
	struct md_rdev *rdev;
	struct md_personality *pers;

	if (list_empty(&mddev->disks))
		/* cannot run an array with no devices.. */
		return -EINVAL;

	if (mddev->pers)
		return -EBUSY;
	/* Cannot run until previous stop completes properly */
	if (mddev->sysfs_active)
		return -EBUSY;

	/*
	 * Analyze all RAID superblock(s)
	 */
	if (!mddev->raid_disks) {
		if (!mddev->persistent)
			return -EINVAL;
		analyze_sbs(mddev);
	}

	if (mddev->level != LEVEL_NONE)
		request_module("md-level-%d", mddev->level);
	else if (mddev->clevel[0])
		request_module("md-%s", mddev->clevel);

	/*
	 * Drop all container device buffers, from now on
	 * the only valid external interface is through the md
	 * device.
	 */
	rdev_for_each(rdev, mddev) {
		if (test_bit(Faulty, &rdev->flags))
			continue;
		sync_blockdev(rdev->bdev);
		invalidate_bdev(rdev->bdev);
		if (mddev->ro != 1 &&
		    (bdev_read_only(rdev->bdev) ||
		     bdev_read_only(rdev->meta_bdev))) {
			mddev->ro = 1;
			if (mddev->gendisk)
				set_disk_ro(mddev->gendisk, 1);
		}

		/* perform some consistency tests on the device.
		 * We don't want the data to overlap the metadata,
		 * Internal Bitmap issues have been handled elsewhere.
		 */
		if (rdev->meta_bdev) {
			/* Nothing to check */;
		} else if (rdev->data_offset < rdev->sb_start) {
			if (mddev->dev_sectors &&
			    rdev->data_offset + mddev->dev_sectors
			    > rdev->sb_start) {
				pr_warn("md: %s: data overlaps metadata\n",
					mdname(mddev));
				return -EINVAL;
			}
		} else {
			if (rdev->sb_start + rdev->sb_size/512
			    > rdev->data_offset) {
				pr_warn("md: %s: metadata overlaps data\n",
					mdname(mddev));
				return -EINVAL;
			}
		}
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	}

	if (mddev->bio_set == NULL) {
		mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
		if (!mddev->bio_set)
			return -ENOMEM;
	}
	if (mddev->sync_set == NULL) {
		mddev->sync_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
		if (!mddev->sync_set)
			return -ENOMEM;
	}

	spin_lock(&pers_lock);
	pers = find_pers(mddev->level, mddev->clevel);
	if (!pers || !try_module_get(pers->owner)) {
		spin_unlock(&pers_lock);
		if (mddev->level != LEVEL_NONE)
			pr_warn("md: personality for level %d is not loaded!\n",
				mddev->level);
		else
			pr_warn("md: personality for level %s is not loaded!\n",
				mddev->clevel);
		return -EINVAL;
	}
	spin_unlock(&pers_lock);
	if (mddev->level != pers->level) {
		mddev->level = pers->level;
		mddev->new_level = pers->level;
	}
	strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));

	if (mddev->reshape_position != MaxSector &&
	    pers->start_reshape == NULL) {
		/* This personality cannot handle reshaping... */
		module_put(pers->owner);
		return -EINVAL;
	}

	if (pers->sync_request) {
		/* Warn if this is a potentially silly
		 * configuration.
		 */
		char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
		struct md_rdev *rdev2;
		int warned = 0;

		rdev_for_each(rdev, mddev)
			rdev_for_each(rdev2, mddev) {
				if (rdev < rdev2 &&
				    rdev->bdev->bd_contains ==
				    rdev2->bdev->bd_contains) {
					pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
						mdname(mddev),
						bdevname(rdev->bdev,b),
						bdevname(rdev2->bdev,b2));
					warned = 1;
				}
			}

		if (warned)
			pr_warn("True protection against single-disk failure might be compromised.\n");
	}

	mddev->recovery = 0;
	/* may be over-ridden by personality */
	mddev->resync_max_sectors = mddev->dev_sectors;

	mddev->ok_start_degraded = start_dirty_degraded;

	if (start_readonly && mddev->ro == 0)
		mddev->ro = 2; /* read-only, but switch on first write */

	/*
	 * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
	 * up mddev->thread. It is important to initialize critical
	 * resources for mddev->thread BEFORE calling pers->run().
	 */
	err = pers->run(mddev);
	if (err)
		pr_warn("md: pers->run() failed ...\n");
	else if (pers->size(mddev, 0, 0) < mddev->array_sectors) {
		WARN_ONCE(!mddev->external_size,
			  "%s: default size too small, but 'external_size' not in effect?\n",
			  __func__);
		pr_warn("md: invalid array_size %llu > default size %llu\n",
			(unsigned long long)mddev->array_sectors / 2,
			(unsigned long long)pers->size(mddev, 0, 0) / 2);
		err = -EINVAL;
	}
	if (err == 0 && pers->sync_request &&
	    (mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
		struct bitmap *bitmap;

		bitmap = bitmap_create(mddev, -1);
		if (IS_ERR(bitmap)) {
			err = PTR_ERR(bitmap);
			pr_warn("%s: failed to create bitmap (%d)\n",
				mdname(mddev), err);
		} else
			mddev->bitmap = bitmap;

	}
	if (err) {
		mddev_detach(mddev);
		if (mddev->private)
			pers->free(mddev, mddev->private);
		mddev->private = NULL;
		module_put(pers->owner);
		bitmap_destroy(mddev);
		return err;
	}
	if (mddev->queue) {
		bool nonrot = true;

		rdev_for_each(rdev, mddev) {
			if (rdev->raid_disk >= 0 &&
			    !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
				nonrot = false;
				break;
			}
		}
		if (mddev->degraded)
			nonrot = false;
		if (nonrot)
			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
		else
			queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
		mddev->queue->backing_dev_info->congested_data = mddev;
		mddev->queue->backing_dev_info->congested_fn = md_congested;
	}
	if (pers->sync_request) {
		if (mddev->kobj.sd &&
		    sysfs_create_group(&mddev->kobj, &md_redundancy_group))
			pr_warn("md: cannot register extra attributes for %s\n",
				mdname(mddev));
		mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action");
	} else if (mddev->ro == 2) /* auto-readonly not meaningful */
		mddev->ro = 0;

	atomic_set(&mddev->max_corr_read_errors,
		   MD_DEFAULT_MAX_CORRECTED_READ_ERRORS);
	mddev->safemode = 0;
	if (mddev_is_clustered(mddev))
		mddev->safemode_delay = 0;
	else
		mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
	mddev->in_sync = 1;
	smp_wmb();
	spin_lock(&mddev->lock);
	mddev->pers = pers;
	spin_unlock(&mddev->lock);
	rdev_for_each(rdev, mddev)
		if (rdev->raid_disk >= 0)
			if (sysfs_link_rdev(mddev, rdev))
				/* failure here is OK */;

	if (mddev->degraded && !mddev->ro)
		/* This ensures that recovering status is reported immediately
		 * via sysfs - until a lack of spares is confirmed.
		 */
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);

	if (mddev->sb_flags)
		md_update_sb(mddev, 0);

	md_new_event(mddev);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	sysfs_notify(&mddev->kobj, NULL, "degraded");
	return 0;
}
EXPORT_SYMBOL_GPL(md_run);

static int do_md_run(struct mddev *mddev)
{
	int err;

	err = md_run(mddev);
	if (err)
		goto out;
	err = bitmap_load(mddev);
	if (err) {
		bitmap_destroy(mddev);
		goto out;
	}

	if (mddev_is_clustered(mddev))
		md_allow_write(mddev);

	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */

	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	mddev->changed = 1;
	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
	return err;
}

static int restart_array(struct mddev *mddev)
{
	struct gendisk *disk = mddev->gendisk;
	struct md_rdev *rdev;
	bool has_journal = false;
	bool has_readonly = false;

	/* Complain if it has no devices */
	if (list_empty(&mddev->disks))
		return -ENXIO;
	if (!mddev->pers)
		return -EINVAL;
	if (!mddev->ro)
		return -EBUSY;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		if (test_bit(Journal, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			has_journal = true;
		if (bdev_read_only(rdev->bdev))
			has_readonly = true;
	}
	rcu_read_unlock();
	if (test_bit(MD_HAS_JOURNAL, &mddev->flags) && !has_journal)
		/* Don't restart rw with journal missing/faulty */
		return -EINVAL;
	if (has_readonly)
		return -EROFS;

	mddev->safemode = 0;
	mddev->ro = 0;
	set_disk_ro(disk, 0);
	pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
	/* Kick recovery or resync if necessary */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent_safe(mddev->sysfs_state);
	return 0;
}

static void md_clean(struct mddev *mddev)
{
	mddev->array_sectors = 0;
	mddev->external_size = 0;
	mddev->dev_sectors = 0;
	mddev->raid_disks = 0;
	mddev->recovery_cp = 0;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->reshape_position = MaxSector;
	mddev->external = 0;
	mddev->persistent = 0;
	mddev->level = LEVEL_NONE;
	mddev->clevel[0] = 0;
	mddev->flags = 0;
	mddev->sb_flags = 0;
	mddev->ro = 0;
	mddev->metadata_type[0] = 0;
	mddev->chunk_sectors = 0;
	mddev->ctime = mddev->utime = 0;
	mddev->layout = 0;
	mddev->max_disks = 0;
	mddev->events = 0;
	mddev->can_decrease_events = 0;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
	mddev->new_level = LEVEL_NONE;
	mddev->new_layout = 0;
	mddev->new_chunk_sectors = 0;
	mddev->curr_resync = 0;
	atomic64_set(&mddev->resync_mismatches, 0);
	mddev->suspend_lo = mddev->suspend_hi = 0;
	mddev->sync_speed_min = mddev->sync_speed_max = 0;
	mddev->recovery = 0;
	mddev->in_sync = 0;
	mddev->changed = 0;
	mddev->degraded = 0;
	mddev->safemode = 0;
	mddev->private = NULL;
	mddev->cluster_info = NULL;
	mddev->bitmap_info.offset = 0;
	mddev->bitmap_info.default_offset = 0;
	mddev->bitmap_info.default_space = 0;
	mddev->bitmap_info.chunksize = 0;
	mddev->bitmap_info.daemon_sleep = 0;
	mddev->bitmap_info.max_write_behind = 0;
	mddev->bitmap_info.nodes = 0;
}

static void __md_stop_writes(struct mddev *mddev)
{
	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	flush_workqueue(md_misc_wq);
	if (mddev->sync_thread) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		md_reap_sync_thread(mddev);
	}

	del_timer_sync(&mddev->safemode_timer);

	if (mddev->pers && mddev->pers->quiesce) {
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	bitmap_flush(mddev);

	if (mddev->ro == 0 &&
	    ((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
	     mddev->sb_flags)) {
		/* mark array as shutdown cleanly */
		if (!mddev_is_clustered(mddev))
			mddev->in_sync = 1;
		md_update_sb(mddev, 1);
	}
}

void md_stop_writes(struct mddev *mddev)
{
	mddev_lock_nointr(mddev);
	__md_stop_writes(mddev);
	mddev_unlock(mddev);
}
EXPORT_SYMBOL_GPL(md_stop_writes);

static void mddev_detach(struct mddev *mddev)
{
	bitmap_wait_behind_writes(mddev);
	if (mddev->pers && mddev->pers->quiesce) {
		mddev->pers->quiesce(mddev, 1);
		mddev->pers->quiesce(mddev, 0);
	}
	md_unregister_thread(&mddev->thread);
	if (mddev->queue)
		blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
}

static void __md_stop(struct mddev *mddev)
{
	struct md_personality *pers = mddev->pers;
	bitmap_destroy(mddev);
	mddev_detach(mddev);
	/* Ensure ->event_work is done */
	flush_workqueue(md_misc_wq);
	spin_lock(&mddev->lock);
	mddev->pers = NULL;
	spin_unlock(&mddev->lock);
	pers->free(mddev, mddev->private);
	mddev->private = NULL;
	if (pers->sync_request && mddev->to_remove == NULL)
		mddev->to_remove = &md_redundancy_group;
	module_put(pers->owner);
	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}

void md_stop(struct mddev *mddev)
{
	/* stop the array and free any attached data structures.
	 * This is called from dm-raid
	 */
  5224. __md_stop(mddev);
  5225. if (mddev->bio_set) {
  5226. bioset_free(mddev->bio_set);
  5227. mddev->bio_set = NULL;
  5228. }
  5229. if (mddev->sync_set) {
  5230. bioset_free(mddev->sync_set);
  5231. mddev->sync_set = NULL;
  5232. }
  5233. }
  5234. EXPORT_SYMBOL_GPL(md_stop);
  5235. static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
  5236. {
  5237. int err = 0;
  5238. int did_freeze = 0;
  5239. if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
  5240. did_freeze = 1;
  5241. set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
  5242. md_wakeup_thread(mddev->thread);
  5243. }
  5244. if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
  5245. set_bit(MD_RECOVERY_INTR, &mddev->recovery);
  5246. if (mddev->sync_thread)
  5247. /* Thread might be blocked waiting for metadata update
  5248. * which will now never happen */
  5249. wake_up_process(mddev->sync_thread->tsk);
  5250. if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
  5251. return -EBUSY;
  5252. mddev_unlock(mddev);
  5253. wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
  5254. &mddev->recovery));
  5255. wait_event(mddev->sb_wait,
  5256. !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
  5257. mddev_lock_nointr(mddev);
  5258. mutex_lock(&mddev->open_mutex);
  5259. if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
  5260. mddev->sync_thread ||
  5261. test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
  5262. pr_warn("md: %s still in use.\n",mdname(mddev));
  5263. if (did_freeze) {
  5264. clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
  5265. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  5266. md_wakeup_thread(mddev->thread);
  5267. }
  5268. err = -EBUSY;
  5269. goto out;
  5270. }
  5271. if (mddev->pers) {
  5272. __md_stop_writes(mddev);
  5273. err = -ENXIO;
  5274. if (mddev->ro==1)
  5275. goto out;
  5276. mddev->ro = 1;
  5277. set_disk_ro(mddev->gendisk, 1);
  5278. clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
  5279. set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
  5280. md_wakeup_thread(mddev->thread);
  5281. sysfs_notify_dirent_safe(mddev->sysfs_state);
  5282. err = 0;
  5283. }
  5284. out:
  5285. mutex_unlock(&mddev->open_mutex);
  5286. return err;
  5287. }
/* mode:
 *   0 - completely stop and dis-assemble array
 *   2 - stop but do not disassemble array
 */
static int do_md_stop(struct mddev *mddev, int mode,
                      struct block_device *bdev)
{
        struct gendisk *disk = mddev->gendisk;
        struct md_rdev *rdev;
        int did_freeze = 0;

        if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
                did_freeze = 1;
                set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                md_wakeup_thread(mddev->thread);
        }
        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        if (mddev->sync_thread)
                /* Thread might be blocked waiting for metadata update
                 * which will now never happen */
                wake_up_process(mddev->sync_thread->tsk);

        mddev_unlock(mddev);
        wait_event(resync_wait, (mddev->sync_thread == NULL &&
                                 !test_bit(MD_RECOVERY_RUNNING,
                                           &mddev->recovery)));
        mddev_lock_nointr(mddev);

        mutex_lock(&mddev->open_mutex);
        if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
            mddev->sysfs_active ||
            mddev->sync_thread ||
            test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
                pr_warn("md: %s still in use.\n", mdname(mddev));
                mutex_unlock(&mddev->open_mutex);
                if (did_freeze) {
                        clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        md_wakeup_thread(mddev->thread);
                }
                return -EBUSY;
        }
        if (mddev->pers) {
                if (mddev->ro)
                        set_disk_ro(disk, 0);

                __md_stop_writes(mddev);
                __md_stop(mddev);
                mddev->queue->backing_dev_info->congested_fn = NULL;

                /* tell userspace to handle 'inactive' */
                sysfs_notify_dirent_safe(mddev->sysfs_state);

                rdev_for_each(rdev, mddev)
                        if (rdev->raid_disk >= 0)
                                sysfs_unlink_rdev(mddev, rdev);

                set_capacity(disk, 0);
                mutex_unlock(&mddev->open_mutex);
                mddev->changed = 1;
                revalidate_disk(disk);

                if (mddev->ro)
                        mddev->ro = 0;
        } else
                mutex_unlock(&mddev->open_mutex);
        /*
         * Free resources if final stop
         */
        if (mode == 0) {
                pr_info("md: %s stopped.\n", mdname(mddev));

                if (mddev->bitmap_info.file) {
                        struct file *f = mddev->bitmap_info.file;
                        spin_lock(&mddev->lock);
                        mddev->bitmap_info.file = NULL;
                        spin_unlock(&mddev->lock);
                        fput(f);
                }
                mddev->bitmap_info.offset = 0;

                export_array(mddev);

                md_clean(mddev);
                if (mddev->hold_active == UNTIL_STOP)
                        mddev->hold_active = 0;
        }
        md_new_event(mddev);
        sysfs_notify_dirent_safe(mddev->sysfs_state);
        return 0;
}
#ifndef MODULE
static void autorun_array(struct mddev *mddev)
{
        struct md_rdev *rdev;
        int err;

        if (list_empty(&mddev->disks))
                return;

        pr_info("md: running: ");

        rdev_for_each(rdev, mddev) {
                char b[BDEVNAME_SIZE];
                pr_cont("<%s>", bdevname(rdev->bdev, b));
        }
        pr_cont("\n");

        err = do_md_run(mddev);
        if (err) {
                pr_warn("md: do_md_run() returned %d\n", err);
                do_md_stop(mddev, 0, NULL);
        }
}

/*
 * let's try to run arrays based on all disks that have arrived
 * until now. (those are in pending_raid_disks)
 *
 * the method: pick the first pending disk, collect all disks with
 * the same UUID, remove all from the pending list and put them into
 * the 'same_array' list. Then order this list based on superblock
 * update time (freshest comes first), kick out 'old' disks and
 * compare superblocks. If everything's fine then run it.
 *
 * If "unit" is allocated, then bump its reference count
 */
static void autorun_devices(int part)
{
        struct md_rdev *rdev0, *rdev, *tmp;
        struct mddev *mddev;
        char b[BDEVNAME_SIZE];

        pr_info("md: autorun ...\n");
        while (!list_empty(&pending_raid_disks)) {
                int unit;
                dev_t dev;
                LIST_HEAD(candidates);
                rdev0 = list_entry(pending_raid_disks.next,
                                   struct md_rdev, same_set);

                pr_debug("md: considering %s ...\n", bdevname(rdev0->bdev, b));
                INIT_LIST_HEAD(&candidates);
                rdev_for_each_list(rdev, tmp, &pending_raid_disks)
                        if (super_90_load(rdev, rdev0, 0) >= 0) {
                                pr_debug("md: adding %s ...\n",
                                         bdevname(rdev->bdev, b));
                                list_move(&rdev->same_set, &candidates);
                        }
                /*
                 * now we have a set of devices, with all of them having
                 * mostly sane superblocks. It's time to allocate the
                 * mddev.
                 */
                if (part) {
                        dev = MKDEV(mdp_major,
                                    rdev0->preferred_minor << MdpMinorShift);
                        unit = MINOR(dev) >> MdpMinorShift;
                } else {
                        dev = MKDEV(MD_MAJOR, rdev0->preferred_minor);
                        unit = MINOR(dev);
                }
                if (rdev0->preferred_minor != unit) {
                        pr_warn("md: unit number in %s is bad: %d\n",
                                bdevname(rdev0->bdev, b), rdev0->preferred_minor);
                        break;
                }

                md_probe(dev, NULL, NULL);
                mddev = mddev_find(dev);
                if (!mddev || !mddev->gendisk) {
                        if (mddev)
                                mddev_put(mddev);
                        break;
                }
                if (mddev_lock(mddev))
                        pr_warn("md: %s locked, cannot run\n", mdname(mddev));
                else if (mddev->raid_disks || mddev->major_version
                         || !list_empty(&mddev->disks)) {
                        pr_warn("md: %s already running, cannot run %s\n",
                                mdname(mddev), bdevname(rdev0->bdev, b));
                        mddev_unlock(mddev);
                } else {
                        pr_debug("md: created %s\n", mdname(mddev));
                        mddev->persistent = 1;
                        rdev_for_each_list(rdev, tmp, &candidates) {
                                list_del_init(&rdev->same_set);
                                if (bind_rdev_to_array(rdev, mddev))
                                        export_rdev(rdev);
                        }
                        autorun_array(mddev);
                        mddev_unlock(mddev);
                }
                /* on success, candidates will be empty, on error
                 * it won't...
                 */
                rdev_for_each_list(rdev, tmp, &candidates) {
                        list_del_init(&rdev->same_set);
                        export_rdev(rdev);
                }
                mddev_put(mddev);
        }
        pr_info("md: ... autorun DONE.\n");
}
#endif /* !MODULE */
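
/*
 * Note: the autorun path above is compiled only when md is built into
 * the kernel (#ifndef MODULE).  It is reached via the RAID_AUTORUN
 * ioctl in md_ioctl() below, which calls autostart_arrays() and in turn
 * ends up in autorun_devices().
 */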
static int get_version(void __user *arg)
{
        mdu_version_t ver;

        ver.major = MD_MAJOR_VERSION;
        ver.minor = MD_MINOR_VERSION;
        ver.patchlevel = MD_PATCHLEVEL_VERSION;

        if (copy_to_user(arg, &ver, sizeof(ver)))
                return -EFAULT;

        return 0;
}

static int get_array_info(struct mddev *mddev, void __user *arg)
{
        mdu_array_info_t info;
        int nr, working, insync, failed, spare;
        struct md_rdev *rdev;

        nr = working = insync = failed = spare = 0;
        rcu_read_lock();
        rdev_for_each_rcu(rdev, mddev) {
                nr++;
                if (test_bit(Faulty, &rdev->flags))
                        failed++;
                else {
                        working++;
                        if (test_bit(In_sync, &rdev->flags))
                                insync++;
                        else if (test_bit(Journal, &rdev->flags))
                                /* TODO: add journal count to md_u.h */
                                ;
                        else
                                spare++;
                }
        }
        rcu_read_unlock();

        info.major_version = mddev->major_version;
        info.minor_version = mddev->minor_version;
        info.patch_version = MD_PATCHLEVEL_VERSION;
        info.ctime         = clamp_t(time64_t, mddev->ctime, 0, U32_MAX);
        info.level         = mddev->level;
        info.size          = mddev->dev_sectors / 2;
        if (info.size != mddev->dev_sectors / 2) /* overflow */
                info.size = -1;
        info.nr_disks      = nr;
        info.raid_disks    = mddev->raid_disks;
        info.md_minor      = mddev->md_minor;
        info.not_persistent = !mddev->persistent;

        info.utime         = clamp_t(time64_t, mddev->utime, 0, U32_MAX);
        info.state         = 0;
        if (mddev->in_sync)
                info.state = (1<<MD_SB_CLEAN);
        if (mddev->bitmap && mddev->bitmap_info.offset)
                info.state |= (1<<MD_SB_BITMAP_PRESENT);
        if (mddev_is_clustered(mddev))
                info.state |= (1<<MD_SB_CLUSTERED);
        info.active_disks  = insync;
        info.working_disks = working;
        info.failed_disks  = failed;
        info.spare_disks   = spare;

        info.layout        = mddev->layout;
        info.chunk_size    = mddev->chunk_sectors << 9;

        if (copy_to_user(arg, &info, sizeof(info)))
                return -EFAULT;

        return 0;
}
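
/*
 * Unit check, for illustration: dev_sectors counts 512-byte sectors, so
 * info.size above is reported in KiB.  A 1 TiB member device has
 * 2147483648 sectors and reports size = 1073741824 KiB, which still fits
 * the 32-bit field; larger devices trip the overflow test above and
 * report size = -1 instead.
 */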
static int get_bitmap_file(struct mddev *mddev, void __user * arg)
{
        mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */
        char *ptr;
        int err;

        file = kzalloc(sizeof(*file), GFP_NOIO);
        if (!file)
                return -ENOMEM;

        err = 0;
        spin_lock(&mddev->lock);
        /* bitmap enabled */
        if (mddev->bitmap_info.file) {
                ptr = file_path(mddev->bitmap_info.file, file->pathname,
                                sizeof(file->pathname));
                if (IS_ERR(ptr))
                        err = PTR_ERR(ptr);
                else
                        memmove(file->pathname, ptr,
                                sizeof(file->pathname)-(ptr-file->pathname));
        }
        spin_unlock(&mddev->lock);

        if (err == 0 &&
            copy_to_user(arg, file, sizeof(*file)))
                err = -EFAULT;

        kfree(file);
        return err;
}

static int get_disk_info(struct mddev *mddev, void __user * arg)
{
        mdu_disk_info_t info;
        struct md_rdev *rdev;

        if (copy_from_user(&info, arg, sizeof(info)))
                return -EFAULT;

        rcu_read_lock();
        rdev = md_find_rdev_nr_rcu(mddev, info.number);
        if (rdev) {
                info.major = MAJOR(rdev->bdev->bd_dev);
                info.minor = MINOR(rdev->bdev->bd_dev);
                info.raid_disk = rdev->raid_disk;
                info.state = 0;
                if (test_bit(Faulty, &rdev->flags))
                        info.state |= (1<<MD_DISK_FAULTY);
                else if (test_bit(In_sync, &rdev->flags)) {
                        info.state |= (1<<MD_DISK_ACTIVE);
                        info.state |= (1<<MD_DISK_SYNC);
                }
                if (test_bit(Journal, &rdev->flags))
                        info.state |= (1<<MD_DISK_JOURNAL);
                if (test_bit(WriteMostly, &rdev->flags))
                        info.state |= (1<<MD_DISK_WRITEMOSTLY);
                if (test_bit(FailFast, &rdev->flags))
                        info.state |= (1<<MD_DISK_FAILFAST);
        } else {
                info.major = info.minor = 0;
                info.raid_disk = -1;
                info.state = (1<<MD_DISK_REMOVED);
        }
        rcu_read_unlock();

        if (copy_to_user(arg, &info, sizeof(info)))
                return -EFAULT;

        return 0;
}
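
/*
 * A minimal userspace sketch of how this pair of ioctls is typically
 * consumed (assumes a /dev/md0 node and <linux/raid/md_u.h>; not part
 * of this file):
 *
 *      mdu_array_info_t array;
 *      mdu_disk_info_t disk;
 *      int fd = open("/dev/md0", O_RDONLY);
 *
 *      if (ioctl(fd, GET_ARRAY_INFO, &array) == 0) {
 *              for (int i = 0; i < MD_SB_DISKS; i++) {
 *                      disk.number = i;
 *                      if (ioctl(fd, GET_DISK_INFO, &disk) == 0 &&
 *                          !(disk.state & (1 << MD_DISK_REMOVED)))
 *                              printf("slot %d -> %d:%d state %x\n",
 *                                     i, disk.major, disk.minor, disk.state);
 *              }
 *      }
 */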
static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
{
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        struct md_rdev *rdev;
        dev_t dev = MKDEV(info->major, info->minor);

        if (mddev_is_clustered(mddev) &&
            !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) {
                pr_warn("%s: Cannot add to clustered mddev.\n",
                        mdname(mddev));
                return -EINVAL;
        }

        if (info->major != MAJOR(dev) || info->minor != MINOR(dev))
                return -EOVERFLOW;

        if (!mddev->raid_disks) {
                int err;
                /* expecting a device which has a superblock */
                rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
                if (IS_ERR(rdev)) {
                        pr_warn("md: md_import_device returned %ld\n",
                                PTR_ERR(rdev));
                        return PTR_ERR(rdev);
                }
                if (!list_empty(&mddev->disks)) {
                        struct md_rdev *rdev0
                                = list_entry(mddev->disks.next,
                                             struct md_rdev, same_set);
                        err = super_types[mddev->major_version]
                                .load_super(rdev, rdev0, mddev->minor_version);
                        if (err < 0) {
                                pr_warn("md: %s has different UUID to %s\n",
                                        bdevname(rdev->bdev, b),
                                        bdevname(rdev0->bdev, b2));
                                export_rdev(rdev);
                                return -EINVAL;
                        }
                }
                err = bind_rdev_to_array(rdev, mddev);
                if (err)
                        export_rdev(rdev);
                return err;
        }

        /*
         * add_new_disk can be used once the array is assembled
         * to add "hot spares".  They must already have a superblock
         * written
         */
        if (mddev->pers) {
                int err;
                if (!mddev->pers->hot_add_disk) {
                        pr_warn("%s: personality does not support diskops!\n",
                                mdname(mddev));
                        return -EINVAL;
                }
                if (mddev->persistent)
                        rdev = md_import_device(dev, mddev->major_version,
                                                mddev->minor_version);
                else
                        rdev = md_import_device(dev, -1, -1);
                if (IS_ERR(rdev)) {
                        pr_warn("md: md_import_device returned %ld\n",
                                PTR_ERR(rdev));
                        return PTR_ERR(rdev);
                }
                /* set saved_raid_disk if appropriate */
                if (!mddev->persistent) {
                        if (info->state & (1<<MD_DISK_SYNC) &&
                            info->raid_disk < mddev->raid_disks) {
                                rdev->raid_disk = info->raid_disk;
                                set_bit(In_sync, &rdev->flags);
                                clear_bit(Bitmap_sync, &rdev->flags);
                        } else
                                rdev->raid_disk = -1;
                        rdev->saved_raid_disk = rdev->raid_disk;
                } else
                        super_types[mddev->major_version].
                                validate_super(mddev, rdev);
                if ((info->state & (1<<MD_DISK_SYNC)) &&
                     rdev->raid_disk != info->raid_disk) {
                        /* This was a hot-add request, but events don't
                         * match, so reject it.
                         */
                        export_rdev(rdev);
                        return -EINVAL;
                }

                clear_bit(In_sync, &rdev->flags); /* just to be sure */
                if (info->state & (1<<MD_DISK_WRITEMOSTLY))
                        set_bit(WriteMostly, &rdev->flags);
                else
                        clear_bit(WriteMostly, &rdev->flags);
                if (info->state & (1<<MD_DISK_FAILFAST))
                        set_bit(FailFast, &rdev->flags);
                else
                        clear_bit(FailFast, &rdev->flags);

                if (info->state & (1<<MD_DISK_JOURNAL)) {
                        struct md_rdev *rdev2;
                        bool has_journal = false;

                        /* make sure no existing journal disk */
                        rdev_for_each(rdev2, mddev) {
                                if (test_bit(Journal, &rdev2->flags)) {
                                        has_journal = true;
                                        break;
                                }
                        }
                        if (has_journal || mddev->bitmap) {
                                export_rdev(rdev);
                                return -EBUSY;
                        }
                        set_bit(Journal, &rdev->flags);
                }
                /*
                 * check whether the device shows up in other nodes
                 */
                if (mddev_is_clustered(mddev)) {
                        if (info->state & (1 << MD_DISK_CANDIDATE))
                                set_bit(Candidate, &rdev->flags);
                        else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) {
                                /* --add initiated by this node */
                                err = md_cluster_ops->add_new_disk(mddev, rdev);
                                if (err) {
                                        export_rdev(rdev);
                                        return err;
                                }
                        }
                }

                rdev->raid_disk = -1;
                err = bind_rdev_to_array(rdev, mddev);

                if (err)
                        export_rdev(rdev);

                if (mddev_is_clustered(mddev)) {
                        if (info->state & (1 << MD_DISK_CANDIDATE)) {
                                if (!err) {
                                        err = md_cluster_ops->new_disk_ack(mddev,
                                                err == 0);
                                        if (err)
                                                md_kick_rdev_from_array(rdev);
                                }
                        } else {
                                if (err)
                                        md_cluster_ops->add_new_disk_cancel(mddev);
                                else
                                        err = add_bound_rdev(rdev);
                        }
                } else if (!err)
                        err = add_bound_rdev(rdev);

                return err;
        }

        /* otherwise, add_new_disk is only allowed
         * for major_version==0 superblocks
         */
        if (mddev->major_version != 0) {
                pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev));
                return -EINVAL;
        }

        if (!(info->state & (1<<MD_DISK_FAULTY))) {
                int err;
                rdev = md_import_device(dev, -1, 0);
                if (IS_ERR(rdev)) {
                        pr_warn("md: error, md_import_device() returned %ld\n",
                                PTR_ERR(rdev));
                        return PTR_ERR(rdev);
                }
                rdev->desc_nr = info->number;
                if (info->raid_disk < mddev->raid_disks)
                        rdev->raid_disk = info->raid_disk;
                else
                        rdev->raid_disk = -1;

                if (rdev->raid_disk < mddev->raid_disks)
                        if (info->state & (1<<MD_DISK_SYNC))
                                set_bit(In_sync, &rdev->flags);

                if (info->state & (1<<MD_DISK_WRITEMOSTLY))
                        set_bit(WriteMostly, &rdev->flags);
                if (info->state & (1<<MD_DISK_FAILFAST))
                        set_bit(FailFast, &rdev->flags);

                if (!mddev->persistent) {
                        pr_debug("md: nonpersistent superblock ...\n");
                        rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
                } else
                        rdev->sb_start = calc_dev_sboffset(rdev);
                rdev->sectors = rdev->sb_start;

                err = bind_rdev_to_array(rdev, mddev);
                if (err) {
                        export_rdev(rdev);
                        return err;
                }
        }

        return 0;
}
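
/*
 * Summary of the three add_new_disk() entry modes, as implemented above:
 * with raid_disks == 0 the array is still being assembled, so the
 * device's superblock is loaded and UUID-checked against the first
 * member; with mddev->pers set the device is hot-added to a running
 * array (including the clustered candidate/ack dance); otherwise the
 * call is honoured only for version-0 superblocks, where the slot and
 * state come straight from the ioctl argument.
 */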
static int hot_remove_disk(struct mddev *mddev, dev_t dev)
{
        char b[BDEVNAME_SIZE];
        struct md_rdev *rdev;

        rdev = find_rdev(mddev, dev);
        if (!rdev)
                return -ENXIO;

        if (rdev->raid_disk < 0)
                goto kick_rdev;

        clear_bit(Blocked, &rdev->flags);
        remove_and_add_spares(mddev, rdev);

        if (rdev->raid_disk >= 0)
                goto busy;

kick_rdev:
        if (mddev_is_clustered(mddev))
                md_cluster_ops->remove_disk(mddev, rdev);

        md_kick_rdev_from_array(rdev);
        set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
        if (mddev->thread)
                md_wakeup_thread(mddev->thread);
        else
                md_update_sb(mddev, 1);
        md_new_event(mddev);

        return 0;
busy:
        pr_debug("md: cannot remove active disk %s from %s ...\n",
                 bdevname(rdev->bdev, b), mdname(mddev));
        return -EBUSY;
}

static int hot_add_disk(struct mddev *mddev, dev_t dev)
{
        char b[BDEVNAME_SIZE];
        int err;
        struct md_rdev *rdev;

        if (!mddev->pers)
                return -ENODEV;

        if (mddev->major_version != 0) {
                pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n",
                        mdname(mddev));
                return -EINVAL;
        }
        if (!mddev->pers->hot_add_disk) {
                pr_warn("%s: personality does not support diskops!\n",
                        mdname(mddev));
                return -EINVAL;
        }

        rdev = md_import_device(dev, -1, 0);
        if (IS_ERR(rdev)) {
                pr_warn("md: error, md_import_device() returned %ld\n",
                        PTR_ERR(rdev));
                return -EINVAL;
        }

        if (mddev->persistent)
                rdev->sb_start = calc_dev_sboffset(rdev);
        else
                rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;

        rdev->sectors = rdev->sb_start;

        if (test_bit(Faulty, &rdev->flags)) {
                pr_warn("md: can not hot-add faulty %s disk to %s!\n",
                        bdevname(rdev->bdev, b), mdname(mddev));
                err = -EINVAL;
                goto abort_export;
        }

        clear_bit(In_sync, &rdev->flags);
        rdev->desc_nr = -1;
        rdev->saved_raid_disk = -1;
        err = bind_rdev_to_array(rdev, mddev);
        if (err)
                goto abort_export;

        /*
         * The rest should better be atomic, we can have disk failures
         * noticed in interrupt contexts ...
         */
        rdev->raid_disk = -1;

        set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
        if (!mddev->thread)
                md_update_sb(mddev, 1);
        /*
         * Kick recovery, maybe this spare has to be added to the
         * array immediately.
         */
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        md_new_event(mddev);
        return 0;

abort_export:
        export_rdev(rdev);
        return err;
}
static int set_bitmap_file(struct mddev *mddev, int fd)
{
        int err = 0;

        if (mddev->pers) {
                if (!mddev->pers->quiesce || !mddev->thread)
                        return -EBUSY;
                if (mddev->recovery || mddev->sync_thread)
                        return -EBUSY;
                /* we should be able to change the bitmap.. */
        }

        if (fd >= 0) {
                struct inode *inode;
                struct file *f;

                if (mddev->bitmap || mddev->bitmap_info.file)
                        return -EEXIST; /* cannot add when bitmap is present */
                f = fget(fd);

                if (f == NULL) {
                        pr_warn("%s: error: failed to get bitmap file\n",
                                mdname(mddev));
                        return -EBADF;
                }

                inode = f->f_mapping->host;
                if (!S_ISREG(inode->i_mode)) {
                        pr_warn("%s: error: bitmap file must be a regular file\n",
                                mdname(mddev));
                        err = -EBADF;
                } else if (!(f->f_mode & FMODE_WRITE)) {
                        pr_warn("%s: error: bitmap file must open for write\n",
                                mdname(mddev));
                        err = -EBADF;
                } else if (atomic_read(&inode->i_writecount) != 1) {
                        pr_warn("%s: error: bitmap file is already in use\n",
                                mdname(mddev));
                        err = -EBUSY;
                }
                if (err) {
                        fput(f);
                        return err;
                }
                mddev->bitmap_info.file = f;
                mddev->bitmap_info.offset = 0; /* file overrides offset */
        } else if (mddev->bitmap == NULL)
                return -ENOENT; /* cannot remove what isn't there */
        err = 0;
        if (mddev->pers) {
                if (fd >= 0) {
                        struct bitmap *bitmap;

                        bitmap = bitmap_create(mddev, -1);
                        mddev_suspend(mddev);
                        if (!IS_ERR(bitmap)) {
                                mddev->bitmap = bitmap;
                                err = bitmap_load(mddev);
                        } else
                                err = PTR_ERR(bitmap);
                        if (err) {
                                bitmap_destroy(mddev);
                                fd = -1;
                        }
                        mddev_resume(mddev);
                } else if (fd < 0) {
                        mddev_suspend(mddev);
                        bitmap_destroy(mddev);
                        mddev_resume(mddev);
                }
        }
        if (fd < 0) {
                struct file *f = mddev->bitmap_info.file;
                if (f) {
                        spin_lock(&mddev->lock);
                        mddev->bitmap_info.file = NULL;
                        spin_unlock(&mddev->lock);
                        fput(f);
                }
        }

        return err;
}
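
/*
 * Note on the control flow above: when loading a freshly created bitmap
 * fails, fd is forced to -1 so that the final "if (fd < 0)" block also
 * runs on the failure path, detaching and releasing the file that was
 * installed in mddev->bitmap_info.file earlier in the call.
 */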
/*
 * set_array_info is used two different ways
 * The original usage is when creating a new array.
 * In this usage, raid_disks is > 0 and it together with
 *  level, size, not_persistent, layout, chunksize determine the
 *  shape of the array.
 *  This will always create an array with a type-0.90.0 superblock.
 * The newer usage is when assembling an array.
 * In this case raid_disks will be 0, and the major_version field is
 * used to determine which style super-blocks are to be found on the devices.
 * The minor and patch _version numbers are also kept in case the
 * super_block handler wishes to interpret them.
 */
static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
{
        if (info->raid_disks == 0) {
                /* just setting version number for superblock loading */
                if (info->major_version < 0 ||
                    info->major_version >= ARRAY_SIZE(super_types) ||
                    super_types[info->major_version].name == NULL) {
                        /* maybe try to auto-load a module? */
                        pr_warn("md: superblock version %d not known\n",
                                info->major_version);
                        return -EINVAL;
                }
                mddev->major_version = info->major_version;
                mddev->minor_version = info->minor_version;
                mddev->patch_version = info->patch_version;
                mddev->persistent = !info->not_persistent;
                /* ensure mddev_put doesn't delete this now that there
                 * is some minimal configuration.
                 */
                mddev->ctime = ktime_get_real_seconds();
                return 0;
        }
        mddev->major_version = MD_MAJOR_VERSION;
        mddev->minor_version = MD_MINOR_VERSION;
        mddev->patch_version = MD_PATCHLEVEL_VERSION;
        mddev->ctime = ktime_get_real_seconds();

        mddev->level = info->level;
        mddev->clevel[0] = 0;
        mddev->dev_sectors = 2 * (sector_t)info->size;
        mddev->raid_disks = info->raid_disks;
        /* don't set md_minor, it is determined by which /dev/md* was
         * opened
         */
        if (info->state & (1<<MD_SB_CLEAN))
                mddev->recovery_cp = MaxSector;
        else
                mddev->recovery_cp = 0;
        mddev->persistent = !info->not_persistent;
        mddev->external = 0;

        mddev->layout = info->layout;
        mddev->chunk_sectors = info->chunk_size >> 9;

        if (mddev->persistent) {
                mddev->max_disks = MD_SB_DISKS;
                mddev->flags = 0;
                mddev->sb_flags = 0;
        }
        set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);

        mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
        mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9);
        mddev->bitmap_info.offset = 0;

        mddev->reshape_position = MaxSector;

        /*
         * Generate a 128 bit UUID
         */
        get_random_bytes(mddev->uuid, 16);

        mddev->new_level = mddev->level;
        mddev->new_chunk_sectors = mddev->chunk_sectors;
        mddev->new_layout = mddev->layout;
        mddev->delta_disks = 0;
        mddev->reshape_backwards = 0;

        return 0;
}
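
/*
 * Illustration of the "creating" usage described above (a hedged
 * userspace sketch, not part of this file): raid_disks > 0 selects the
 * create path, so a 2-disk RAID1 could be described as
 *
 *      mdu_array_info_t info = {0};
 *      info.level = 1;                 // RAID1
 *      info.raid_disks = 2;
 *      info.size = 1048576;            // per-device size in KiB (1 GiB)
 *      ioctl(fd, SET_ARRAY_INFO, &info);
 *
 * followed by ADD_NEW_DISK for each member and RUN_ARRAY.  Note that
 * dev_sectors above is 2 * info->size, i.e. the KiB count converted to
 * 512-byte sectors.
 */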
void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
{
        lockdep_assert_held(&mddev->reconfig_mutex);

        if (mddev->external_size)
                return;

        mddev->array_sectors = array_sectors;
}
EXPORT_SYMBOL(md_set_array_sectors);

static int update_size(struct mddev *mddev, sector_t num_sectors)
{
        struct md_rdev *rdev;
        int rv;
        int fit = (num_sectors == 0);
        sector_t old_dev_sectors = mddev->dev_sectors;

        if (mddev->pers->resize == NULL)
                return -EINVAL;
        /* The "num_sectors" is the number of sectors of each device that
         * is used.  This can only make sense for arrays with redundancy.
         * linear and raid0 always use whatever space is available. We can only
         * consider changing this number if no resync or reconstruction is
         * happening, and if the new size is acceptable. It must fit before the
         * sb_start or, if that is <data_offset, it must fit before the size
         * of each device.  If num_sectors is zero, we find the largest size
         * that fits.
         */
        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            mddev->sync_thread)
                return -EBUSY;
        if (mddev->ro)
                return -EROFS;

        rdev_for_each(rdev, mddev) {
                sector_t avail = rdev->sectors;

                if (fit && (num_sectors == 0 || num_sectors > avail))
                        num_sectors = avail;
                if (avail < num_sectors)
                        return -ENOSPC;
        }
        rv = mddev->pers->resize(mddev, num_sectors);
        if (!rv) {
                if (mddev_is_clustered(mddev))
                        md_cluster_ops->update_size(mddev, old_dev_sectors);
                else if (mddev->queue) {
                        set_capacity(mddev->gendisk, mddev->array_sectors);
                        revalidate_disk(mddev->gendisk);
                }
        }
        return rv;
}
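
/*
 * Worked example for the sizing loop above: with num_sectors == 0
 * (fit == 1) and members offering 1000, 800 and 900 usable sectors,
 * num_sectors becomes 1000, shrinks to 800 before that member's
 * "avail < num_sectors" check, and stays at 800 for the third member,
 * i.e. it settles on the minimum across all members.  With an explicit
 * num_sectors of 900, the 800-sector member would instead fail the
 * check and the call would return -ENOSPC.
 */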
static int update_raid_disks(struct mddev *mddev, int raid_disks)
{
        int rv;
        struct md_rdev *rdev;
        /* change the number of raid disks */
        if (mddev->pers->check_reshape == NULL)
                return -EINVAL;
        if (mddev->ro)
                return -EROFS;
        if (raid_disks <= 0 ||
            (mddev->max_disks && raid_disks >= mddev->max_disks))
                return -EINVAL;
        if (mddev->sync_thread ||
            test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            mddev->reshape_position != MaxSector)
                return -EBUSY;

        rdev_for_each(rdev, mddev) {
                if (mddev->raid_disks < raid_disks &&
                    rdev->data_offset < rdev->new_data_offset)
                        return -EINVAL;
                if (mddev->raid_disks > raid_disks &&
                    rdev->data_offset > rdev->new_data_offset)
                        return -EINVAL;
        }

        mddev->delta_disks = raid_disks - mddev->raid_disks;
        if (mddev->delta_disks < 0)
                mddev->reshape_backwards = 1;
        else if (mddev->delta_disks > 0)
                mddev->reshape_backwards = 0;

        rv = mddev->pers->check_reshape(mddev);
        if (rv < 0) {
                mddev->delta_disks = 0;
                mddev->reshape_backwards = 0;
        }
        return rv;
}
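
/*
 * For example, growing a 4-disk array to 6 sets delta_disks = 2 and
 * reshapes forward; shrinking 4 to 3 sets delta_disks = -1 and flips
 * reshape_backwards so the reshape proceeds from the end of the devices
 * toward the start.  Either way the personality's check_reshape() has
 * the final say, and both fields are reset if it refuses.
 */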
/*
 * update_array_info is used to change the configuration of an
 * on-line array.
 * The version, ctime, level, size, raid_disks, not_persistent, layout, chunk_size
 * fields in the info are checked against the array.
 * Any differences that cannot be handled will cause an error.
 * Normally, only one change can be managed at a time.
 */
static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
{
        int rv = 0;
        int cnt = 0;
        int state = 0;

        /* calculate expected state, ignoring low bits */
        if (mddev->bitmap && mddev->bitmap_info.offset)
                state |= (1 << MD_SB_BITMAP_PRESENT);

        if (mddev->major_version != info->major_version ||
            mddev->minor_version != info->minor_version ||
/* mddev->patch_version != info->patch_version || */
            mddev->ctime != info->ctime         ||
            mddev->level != info->level         ||
/* mddev->layout != info->layout || */
            mddev->persistent != !info->not_persistent ||
            mddev->chunk_sectors != info->chunk_size >> 9 ||
            /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
            ((state ^ info->state) & 0xfffffe00))
                return -EINVAL;
        /* Check there is only one change */
        if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
                cnt++;
        if (mddev->raid_disks != info->raid_disks)
                cnt++;
        if (mddev->layout != info->layout)
                cnt++;
        if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT))
                cnt++;
        if (cnt == 0)
                return 0;
        if (cnt > 1)
                return -EINVAL;

        if (mddev->layout != info->layout) {
                /* Change layout
                 * we don't need to do anything at the md level, the
                 * personality will take care of it all.
                 */
                if (mddev->pers->check_reshape == NULL)
                        return -EINVAL;
                else {
                        mddev->new_layout = info->layout;
                        rv = mddev->pers->check_reshape(mddev);
                        if (rv)
                                mddev->new_layout = mddev->layout;
                        return rv;
                }
        }
        if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
                rv = update_size(mddev, (sector_t)info->size * 2);

        if (mddev->raid_disks != info->raid_disks)
                rv = update_raid_disks(mddev, info->raid_disks);

        if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
                if (mddev->pers->quiesce == NULL || mddev->thread == NULL) {
                        rv = -EINVAL;
                        goto err;
                }
                if (mddev->recovery || mddev->sync_thread) {
                        rv = -EBUSY;
                        goto err;
                }
                if (info->state & (1<<MD_SB_BITMAP_PRESENT)) {
                        struct bitmap *bitmap;
                        /* add the bitmap */
                        if (mddev->bitmap) {
                                rv = -EEXIST;
                                goto err;
                        }
                        if (mddev->bitmap_info.default_offset == 0) {
                                rv = -EINVAL;
                                goto err;
                        }
                        mddev->bitmap_info.offset =
                                mddev->bitmap_info.default_offset;
                        mddev->bitmap_info.space =
                                mddev->bitmap_info.default_space;
                        bitmap = bitmap_create(mddev, -1);
                        mddev_suspend(mddev);
                        if (!IS_ERR(bitmap)) {
                                mddev->bitmap = bitmap;
                                rv = bitmap_load(mddev);
                        } else
                                rv = PTR_ERR(bitmap);
                        if (rv)
                                bitmap_destroy(mddev);
                        mddev_resume(mddev);
                } else {
                        /* remove the bitmap */
                        if (!mddev->bitmap) {
                                rv = -ENOENT;
                                goto err;
                        }
                        if (mddev->bitmap->storage.file) {
                                rv = -EINVAL;
                                goto err;
                        }
                        if (mddev->bitmap_info.nodes) {
                                /* hold PW on all the bitmap lock */
                                if (md_cluster_ops->lock_all_bitmaps(mddev) <= 0) {
                                        pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n");
                                        rv = -EPERM;
                                        md_cluster_ops->unlock_all_bitmaps(mddev);
                                        goto err;
                                }

                                mddev->bitmap_info.nodes = 0;
                                md_cluster_ops->leave(mddev);
                        }
                        mddev_suspend(mddev);
                        bitmap_destroy(mddev);
                        mddev_resume(mddev);
                        mddev->bitmap_info.offset = 0;
                }
        }
        md_update_sb(mddev, 1);
        return rv;
err:
        return rv;
}
static int set_disk_faulty(struct mddev *mddev, dev_t dev)
{
        struct md_rdev *rdev;
        int err = 0;

        if (mddev->pers == NULL)
                return -ENODEV;

        rcu_read_lock();
        rdev = find_rdev_rcu(mddev, dev);
        if (!rdev)
                err = -ENODEV;
        else {
                md_error(mddev, rdev);
                if (!test_bit(Faulty, &rdev->flags))
                        err = -EBUSY;
        }
        rcu_read_unlock();
        return err;
}

/*
 * We have a problem here : there is no easy way to give a CHS
 * virtual geometry. We currently pretend that we have a 2 heads
 * 4 sectors (with a BIG number of cylinders...). This drives
 * dosfs just mad... ;-)
 */
static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mddev *mddev = bdev->bd_disk->private_data;

        geo->heads = 2;
        geo->sectors = 4;
        geo->cylinders = mddev->array_sectors / 8;
        return 0;
}
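
/*
 * Geometry check, for illustration: heads * sectors = 8, so cylinders =
 * array_sectors / 8 keeps heads * sectors * cylinders equal to the array
 * size in sectors.  A 1 GiB array (2097152 sectors) thus reports 2 heads,
 * 4 sectors/track and 262144 cylinders.
 */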
static inline bool md_ioctl_valid(unsigned int cmd)
{
        switch (cmd) {
        case ADD_NEW_DISK:
        case BLKROSET:
        case GET_ARRAY_INFO:
        case GET_BITMAP_FILE:
        case GET_DISK_INFO:
        case HOT_ADD_DISK:
        case HOT_REMOVE_DISK:
        case RAID_AUTORUN:
        case RAID_VERSION:
        case RESTART_ARRAY_RW:
        case RUN_ARRAY:
        case SET_ARRAY_INFO:
        case SET_BITMAP_FILE:
        case SET_DISK_FAULTY:
        case STOP_ARRAY:
        case STOP_ARRAY_RO:
        case CLUSTERED_DISK_NACK:
                return true;
        default:
                return false;
        }
}
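
/*
 * Everything not in the list above is rejected with -ENOTTY before any
 * capability check.  Of the accepted commands, only RAID_VERSION,
 * GET_ARRAY_INFO and GET_DISK_INFO may be issued without CAP_SYS_ADMIN,
 * as the switch at the top of md_ioctl() below shows; all state-changing
 * commands require it.
 */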
static int md_ioctl(struct block_device *bdev, fmode_t mode,
                    unsigned int cmd, unsigned long arg)
{
        int err = 0;
        void __user *argp = (void __user *)arg;
        struct mddev *mddev = NULL;
        int ro;
        bool did_set_md_closing = false;

        if (!md_ioctl_valid(cmd))
                return -ENOTTY;

        switch (cmd) {
        case RAID_VERSION:
        case GET_ARRAY_INFO:
        case GET_DISK_INFO:
                break;
        default:
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
        }

        /*
         * Commands dealing with the RAID driver but not any
         * particular array:
         */
        switch (cmd) {
        case RAID_VERSION:
                err = get_version(argp);
                goto out;

#ifndef MODULE
        case RAID_AUTORUN:
                err = 0;
                autostart_arrays(arg);
                goto out;
#endif
        default:;
        }

        /*
         * Commands creating/starting a new array:
         */

        mddev = bdev->bd_disk->private_data;

        if (!mddev) {
                BUG();
                goto out;
        }

        /* Some actions do not require the mutex */
        switch (cmd) {
        case GET_ARRAY_INFO:
                if (!mddev->raid_disks && !mddev->external)
                        err = -ENODEV;
                else
                        err = get_array_info(mddev, argp);
                goto out;

        case GET_DISK_INFO:
                if (!mddev->raid_disks && !mddev->external)
                        err = -ENODEV;
                else
                        err = get_disk_info(mddev, argp);
                goto out;

        case SET_DISK_FAULTY:
                err = set_disk_faulty(mddev, new_decode_dev(arg));
                goto out;

        case GET_BITMAP_FILE:
                err = get_bitmap_file(mddev, argp);
                goto out;

        }

        if (cmd == ADD_NEW_DISK)
                /* need to ensure md_delayed_delete() has completed */
                flush_workqueue(md_misc_wq);

        if (cmd == HOT_REMOVE_DISK)
                /* need to ensure recovery thread has run */
                wait_event_interruptible_timeout(mddev->sb_wait,
                                                 !test_bit(MD_RECOVERY_NEEDED,
                                                           &mddev->recovery),
                                                 msecs_to_jiffies(5000));
        if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
                /* Need to flush page cache, and ensure no-one else opens
                 * and writes
                 */
                mutex_lock(&mddev->open_mutex);
                if (mddev->pers && atomic_read(&mddev->openers) > 1) {
                        mutex_unlock(&mddev->open_mutex);
                        err = -EBUSY;
                        goto out;
                }
                WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
                set_bit(MD_CLOSING, &mddev->flags);
                did_set_md_closing = true;
                mutex_unlock(&mddev->open_mutex);
                sync_blockdev(bdev);
        }
        err = mddev_lock(mddev);
        if (err) {
                pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n",
                         err, cmd);
                goto out;
        }

        if (cmd == SET_ARRAY_INFO) {
                mdu_array_info_t info;
                if (!arg)
                        memset(&info, 0, sizeof(info));
                else if (copy_from_user(&info, argp, sizeof(info))) {
                        err = -EFAULT;
                        goto unlock;
                }
                if (mddev->pers) {
                        err = update_array_info(mddev, &info);
                        if (err) {
                                pr_warn("md: couldn't update array info. %d\n", err);
                                goto unlock;
                        }
                        goto unlock;
                }
                if (!list_empty(&mddev->disks)) {
                        pr_warn("md: array %s already has disks!\n", mdname(mddev));
                        err = -EBUSY;
                        goto unlock;
                }
                if (mddev->raid_disks) {
                        pr_warn("md: array %s already initialised!\n", mdname(mddev));
                        err = -EBUSY;
                        goto unlock;
                }
                err = set_array_info(mddev, &info);
                if (err) {
                        pr_warn("md: couldn't set array info. %d\n", err);
                        goto unlock;
                }
                goto unlock;
        }

        /*
         * Commands querying/configuring an existing array:
         */
        /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
         * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
        if ((!mddev->raid_disks && !mddev->external)
            && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
            && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
            && cmd != GET_BITMAP_FILE) {
                err = -ENODEV;
                goto unlock;
        }

        /*
         * Commands even a read-only array can execute:
         */
        switch (cmd) {
        case RESTART_ARRAY_RW:
                err = restart_array(mddev);
                goto unlock;

        case STOP_ARRAY:
                err = do_md_stop(mddev, 0, bdev);
                goto unlock;

        case STOP_ARRAY_RO:
                err = md_set_readonly(mddev, bdev);
                goto unlock;

        case HOT_REMOVE_DISK:
                err = hot_remove_disk(mddev, new_decode_dev(arg));
                goto unlock;

        case ADD_NEW_DISK:
                /* We can support ADD_NEW_DISK on read-only arrays
                 * only if we are re-adding a preexisting device.
                 * So require mddev->pers and MD_DISK_SYNC.
                 */
                if (mddev->pers) {
                        mdu_disk_info_t info;
                        if (copy_from_user(&info, argp, sizeof(info)))
                                err = -EFAULT;
                        else if (!(info.state & (1<<MD_DISK_SYNC)))
                                /* Need to clear read-only for this */
                                break;
                        else
                                err = add_new_disk(mddev, &info);
                        goto unlock;
                }
                break;

        case BLKROSET:
                if (get_user(ro, (int __user *)(arg))) {
                        err = -EFAULT;
                        goto unlock;
                }
                err = -EINVAL;

                /* if the bdev is going readonly the value of mddev->ro
                 * does not matter, no writes are coming
                 */
                if (ro)
                        goto unlock;

                /* are we already prepared for writes? */
                if (mddev->ro != 1)
                        goto unlock;

                /* transitioning to readauto need only happen for
                 * arrays that call md_write_start
                 */
                if (mddev->pers) {
                        err = restart_array(mddev);
                        if (err == 0) {
                                mddev->ro = 2;
                                set_disk_ro(mddev->gendisk, 0);
                        }
                }
                goto unlock;
        }

        /*
         * The remaining ioctls are changing the state of the
         * superblock, so we do not allow them on read-only arrays.
         */
        if (mddev->ro && mddev->pers) {
                if (mddev->ro == 2) {
                        mddev->ro = 0;
                        sysfs_notify_dirent_safe(mddev->sysfs_state);
                        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
                        /* mddev_unlock will wake thread */
                        /* If a device failed while we were read-only, we
                         * need to make sure the metadata is updated now.
                         */
                        if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) {
                                mddev_unlock(mddev);
                                wait_event(mddev->sb_wait,
                                           !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) &&
                                           !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
                                mddev_lock_nointr(mddev);
                        }
                } else {
                        err = -EROFS;
                        goto unlock;
                }
        }

        switch (cmd) {
        case ADD_NEW_DISK:
        {
                mdu_disk_info_t info;
                if (copy_from_user(&info, argp, sizeof(info)))
                        err = -EFAULT;
                else
                        err = add_new_disk(mddev, &info);
                goto unlock;
        }

        case CLUSTERED_DISK_NACK:
                if (mddev_is_clustered(mddev))
                        md_cluster_ops->new_disk_ack(mddev, false);
                else
                        err = -EINVAL;
                goto unlock;

        case HOT_ADD_DISK:
                err = hot_add_disk(mddev, new_decode_dev(arg));
                goto unlock;

        case RUN_ARRAY:
                err = do_md_run(mddev);
                goto unlock;

        case SET_BITMAP_FILE:
                err = set_bitmap_file(mddev, (int)arg);
                goto unlock;

        default:
                err = -EINVAL;
                goto unlock;
        }

unlock:
        if (mddev->hold_active == UNTIL_IOCTL &&
            err != -EINVAL)
                mddev->hold_active = 0;
        mddev_unlock(mddev);
out:
        if (did_set_md_closing)
                clear_bit(MD_CLOSING, &mddev->flags);
        return err;
}
#ifdef CONFIG_COMPAT
static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
                           unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case HOT_REMOVE_DISK:
        case HOT_ADD_DISK:
        case SET_DISK_FAULTY:
        case SET_BITMAP_FILE:
                /* These take in integer arg, do not convert */
                break;
        default:
                arg = (unsigned long)compat_ptr(arg);
                break;
        }

        return md_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static int md_open(struct block_device *bdev, fmode_t mode)
{
        /*
         * Succeed if we can lock the mddev, which confirms that
         * it isn't being stopped right now.
         */
        struct mddev *mddev = mddev_find(bdev->bd_dev);
        int err;

        if (!mddev)
                return -ENODEV;

        if (mddev->gendisk != bdev->bd_disk) {
                /* we are racing with mddev_put which is discarding this
                 * bd_disk.
                 */
                mddev_put(mddev);
                /* Wait until bdev->bd_disk is definitely gone */
                flush_workqueue(md_misc_wq);
                /* Then retry the open from the top */
                return -ERESTARTSYS;
        }
        BUG_ON(mddev != bdev->bd_disk->private_data);

        if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
                goto out;

        if (test_bit(MD_CLOSING, &mddev->flags)) {
                mutex_unlock(&mddev->open_mutex);
                err = -ENODEV;
                goto out;
        }

        err = 0;
        atomic_inc(&mddev->openers);
        mutex_unlock(&mddev->open_mutex);

        check_disk_change(bdev);
out:
        if (err)
                mddev_put(mddev);
        return err;
}

static void md_release(struct gendisk *disk, fmode_t mode)
{
        struct mddev *mddev = disk->private_data;

        BUG_ON(!mddev);
        atomic_dec(&mddev->openers);
        mddev_put(mddev);
}

static int md_media_changed(struct gendisk *disk)
{
        struct mddev *mddev = disk->private_data;

        return mddev->changed;
}

static int md_revalidate(struct gendisk *disk)
{
        struct mddev *mddev = disk->private_data;

        mddev->changed = 0;
        return 0;
}
static const struct block_device_operations md_fops =
{
        .owner          = THIS_MODULE,
        .open           = md_open,
        .release        = md_release,
        .ioctl          = md_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = md_compat_ioctl,
#endif
        .getgeo         = md_getgeo,
        .media_changed  = md_media_changed,
        .revalidate_disk = md_revalidate,
};
static int md_thread(void *arg)
{
        struct md_thread *thread = arg;

        /*
         * md_thread is a 'system-thread', its priority should be very
         * high. We avoid resource deadlocks individually in each
         * raid personality. (RAID5 does preallocation) We also use RR and
         * the very same RT priority as kswapd, thus we will never get
         * into a priority inversion deadlock.
         *
         * we definitely have to have equal or higher priority than
         * bdflush, otherwise bdflush will deadlock if there are too
         * many dirty RAID5 blocks.
         */

        allow_signal(SIGKILL);
        while (!kthread_should_stop()) {

                /* We need to wait INTERRUPTIBLE so that
                 * we don't add to the load-average.
                 * That means we need to be sure no signals are
                 * pending
                 */
                if (signal_pending(current))
                        flush_signals(current);

                wait_event_interruptible_timeout
                        (thread->wqueue,
                         test_bit(THREAD_WAKEUP, &thread->flags)
                         || kthread_should_stop() || kthread_should_park(),
                         thread->timeout);

                clear_bit(THREAD_WAKEUP, &thread->flags);
                if (kthread_should_park())
                        kthread_parkme();
                if (!kthread_should_stop())
                        thread->run(thread);
        }

        return 0;
}

void md_wakeup_thread(struct md_thread *thread)
{
        if (thread) {
                pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
                set_bit(THREAD_WAKEUP, &thread->flags);
                wake_up(&thread->wqueue);
        }
}
EXPORT_SYMBOL(md_wakeup_thread);

struct md_thread *md_register_thread(void (*run) (struct md_thread *),
                                     struct mddev *mddev, const char *name)
{
        struct md_thread *thread;

        thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL);
        if (!thread)
                return NULL;

        init_waitqueue_head(&thread->wqueue);

        thread->run = run;
        thread->mddev = mddev;
        thread->timeout = MAX_SCHEDULE_TIMEOUT;
        thread->tsk = kthread_run(md_thread, thread,
                                  "%s_%s",
                                  mdname(thread->mddev),
                                  name);
        if (IS_ERR(thread->tsk)) {
                kfree(thread);
                return NULL;
        }
        return thread;
}
EXPORT_SYMBOL(md_register_thread);
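
/*
 * Typical usage by a personality (hedged sketch; "mypers" and
 * mypers_do_work() are hypothetical names, not part of this file):
 *
 *      static void mypers_do_work(struct md_thread *thread)
 *      {
 *              struct mddev *mddev = thread->mddev;
 *              // drain queued work for this array ...
 *      }
 *
 *      // in the personality's run() method:
 *      mddev->thread = md_register_thread(mypers_do_work, mddev, "mypers");
 *
 * After that, any context may call md_wakeup_thread(mddev->thread) to
 * set THREAD_WAKEUP and kick the loop in md_thread() above.
 */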
void md_unregister_thread(struct md_thread **threadp)
{
        struct md_thread *thread = *threadp;
        if (!thread)
                return;
        pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
        /* Locking ensures that mddev_unlock does not wake_up a
         * non-existent thread
         */
        spin_lock(&pers_lock);
        *threadp = NULL;
        spin_unlock(&pers_lock);

        kthread_stop(thread->tsk);
        kfree(thread);
}
EXPORT_SYMBOL(md_unregister_thread);

void md_error(struct mddev *mddev, struct md_rdev *rdev)
{
        if (!rdev || test_bit(Faulty, &rdev->flags))
                return;

        if (!mddev->pers || !mddev->pers->error_handler)
                return;
        mddev->pers->error_handler(mddev, rdev);
        if (mddev->degraded)
                set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
        sysfs_notify_dirent_safe(rdev->sysfs_state);
        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);
        if (mddev->event_work.func)
                queue_work(md_misc_wq, &mddev->event_work);
        md_new_event(mddev);
}
EXPORT_SYMBOL(md_error);

/* seq_file implementation /proc/mdstat */

static void status_unused(struct seq_file *seq)
{
        int i = 0;
        struct md_rdev *rdev;

        seq_printf(seq, "unused devices: ");

        list_for_each_entry(rdev, &pending_raid_disks, same_set) {
                char b[BDEVNAME_SIZE];
                i++;
                seq_printf(seq, "%s ",
                           bdevname(rdev->bdev, b));
        }
        if (!i)
                seq_printf(seq, "<none>");

        seq_printf(seq, "\n");
}
static int status_resync(struct seq_file *seq, struct mddev *mddev)
{
        sector_t max_sectors, resync, res;
        unsigned long dt, db;
        sector_t rt;
        int scale;
        unsigned int per_milli;

        if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
            test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
                max_sectors = mddev->resync_max_sectors;
        else
                max_sectors = mddev->dev_sectors;

        resync = mddev->curr_resync;
        if (resync <= 3) {
                if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
                        /* Still cleaning up */
                        resync = max_sectors;
        } else
                resync -= atomic_read(&mddev->recovery_active);

        if (resync == 0) {
                if (mddev->recovery_cp < MaxSector) {
                        seq_printf(seq, "\tresync=PENDING");
                        return 1;
                }
                return 0;
        }
        if (resync < 3) {
                seq_printf(seq, "\tresync=DELAYED");
                return 1;
        }

        WARN_ON(max_sectors == 0);
        /* Pick 'scale' such that (resync>>scale)*1000 will fit
         * in a sector_t, and (max_sectors>>scale) will fit in a
         * u32, as those are the requirements for sector_div.
         * Thus 'scale' must be at least 10
         */
        scale = 10;
        if (sizeof(sector_t) > sizeof(unsigned long)) {
                while (max_sectors/2 > (1ULL<<(scale+32)))
                        scale++;
        }
        res = (resync>>scale)*1000;
        sector_div(res, (u32)((max_sectors>>scale)+1));

        per_milli = res;
        {
                int i, x = per_milli/50, y = 20-x;
                seq_printf(seq, "[");
                for (i = 0; i < x; i++)
                        seq_printf(seq, "=");
                seq_printf(seq, ">");
                for (i = 0; i < y; i++)
                        seq_printf(seq, ".");
                seq_printf(seq, "] ");
        }
        seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)",
                   (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
                    "reshape" :
                    (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) ?
                     "check" :
                     (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ?
                      "resync" : "recovery"))),
                   per_milli/10, per_milli % 10,
                   (unsigned long long) resync/2,
                   (unsigned long long) max_sectors/2);

        /*
         * dt: time from mark until now
         * db: blocks written from mark until now
         * rt: remaining time
         *
         * rt is a sector_t, so could be 32bit or 64bit.
         * So we divide before multiply in case it is 32bit and close
         * to the limit.
         * We scale the divisor (db) by 32 to avoid losing precision
         * near the end of resync when the number of remaining sectors
         * is close to 'db'.
         * We then divide rt by 32 after multiplying by db to compensate.
         * The '+1' avoids division by zero if db is very small.
         */
        dt = ((jiffies - mddev->resync_mark) / HZ);
        if (!dt) dt++;
        db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active))
                - mddev->resync_mark_cnt;
        rt = max_sectors - resync;    /* number of remaining sectors */
        sector_div(rt, db/32+1);
        rt *= dt;
        rt >>= 5;

        seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60,
                   ((unsigned long)rt % 60)/6);

        seq_printf(seq, " speed=%ldK/sec", db/2/dt);

        return 1;
}
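
/*
 * Worked example for the progress math above: resync = 1,000,000 and
 * max_sectors = 4,000,000 give res = (1000000>>10)*1000 = 976000, divided
 * by (4000000>>10)+1 = 3907, so per_milli = 249, printed as "24.9%" with
 * x = 249/50 = 4 of the 20 progress-bar slots filled.  For the ETA, with
 * dt = 10s and db = 100,000 sectors, the 3,000,000 remaining sectors are
 * divided by db/32+1 = 3126, multiplied by dt and shifted right by 5,
 * giving roughly (3,000,000/100,000) * 10 = 300 seconds (299 after
 * flooring), shown as "finish=4.9min".
 */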
static void *md_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct list_head *tmp;
        loff_t l = *pos;
        struct mddev *mddev;

        if (l >= 0x10000)
                return NULL;
        if (!l--)
                /* header */
                return (void*)1;

        spin_lock(&all_mddevs_lock);
        list_for_each(tmp, &all_mddevs)
                if (!l--) {
                        mddev = list_entry(tmp, struct mddev, all_mddevs);
                        mddev_get(mddev);
                        spin_unlock(&all_mddevs_lock);
                        return mddev;
                }
        spin_unlock(&all_mddevs_lock);
        if (!l--)
                return (void*)2;/* tail */
        return NULL;
}

static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct list_head *tmp;
        struct mddev *next_mddev, *mddev = v;

        ++*pos;
        if (v == (void*)2)
                return NULL;

        spin_lock(&all_mddevs_lock);
        if (v == (void*)1)
                tmp = all_mddevs.next;
        else
                tmp = mddev->all_mddevs.next;
        if (tmp != &all_mddevs)
                next_mddev = mddev_get(list_entry(tmp, struct mddev, all_mddevs));
        else {
                next_mddev = (void*)2;
                *pos = 0x10000;
        }
        spin_unlock(&all_mddevs_lock);

        if (v != (void*)1)
                mddev_put(mddev);
        return next_mddev;
}

static void md_seq_stop(struct seq_file *seq, void *v)
{
        struct mddev *mddev = v;

        if (mddev && v != (void*)1 && v != (void*)2)
                mddev_put(mddev);
}

static int md_seq_show(struct seq_file *seq, void *v)
{
        struct mddev *mddev = v;
        sector_t sectors;
        struct md_rdev *rdev;

        if (v == (void*)1) {
                struct md_personality *pers;
                seq_printf(seq, "Personalities : ");
                spin_lock(&pers_lock);
                list_for_each_entry(pers, &pers_list, list)
                        seq_printf(seq, "[%s] ", pers->name);

                spin_unlock(&pers_lock);
                seq_printf(seq, "\n");
                seq->poll_event = atomic_read(&md_event_count);
                return 0;
        }
        if (v == (void*)2) {
                status_unused(seq);
                return 0;
        }

        spin_lock(&mddev->lock);
        if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
                seq_printf(seq, "%s : %sactive", mdname(mddev),
                           mddev->pers ? "" : "in");
                if (mddev->pers) {
                        if (mddev->ro == 1)
                                seq_printf(seq, " (read-only)");
                        if (mddev->ro == 2)
                                seq_printf(seq, " (auto-read-only)");
                        seq_printf(seq, " %s", mddev->pers->name);
                }

                sectors = 0;
                rcu_read_lock();
                rdev_for_each_rcu(rdev, mddev) {
                        char b[BDEVNAME_SIZE];
                        seq_printf(seq, " %s[%d]",
                                   bdevname(rdev->bdev, b), rdev->desc_nr);
                        if (test_bit(WriteMostly, &rdev->flags))
                                seq_printf(seq, "(W)");
                        if (test_bit(Journal, &rdev->flags))
                                seq_printf(seq, "(J)");
                        if (test_bit(Faulty, &rdev->flags)) {
                                seq_printf(seq, "(F)");
                                continue;
                        }
                        if (rdev->raid_disk < 0)
                                seq_printf(seq, "(S)"); /* spare */
                        if (test_bit(Replacement, &rdev->flags))
                                seq_printf(seq, "(R)");
                        sectors += rdev->sectors;
                }
                rcu_read_unlock();

                if (!list_empty(&mddev->disks)) {
                        if (mddev->pers)
                                seq_printf(seq, "\n      %llu blocks",
                                           (unsigned long long)
                                           mddev->array_sectors / 2);
                        else
                                seq_printf(seq, "\n      %llu blocks",
                                           (unsigned long long)sectors / 2);
                }
                if (mddev->persistent) {
                        if (mddev->major_version != 0 ||
                            mddev->minor_version != 90) {
                                seq_printf(seq, " super %d.%d",
                                           mddev->major_version,
                                           mddev->minor_version);
                        }
                } else if (mddev->external)
                        seq_printf(seq, " super external:%s",
                                   mddev->metadata_type);
                else
                        seq_printf(seq, " super non-persistent");

                if (mddev->pers) {
                        mddev->pers->status(seq, mddev);
                        seq_printf(seq, "\n      ");
                        if (mddev->pers->sync_request) {
                                if (status_resync(seq, mddev))
                                        seq_printf(seq, "\n      ");
                        }
                } else
                        seq_printf(seq, "\n      ");

                bitmap_status(seq, mddev->bitmap);

                seq_printf(seq, "\n");
        }
        spin_unlock(&mddev->lock);

        return 0;
}

static const struct seq_operations md_seq_ops = {
        .start  = md_seq_start,
        .next   = md_seq_next,
        .stop   = md_seq_stop,
        .show   = md_seq_show,
};
static int md_seq_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        int error;

        error = seq_open(file, &md_seq_ops);
        if (error)
                return error;

        seq = file->private_data;
        seq->poll_event = atomic_read(&md_event_count);
        return error;
}

static int md_unloading;
static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
        struct seq_file *seq = filp->private_data;
        int mask;

        if (md_unloading)
                return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
        poll_wait(filp, &md_event_waiters, wait);

        /* always allow read */
        mask = POLLIN | POLLRDNORM;

        if (seq->poll_event != atomic_read(&md_event_count))
                mask |= POLLERR | POLLPRI;
        return mask;
}

static const struct file_operations md_seq_fops = {
        .owner          = THIS_MODULE,
        .open           = md_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
        .poll           = mdstat_poll,
};
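
/*
 * The poll support above lets monitoring tools sleep on /proc/mdstat and
 * wake on array events instead of re-reading it on a timer.  A hedged
 * userspace sketch (not part of this file):
 *
 *      char buf[4096];
 *      int fd = open("/proc/mdstat", O_RDONLY);
 *      struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *      read(fd, buf, sizeof(buf));             // arm poll_event
 *      while (poll(&pfd, 1, -1) > 0) {
 *              lseek(fd, 0, SEEK_SET);         // re-read after an event
 *              read(fd, buf, sizeof(buf));
 *      }
 */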
int register_md_personality(struct md_personality *p)
{
	pr_debug("md: %s personality registered for level %d\n",
		 p->name, p->level);
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	spin_unlock(&pers_lock);
	return 0;
}
EXPORT_SYMBOL(register_md_personality);

int unregister_md_personality(struct md_personality *p)
{
	pr_debug("md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}
EXPORT_SYMBOL(unregister_md_personality);

int register_md_cluster_operations(struct md_cluster_operations *ops,
				   struct module *module)
{
	int ret = 0;

	spin_lock(&pers_lock);
	if (md_cluster_ops != NULL)
		ret = -EALREADY;
	else {
		md_cluster_ops = ops;
		md_cluster_mod = module;
	}
	spin_unlock(&pers_lock);
	return ret;
}
EXPORT_SYMBOL(register_md_cluster_operations);

int unregister_md_cluster_operations(void)
{
	spin_lock(&pers_lock);
	md_cluster_ops = NULL;
	spin_unlock(&pers_lock);
	return 0;
}
EXPORT_SYMBOL(unregister_md_cluster_operations);

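/*
 * md_setup_cluster() is called when assembling a clustered array: it
 * loads md-cluster on demand, takes a module reference so it cannot be
 * unloaded while in use, and then joins the cluster.
 */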
int md_setup_cluster(struct mddev *mddev, int nodes)
{
	if (!md_cluster_ops)
		request_module("md-cluster");
	spin_lock(&pers_lock);
	/* ensure module won't be unloaded */
	if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
		pr_warn("can't find md-cluster module or get its reference.\n");
		spin_unlock(&pers_lock);
		return -ENOENT;
	}
	spin_unlock(&pers_lock);

	return md_cluster_ops->join(mddev, nodes);
}

void md_cluster_stop(struct mddev *mddev)
{
	if (!md_cluster_ops)
		return;
	md_cluster_ops->leave(mddev);
	module_put(md_cluster_mod);
}

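/*
 * Heuristic used to throttle resync: the array is considered "idle"
 * only if no member device has seen significantly more I/O (beyond a
 * 64-sector slack) than resync itself generated since the last check.
 * See the comment in the loop body for the accounting subtleties.
 */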
static int is_mddev_idle(struct mddev *mddev, int init)
{
	struct md_rdev *rdev;
	int idle;
	int curr_events;

	idle = 1;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
			      (int)part_stat_read(&disk->part0, sectors[1]) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_events to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there are few or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}

void md_done_sync(struct mddev *mddev, int blocks, int ok)
{
	/* another "blocks" (512-byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}
EXPORT_SYMBOL(md_done_sync);

/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 * A return value of 'false' means that the write wasn't recorded
 * and cannot proceed as the array is being suspended.
 */
bool md_write_start(struct mddev *mddev, struct bio *bi)
{
	int did_change = 0;

	if (bio_data_dir(bi) != WRITE)
		return true;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	rcu_read_lock();
	percpu_ref_get(&mddev->writes_pending);
	smp_mb(); /* Match smp_mb in set_in_sync() */
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
	if (mddev->in_sync || mddev->sync_checkers) {
		spin_lock(&mddev->lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
			set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock(&mddev->lock);
	}
	rcu_read_unlock();
	if (did_change)
		sysfs_notify_dirent_safe(mddev->sysfs_state);
	wait_event(mddev->sb_wait,
		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
		   mddev->suspended);
	if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
		percpu_ref_put(&mddev->writes_pending);
		return false;
	}
	return true;
}
EXPORT_SYMBOL(md_write_start);

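/*
 * Illustrative sketch (not taken verbatim from any personality): a
 * typical make_request path brackets array writes roughly as
 *
 *	if (!md_write_start(mddev, bio))
 *		return;		(array suspended, write not recorded)
 *	...clone and issue the constituent device bios...
 *	md_write_end(mddev);	(once per md_write_start()/md_write_inc())
 *
 * keeping writes_pending elevated for the life of the request so that
 * set_in_sync() cannot mark the array clean underneath it.
 */
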
/* md_write_inc can only be called when md_write_start() has
 * already been called at least once for the current request.
 * It increments the counter and is useful when a single request
 * is split into several parts.  Each part causes an increment and
 * so needs a matching md_write_end().
 * Unlike md_write_start(), it is safe to call md_write_inc() inside
 * a spinlocked region.
 */
void md_write_inc(struct mddev *mddev, struct bio *bi)
{
	if (bio_data_dir(bi) != WRITE)
		return;
	WARN_ON_ONCE(mddev->in_sync || mddev->ro);
	percpu_ref_get(&mddev->writes_pending);
}
EXPORT_SYMBOL(md_write_inc);

void md_write_end(struct mddev *mddev)
{
	percpu_ref_put(&mddev->writes_pending);

	if (mddev->safemode == 2)
		md_wakeup_thread(mddev->thread);
	else if (mddev->safemode_delay)
		/* The roundup() ensures this only performs locking once
		 * every ->safemode_delay jiffies
		 */
		mod_timer(&mddev->safemode_timer,
			  roundup(jiffies, mddev->safemode_delay) +
			  mddev->safemode_delay);
}
EXPORT_SYMBOL(md_write_end);

/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 */
void md_allow_write(struct mddev *mddev)
{
	if (!mddev->pers)
		return;
	if (mddev->ro)
		return;
	if (!mddev->pers->sync_request)
		return;

	spin_lock(&mddev->lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock(&mddev->lock);
		md_update_sb(mddev, 0);
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		/* wait for the dirty state to be recorded in the metadata */
		wait_event(mddev->sb_wait,
			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
	} else
		spin_unlock(&mddev->lock);
}
EXPORT_SYMBOL_GPL(md_allow_write);

#define SYNC_MARKS	10
#define SYNC_MARK_STEP	(3*HZ)
#define UPDATE_FREQUENCY (5*60*HZ)

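/*
 * md_do_sync() is the body of mddev->sync_thread and drives resync,
 * recovery, reshape and check/repair passes.  Throughput is sampled over
 * a rolling window of SYNC_MARKS marks taken every SYNC_MARK_STEP
 * jiffies (used for the speed_min/speed_max throttling below), and the
 * resume checkpoint is pushed out at least every UPDATE_FREQUENCY.
 */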
void md_do_sync(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct mddev *mddev2;
	unsigned int currspeed = 0,
		     window;
	sector_t max_sectors,j, io_sectors, recovery_done;
	unsigned long mark[SYNC_MARKS];
	unsigned long update_time;
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark,m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	struct md_rdev *rdev;
	char *desc, *action = NULL;
	struct blk_plug plug;
	int ret;

	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
	if (mddev->ro) {/* never try to sync a read-only array */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		return;
	}

	if (mddev_is_clustered(mddev)) {
		ret = md_cluster_ops->resync_start(mddev);
		if (ret)
			goto skip;

		set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags);
		if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
			test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
			test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
		     && ((unsigned long long)mddev->curr_resync_completed
			 < (unsigned long long)mddev->resync_max_sectors))
			goto skip;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
			desc = "data-check";
			action = "check";
		} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
			desc = "requested-resync";
			action = "repair";
		} else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	mddev->last_sync_action = action ?: desc;

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 */
	do {
		int mddev2_minor = -1;
		mddev->curr_resync = 2;

	try_again:
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			goto skip;
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (!mddev->parallel_resync
			&&  mddev2->curr_resync
			&&  match_mddev_units(mddev, mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				/* We need to wait 'interruptible' so as not to
				 * contribute to the load average, and not to
				 * be caught by 'softlockup'
				 */
				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
				if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					if (mddev2_minor != mddev2->md_minor) {
						mddev2_minor = mddev2->md_minor;
						pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n",
							desc, mdname(mddev),
							mdname(mddev2));
					}
					mddev_put(mddev2);
					if (signal_pending(current))
						flush_signals(current);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		atomic64_set(&mddev->resync_mismatches, 0);
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
		else if (!mddev->bitmap)
			j = mddev->recovery_cp;

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->resync_max_sectors;
	else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->dev_sectors;
		j = MaxSector;
		rcu_read_lock();
		rdev_for_each_rcu(rdev, mddev)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Journal, &rdev->flags) &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
		rcu_read_unlock();

		/* If there is a bitmap, we need to make sure all
		 * writes that started before we added a spare
		 * complete before we start doing a recovery.
		 * Otherwise the write might complete and (via
		 * bitmap_endwrite) set a bit in the bitmap after the
		 * recovery has checked that bit and skipped that
		 * region.
		 */
		if (mddev->bitmap) {
			mddev->pers->quiesce(mddev, 1);
			mddev->pers->quiesce(mddev, 0);
		}
	}

	pr_info("md: %s of RAID array %s\n", desc, mdname(mddev));
	pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev));
	pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n",
		 speed_max(mddev), desc);

	is_mddev_idle(mddev, 1); /* this initializes IO event counters */

	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	pr_debug("md: using %dk window, over a total of %lluk.\n",
		 window/2, (unsigned long long)max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	last_check = 0;

	if (j>2) {
		pr_debug("md: resuming %s of %s from checkpoint.\n",
			 desc, mdname(mddev));
		mddev->curr_resync = j;
	} else
		mddev->curr_resync = 3; /* no longer delayed */
	mddev->curr_resync_completed = j;
	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	md_new_event(mddev);
	update_time = jiffies;

	blk_start_plug(&plug);
	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;

		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    ((mddev->curr_resync > mddev->curr_resync_completed &&
		      (mddev->curr_resync - mddev->curr_resync_completed)
		      > (max_sectors >> 4)) ||
		     time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) ||
		     (j - mddev->curr_resync_completed)*2
		     >= mddev->resync_max - mddev->curr_resync_completed ||
		     mddev->curr_resync_completed > mddev->resync_max
			    )) {
			/* time to update curr_resync_completed */
			wait_event(mddev->recovery_wait,
				   atomic_read(&mddev->recovery_active) == 0);
			mddev->curr_resync_completed = j;
			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
			    j > mddev->recovery_cp)
				mddev->recovery_cp = j;
			update_time = jiffies;
			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
		}

		while (j >= mddev->resync_max &&
		       !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
			/* As this condition is controlled by user-space,
			 * we can block indefinitely, so use '_interruptible'
			 * to avoid triggering warnings.
			 */
			flush_signals(current); /* just in case */
			wait_event_interruptible(mddev->recovery_wait,
						 mddev->resync_max > j
						 || test_bit(MD_RECOVERY_INTR,
							     &mddev->recovery));
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		sectors = mddev->pers->sync_request(mddev, j, &skipped);
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			break;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		j += sectors;
		if (j > max_sectors)
			/* when skipping, extra large numbers can be returned. */
			j = max_sectors;
		if (j > 2)
			mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;
	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

		/*
		 * this loop exits only if either we are slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		cond_resched();

		recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
		currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if (currspeed > speed_max(mddev)) {
				msleep(500);
				goto repeat;
			}
			if (!is_mddev_idle(mddev, 0)) {
				/*
				 * Give other IO more of a chance.
				 * The faster the devices, the less we wait.
				 */
				wait_event(mddev->recovery_wait,
					   !atomic_read(&mddev->recovery_active));
			}
		}
	}
	pr_info("md: %s: %s %s.\n",mdname(mddev), desc,
		test_bit(MD_RECOVERY_INTR, &mddev->recovery)
		? "interrupted" : "done");
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
	blk_finish_plug(&plug);
	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
	    mddev->curr_resync > 3) {
		mddev->curr_resync_completed = mddev->curr_resync;
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	}
	mddev->pers->sync_request(mddev, max_sectors, &skipped);

	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 3) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					pr_debug("md: checkpointing %s of %s.\n",
						 desc, mdname(mddev));
					if (test_bit(MD_RECOVERY_ERROR,
						     &mddev->recovery))
						mddev->recovery_cp =
							mddev->curr_resync_completed;
					else
						mddev->recovery_cp =
							mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
			    test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
				rcu_read_lock();
				rdev_for_each_rcu(rdev, mddev)
					if (rdev->raid_disk >= 0 &&
					    mddev->delta_disks >= 0 &&
					    !test_bit(Journal, &rdev->flags) &&
					    !test_bit(Faulty, &rdev->flags) &&
					    !test_bit(In_sync, &rdev->flags) &&
					    rdev->recovery_offset < mddev->curr_resync)
						rdev->recovery_offset = mddev->curr_resync;
				rcu_read_unlock();
			}
		}
	}
 skip:
	/* set CHANGE_PENDING here since maybe another update is needed,
	 * so other nodes are informed. It should be harmless for normal
	 * raid */
	set_mask_bits(&mddev->sb_flags, 0,
		      BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS));

	spin_lock(&mddev->lock);
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
		/* We completed so min/max setting can be forgotten if used. */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			mddev->resync_min = 0;
		mddev->resync_max = MaxSector;
	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		mddev->resync_min = mddev->curr_resync_completed;
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	mddev->curr_resync = 0;
	spin_unlock(&mddev->lock);

	wake_up(&resync_wait);
	md_wakeup_thread(mddev->thread);
	return;
}
EXPORT_SYMBOL_GPL(md_do_sync);

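/*
 * remove_and_add_spares(): hot-remove failed or surplus devices, then
 * hot-add any usable spares.  If @this is non-NULL, only that device is
 * considered.  Returns the number of devices that are attached but not
 * yet in_sync, i.e. candidates for recovery.
 */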
static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this)
{
	struct md_rdev *rdev;
	int spares = 0;
	int removed = 0;
	bool remove_some = false;

	rdev_for_each(rdev, mddev) {
		if ((this == NULL || rdev == this) &&
		    rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    test_bit(Faulty, &rdev->flags) &&
		    atomic_read(&rdev->nr_pending)==0) {
			/* Faulty non-Blocked devices with nr_pending == 0
			 * never get nr_pending incremented,
			 * never get Faulty cleared, and never get Blocked set.
			 * So we can synchronize_rcu now rather than once per device
			 */
			remove_some = true;
			set_bit(RemoveSynchronized, &rdev->flags);
		}
	}

	if (remove_some)
		synchronize_rcu();
	rdev_for_each(rdev, mddev) {
		if ((this == NULL || rdev == this) &&
		    rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    ((test_bit(RemoveSynchronized, &rdev->flags) ||
		     (!test_bit(In_sync, &rdev->flags) &&
		      !test_bit(Journal, &rdev->flags))) &&
		    atomic_read(&rdev->nr_pending)==0)) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev) == 0) {
				sysfs_unlink_rdev(mddev, rdev);
				rdev->raid_disk = -1;
				removed++;
			}
		}
		if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
			clear_bit(RemoveSynchronized, &rdev->flags);
	}

	if (removed && mddev->kobj.sd)
		sysfs_notify(&mddev->kobj, NULL, "degraded");

	if (this && removed)
		goto no_add;

	rdev_for_each(rdev, mddev) {
		if (this && this != rdev)
			continue;
		if (test_bit(Candidate, &rdev->flags))
			continue;
		if (rdev->raid_disk >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Journal, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			spares++;
		if (rdev->raid_disk >= 0)
			continue;
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (!test_bit(Journal, &rdev->flags)) {
			if (mddev->ro &&
			    ! (rdev->saved_raid_disk >= 0 &&
			       !test_bit(Bitmap_sync, &rdev->flags)))
				continue;

			rdev->recovery_offset = 0;
		}
		if (mddev->pers->
		    hot_add_disk(mddev, rdev) == 0) {
			if (sysfs_link_rdev(mddev, rdev))
				/* failure here is OK */;
			if (!test_bit(Journal, &rdev->flags))
				spares++;
			md_new_event(mddev);
			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		}
	}
no_add:
	if (removed)
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	return spares;
}

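/*
 * Worker queued from md_check_recovery() via mddev->del_work that
 * actually starts mddev->sync_thread; if the thread cannot be created,
 * the MD_RECOVERY_* state set up by the caller is unwound.
 */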
static void md_start_sync(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	mddev->sync_thread = md_register_thread(md_do_sync,
						mddev,
						"resync");
	if (!mddev->sync_thread) {
		pr_warn("%s: could not start resync thread...\n",
			mdname(mddev));
		/* leave the spares where they are, it shouldn't hurt */
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		wake_up(&resync_wait);
		if (test_and_clear_bit(MD_RECOVERY_RECOVER,
				       &mddev->recovery))
			if (mddev->sysfs_action)
				sysfs_notify_dirent_safe(mddev->sysfs_action);
	} else
		md_wakeup_thread(mddev->sync_thread);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event(mddev);
}

/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(struct mddev *mddev)
{
	if (mddev->suspended)
		return;

	if (mddev->bitmap)
		bitmap_daemon_work(mddev);

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			pr_debug("md: %s in immediate safe mode\n",
				 mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if ( ! (
		(mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		if (!mddev->external && mddev->safemode == 1)
			mddev->safemode = 0;

		if (mddev->ro) {
			struct md_rdev *rdev;
			if (!mddev->external && mddev->in_sync)
				/* 'Blocked' flag not needed as failed devices
				 * will be recorded if array switched to read/write.
				 * Leaving it set will prevent the device
				 * from being removed.
				 */
				rdev_for_each(rdev, mddev)
					clear_bit(Blocked, &rdev->flags);
			/* On a read-only array we can:
			 * - remove failed devices
			 * - add already-in_sync devices if the array itself
			 *   is in-sync.
			 * As we only add devices that are already in-sync,
			 * we can activate the spares immediately.
			 */
			remove_and_add_spares(mddev, NULL);
			/* There is no thread, but we need to call
			 * ->spare_active and clear saved_raid_disk
			 */
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
			goto unlock;
		}

		if (mddev_is_clustered(mddev)) {
			struct md_rdev *rdev;
			/* kick the device if another node issued a
			 * remove disk.
			 */
			rdev_for_each(rdev, mddev) {
				if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
						rdev->raid_disk < 0)
					md_kick_rdev_from_array(rdev);
			}
		}

		if (!mddev->external && !mddev->in_sync) {
			spin_lock(&mddev->lock);
			set_in_sync(mddev);
			spin_unlock(&mddev->lock);
		}

		if (mddev->sb_flags)
			md_update_sb(mddev, 0);

		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			md_reap_sync_thread(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		mddev->curr_resync_completed = 0;
		spin_lock(&mddev->lock);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		spin_unlock(&mddev->lock);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto not_running;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape == NULL ||
			    mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto not_running;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev, NULL))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto not_running;

		if (mddev->pers->sync_request) {
			if (spares) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			INIT_WORK(&mddev->del_work, md_start_sync);
			queue_work(md_misc_wq, &mddev->del_work);
			goto unlock;
		}
	not_running:
		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			wake_up(&resync_wait);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent_safe(mddev->sysfs_action);
		}
	unlock:
		wake_up(&mddev->sb_wait);
		mddev_unlock(mddev);
	} else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
		/* Write superblock - thread that called mddev_suspend()
		 * holds reconfig_mutex for us.
		 */
		set_bit(MD_UPDATING_SB, &mddev->flags);
		smp_mb__after_atomic();
		if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
			md_update_sb(mddev, 0);
		clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
		wake_up(&mddev->sb_wait);
	}
}
EXPORT_SYMBOL(md_check_recovery);

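/*
 * Called (with the mddev locked) once the sync thread has set
 * MD_RECOVERY_DONE: reap the thread, activate spares on success, finish
 * any reshape, sync the superblock, and flag MD_RECOVERY_NEEDED so the
 * state machine runs once more.
 */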
void md_reap_sync_thread(struct mddev *mddev)
{
	struct md_rdev *rdev;

	/* resync has finished, collect result */
	md_unregister_thread(&mddev->sync_thread);
	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* success...*/
		/* activate any spares */
		if (mddev->pers->spare_active(mddev)) {
			sysfs_notify(&mddev->kobj, NULL,
				     "degraded");
			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		}
	}
	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    mddev->pers->finish_reshape)
		mddev->pers->finish_reshape(mddev);

	/* If array is no-longer degraded, then any saved_raid_disk
	 * information must be scrapped.
	 */
	if (!mddev->degraded)
		rdev_for_each(rdev, mddev)
			rdev->saved_raid_disk = -1;

	md_update_sb(mddev, 1);
	/* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can
	 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by
	 * clustered raid */
	if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags))
		md_cluster_ops->resync_finish(mddev);
	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
	clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	wake_up(&resync_wait);
	/* flag recovery needed just to double check */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	sysfs_notify_dirent_safe(mddev->sysfs_action);
	md_new_event(mddev);
	if (mddev->event_work.func)
		queue_work(md_misc_wq, &mddev->event_work);
}
EXPORT_SYMBOL(md_reap_sync_thread);

void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	sysfs_notify_dirent_safe(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags) &&
			   !test_bit(BlockedBadBlocks, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);

void md_finish_reshape(struct mddev *mddev)
{
	/* called by personality module when reshape completes. */
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev) {
		if (rdev->data_offset > rdev->new_data_offset)
			rdev->sectors += rdev->data_offset - rdev->new_data_offset;
		else
			rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
		rdev->data_offset = rdev->new_data_offset;
	}
}
EXPORT_SYMBOL(md_finish_reshape);

/* Bad block management */

/* Returns 1 on success, 0 on failure */
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int is_new)
{
	struct mddev *mddev = rdev->mddev;
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
	if (rv == 0) {
		/* Make sure they get written out promptly */
		if (test_bit(ExternalBbl, &rdev->flags))
			sysfs_notify(&rdev->kobj, NULL,
				     "unacknowledged_bad_blocks");
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		set_mask_bits(&mddev->sb_flags, 0,
			      BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING));
		md_wakeup_thread(rdev->mddev->thread);
		return 1;
	} else
		return 0;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);

int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			 int is_new)
{
	int rv;
	if (is_new)
		s += rdev->new_data_offset;
	else
		s += rdev->data_offset;
	rv = badblocks_clear(&rdev->badblocks, s, sectors);
	if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
		sysfs_notify(&rdev->kobj, NULL, "bad_blocks");
	return rv;
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;
	int need_delay = 0;

	for_each_mddev(mddev, tmp) {
		if (mddev_trylock(mddev)) {
			if (mddev->pers)
				__md_stop_writes(mddev);
			if (mddev->persistent)
				mddev->safemode = 2;
			mddev_unlock(mddev);
		}
		need_delay = 1;
	}
	/*
	 * certain more exotic SCSI devices are known to be
	 * volatile wrt too early system reboots. While the
	 * right place to handle this issue is the given
	 * driver, we do want to have a safe RAID driver ...
	 */
	if (need_delay)
		mdelay(1000*1);

	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}

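/*
 * Clustered-md support: when another node updates the on-disk metadata
 * we are told to re-read it, and check_sb_changes() applies any
 * resulting size, device-role or raid_disks changes to our in-memory
 * state.
 */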
static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	struct md_rdev *rdev2;
	int role, ret;
	char b[BDEVNAME_SIZE];

	/*
	 * If size is changed in another node then we need to
	 * do resize as well.
	 */
	if (mddev->dev_sectors != le64_to_cpu(sb->size)) {
		ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
		if (ret)
			pr_info("md-cluster: resize failed\n");
		else
			bitmap_update_sb(mddev->bitmap);
	}

	/* Check for change of roles in the active devices */
	rdev_for_each(rdev2, mddev) {
		if (test_bit(Faulty, &rdev2->flags))
			continue;

		/* Check if the roles changed */
		role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);

		if (test_bit(Candidate, &rdev2->flags)) {
			if (role == 0xfffe) {
				pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b));
				md_kick_rdev_from_array(rdev2);
				continue;
			}
			else
				clear_bit(Candidate, &rdev2->flags);
		}

		if (role != rdev2->raid_disk) {
			/* got activated */
			if (rdev2->raid_disk == -1 && role != 0xffff) {
				rdev2->saved_raid_disk = role;
				ret = remove_and_add_spares(mddev, rdev2);
				pr_info("Activated spare: %s\n",
					bdevname(rdev2->bdev,b));
				/* wakeup mddev->thread here, so array could
				 * perform resync with the new activated disk */
				set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
				md_wakeup_thread(mddev->thread);
			}
			/* device faulty
			 * We just want to do the minimum to mark the disk
			 * as faulty. The recovery is performed by the
			 * one who initiated the error.
			 */
			if ((role == 0xfffe) || (role == 0xfffd)) {
				md_error(mddev, rdev2);
				clear_bit(Blocked, &rdev2->flags);
			}
		}
	}

	if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
		update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));

	/* Finally set the event to be up to date */
	mddev->events = le64_to_cpu(sb->events);
}

static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
	int err;
	struct page *swapout = rdev->sb_page;
	struct mdp_superblock_1 *sb;

	/* Store the sb page of the rdev in the swapout temporary
	 * variable in case we err in the future
	 */
	rdev->sb_page = NULL;
	err = alloc_disk_sb(rdev);
	if (err == 0) {
		ClearPageUptodate(rdev->sb_page);
		rdev->sb_loaded = 0;
		err = super_types[mddev->major_version].
			load_super(rdev, NULL, mddev->minor_version);
	}
	if (err < 0) {
		pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
			__func__, __LINE__, rdev->desc_nr, err);
		if (rdev->sb_page)
			put_page(rdev->sb_page);
		rdev->sb_page = swapout;
		rdev->sb_loaded = 1;
		return err;
	}

	sb = page_address(rdev->sb_page);
	/* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET
	 * is not set
	 */
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET))
		rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);

	/* The other node finished recovery, call spare_active to set
	 * device In_sync and mddev->degraded
	 */
	if (rdev->recovery_offset == MaxSector &&
	    !test_bit(In_sync, &rdev->flags) &&
	    mddev->pers->spare_active(mddev))
		sysfs_notify(&mddev->kobj, NULL, "degraded");

	put_page(swapout);
	return 0;
}

void md_reload_sb(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;
	int err;

	/* Find the rdev */
	rdev_for_each_rcu(rdev, mddev) {
		if (rdev->desc_nr == nr)
			break;
	}

	if (!rdev || rdev->desc_nr != nr) {
		pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
		return;
	}

	err = read_rdev(mddev, rdev);
	if (err < 0)
		return;

	check_sb_changes(mddev, rdev);

	/* Read all rdev's to update recovery_offset */
	rdev_for_each_rcu(rdev, mddev)
		read_rdev(mddev, rdev);
}
EXPORT_SYMBOL(md_reload_sb);

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static DEFINE_MUTEX(detected_devices_mutex);
static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		mutex_lock(&detected_devices_mutex);
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
		mutex_unlock(&detected_devices_mutex);
	}
}

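/*
 * autostart_arrays() drains the list populated above.  Note the
 * md_import_device(dev, 0, 90) call: in-kernel autodetection only
 * understands 0.90 superblocks; arrays with v1.x metadata are expected
 * to be assembled from user space (e.g. by mdadm).
 */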
static void autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	pr_info("md: Autodetecting RAID arrays.\n");

	mutex_lock(&detected_devices_mutex);
	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		mutex_unlock(&detected_devices_mutex);
		rdev = md_import_device(dev, 0, 90);
		mutex_lock(&detected_devices_mutex);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags))
			continue;

		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}
	mutex_unlock(&detected_devices_mutex);

	pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

  8265. #endif /* !MODULE */
  8266. static __exit void md_exit(void)
  8267. {
  8268. struct mddev *mddev;
  8269. struct list_head *tmp;
  8270. int delay = 1;
  8271. blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
  8272. blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
  8273. unregister_blkdev(MD_MAJOR,"md");
  8274. unregister_blkdev(mdp_major, "mdp");
  8275. unregister_reboot_notifier(&md_notifier);
  8276. unregister_sysctl_table(raid_table_header);
  8277. /* We cannot unload the modules while some process is
  8278. * waiting for us in select() or poll() - wake them up
  8279. */
  8280. md_unloading = 1;
  8281. while (waitqueue_active(&md_event_waiters)) {
  8282. /* not safe to leave yet */
  8283. wake_up(&md_event_waiters);
  8284. msleep(delay);
  8285. delay += delay;
  8286. }
  8287. remove_proc_entry("mdstat", NULL);
  8288. for_each_mddev(mddev, tmp) {
  8289. export_array(mddev);
  8290. mddev->ctime = 0;
  8291. mddev->hold_active = 0;
  8292. /*
  8293. * for_each_mddev() will call mddev_put() at the end of each
  8294. * iteration. As the mddev is now fully clear, this will
  8295. * schedule the mddev for destruction by a workqueue, and the
  8296. * destroy_workqueue() below will wait for that to complete.
  8297. */
  8298. }
  8299. destroy_workqueue(md_misc_wq);
  8300. destroy_workqueue(md_wq);
  8301. }
  8302. subsys_initcall(md_init);
  8303. module_exit(md_exit)
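/*
 * Module parameters.  'start_ro' maps onto start_readonly: when set,
 * newly started arrays come up in auto-read-only mode (mddev->ro == 2)
 * and switch to read-write on the first write, so merely assembling an
 * array does not mark it dirty and any needed resync is deferred until
 * that first write.
 */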
static int get_ro(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, const struct kernel_param *kp)
{
	return kstrtouint(val, 10, (unsigned int *)&start_readonly);
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
module_param(create_on_open, bool, S_IRUSR|S_IWUSR);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);