nfs4state.c

/*
 *  Copyright (c) 2001 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson <kandros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/hash.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"
#include "netns.h"
#define NFSDDBG_FACILITY        NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
        .si_generation = ~0,
        .si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
        /* all fields zero */
};
static const stateid_t currentstateid = {
        .si_generation = 1,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;

static void free_session(struct nfsd4_session *);

static struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static bool is_session_dead(struct nfsd4_session *ses)
{
        return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
        if (atomic_read(&ses->se_ref) > ref_held_by_me)
                return nfserr_jukebox;
        ses->se_flags |= NFS4_SESSION_DEAD;
        return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
        return clp->cl_time == 0;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        if (is_client_expired(clp))
                return nfserr_expired;
        atomic_inc(&clp->cl_refcount);
        return nfs_ok;
}
/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        if (is_client_expired(clp)) {
                WARN_ON(1);
                printk("%s: client (clientid %08x/%08x) already expired\n",
                        __func__,
                        clp->cl_clientid.cl_boot,
                        clp->cl_clientid.cl_id);
                return;
        }

        dprintk("renewing client (clientid %08x/%08x)\n",
                        clp->cl_clientid.cl_boot,
                        clp->cl_clientid.cl_id);
        list_move_tail(&clp->cl_lru, &nn->client_lru);
        clp->cl_time = get_seconds();
}

static inline void
renew_client(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        spin_lock(&nn->client_lock);
        renew_client_locked(clp);
        spin_unlock(&nn->client_lock);
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        if (!atomic_dec_and_test(&clp->cl_refcount))
                return;
        if (!is_client_expired(clp))
                renew_client_locked(clp);
}

static void put_client_renew(struct nfs4_client *clp)
{
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
                return;
        if (!is_client_expired(clp))
                renew_client_locked(clp);
        spin_unlock(&nn->client_lock);
}
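/*
 * Editor's note (illustrative, not part of the original source): the
 * atomic_dec_and_lock() call above implements the usual "dec and lock"
 * idiom -- the refcount is decremented, and only when it reaches zero
 * is the spinlock taken and true returned.  So in put_client_renew():
 *
 *	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
 *		return;		(not the last reference; lock never taken)
 *	... final-reference work runs here with client_lock held ...
 *	spin_unlock(&nn->client_lock);
 */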
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
        __be32 status;

        if (is_session_dead(ses))
                return nfserr_badsession;
        status = get_client_locked(ses->se_client);
        if (status)
                return status;
        atomic_inc(&ses->se_ref);
        return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
                free_session(ses);
        put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
        struct nfs4_client *clp = ses->se_client;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        spin_lock(&nn->client_lock);
        nfsd4_put_session_locked(ses);
        spin_unlock(&nn->client_lock);
}

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
        atomic_inc(&sop->so_count);
        return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
        return (sop->so_owner.len == owner->len) &&
                0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
                        struct nfs4_client *clp)
{
        struct nfs4_stateowner *so;

        lockdep_assert_held(&clp->cl_lock);

        list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
                            so_strhash) {
                if (!so->so_is_open_owner)
                        continue;
                if (same_owner_str(so, &open->op_owner))
                        return openowner(nfs4_get_stateowner(so));
        }
        return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
                        struct nfs4_client *clp)
{
        struct nfs4_openowner *oo;

        spin_lock(&clp->cl_lock);
        oo = find_openstateowner_str_locked(hashval, open, clp);
        spin_unlock(&clp->cl_lock);
        return oo;
}

static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
        unsigned char *cptr = (unsigned char *) ptr;
        u32 x = 0;

        while (nbytes--) {
                x *= 37;
                x += *cptr++;
        }
        return x;
}
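/*
 * Worked example (editor's addition): opaque_hashval() is a simple
 * multiply-by-37 rolling hash over the opaque owner bytes.  For the
 * two-byte owner "ab":
 *
 *	x = 0*37  + 'a' (97) =   97
 *	x = 97*37 + 'b' (98) = 3687
 *
 * Callers such as ownerstr_hashval() then mask the result down to a
 * bucket index, e.g. 3687 & OWNER_HASH_MASK.
 */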
static void nfsd4_free_file(struct nfs4_file *f)
{
        kmem_cache_free(file_slab, f);
}

static inline void
put_nfs4_file(struct nfs4_file *fi)
{
        might_lock(&state_lock);

        if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
                hlist_del(&fi->fi_hash);
                spin_unlock(&state_lock);
                nfsd4_free_file(fi);
        }
}

static inline void
get_nfs4_file(struct nfs4_file *fi)
{
        atomic_inc(&fi->fi_ref);
}

static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
        if (f->fi_fds[oflag])
                return get_file(f->fi_fds[oflag]);
        return NULL;
}

static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
        struct file *ret;

        lockdep_assert_held(&f->fi_lock);

        ret = __nfs4_get_fd(f, O_WRONLY);
        if (!ret)
                ret = __nfs4_get_fd(f, O_RDWR);
        return ret;
}

static struct file *
find_writeable_file(struct nfs4_file *f)
{
        struct file *ret;

        spin_lock(&f->fi_lock);
        ret = find_writeable_file_locked(f);
        spin_unlock(&f->fi_lock);

        return ret;
}

static struct file *find_readable_file_locked(struct nfs4_file *f)
{
        struct file *ret;

        lockdep_assert_held(&f->fi_lock);

        ret = __nfs4_get_fd(f, O_RDONLY);
        if (!ret)
                ret = __nfs4_get_fd(f, O_RDWR);
        return ret;
}

static struct file *
find_readable_file(struct nfs4_file *f)
{
        struct file *ret;

        spin_lock(&f->fi_lock);
        ret = find_readable_file_locked(f);
        spin_unlock(&f->fi_lock);

        return ret;
}

static struct file *
find_any_file(struct nfs4_file *f)
{
        struct file *ret;

        spin_lock(&f->fi_lock);
        ret = __nfs4_get_fd(f, O_RDWR);
        if (!ret) {
                ret = __nfs4_get_fd(f, O_WRONLY);
                if (!ret)
                        ret = __nfs4_get_fd(f, O_RDONLY);
        }
        spin_unlock(&f->fi_lock);
        return ret;
}
static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
        unsigned int ret;

        ret = opaque_hashval(ownername->data, ownername->len);
        return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
{
        return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
        return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static bool nfsd_fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
{
        return fh1->fh_size == fh2->fh_size &&
                !memcmp(fh1->fh_base.fh_pad,
                        fh2->fh_base.fh_pad,
                        fh1->fh_size);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
        lockdep_assert_held(&fp->fi_lock);

        if (access & NFS4_SHARE_ACCESS_WRITE)
                atomic_inc(&fp->fi_access[O_WRONLY]);
        if (access & NFS4_SHARE_ACCESS_READ)
                atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
        lockdep_assert_held(&fp->fi_lock);

        /* Does this access mode make sense? */
        if (access & ~NFS4_SHARE_ACCESS_BOTH)
                return nfserr_inval;

        /* Does it conflict with a deny mode already set? */
        if ((access & fp->fi_share_deny) != 0)
                return nfserr_share_denied;

        __nfs4_file_get_access(fp, access);
        return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
        /* Common case is that there is no deny mode. */
        if (deny) {
                /* Does this deny mode make sense? */
                if (deny & ~NFS4_SHARE_DENY_BOTH)
                        return nfserr_inval;

                if ((deny & NFS4_SHARE_DENY_READ) &&
                    atomic_read(&fp->fi_access[O_RDONLY]))
                        return nfserr_share_denied;

                if ((deny & NFS4_SHARE_DENY_WRITE) &&
                    atomic_read(&fp->fi_access[O_WRONLY]))
                        return nfserr_share_denied;
        }
        return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
        might_lock(&fp->fi_lock);

        if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
                struct file *f1 = NULL;
                struct file *f2 = NULL;

                swap(f1, fp->fi_fds[oflag]);
                if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
                        swap(f2, fp->fi_fds[O_RDWR]);
                spin_unlock(&fp->fi_lock);
                if (f1)
                        fput(f1);
                if (f2)
                        fput(f2);
        }
}
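/*
 * Editor's note (not part of the original source): __nfs4_file_put_access()
 * detaches the struct file pointers from fi_fds[] with swap() while fi_lock
 * is held, but calls fput() only after dropping the lock.  fput() on the
 * last reference can sleep (it may trigger filesystem I/O), so it must
 * never run under a spinlock.
 */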
static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
        WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

        if (access & NFS4_SHARE_ACCESS_WRITE)
                __nfs4_file_put_access(fp, O_WRONLY);
        if (access & NFS4_SHARE_ACCESS_READ)
                __nfs4_file_put_access(fp, O_RDONLY);
}

static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
                                         struct kmem_cache *slab)
{
        struct nfs4_stid *stid;
        int new_id;

        stid = kmem_cache_zalloc(slab, GFP_KERNEL);
        if (!stid)
                return NULL;

        idr_preload(GFP_KERNEL);
        spin_lock(&cl->cl_lock);
        new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
        spin_unlock(&cl->cl_lock);
        idr_preload_end();
        if (new_id < 0)
                goto out_free;
        stid->sc_client = cl;
        stid->sc_stateid.si_opaque.so_id = new_id;
        stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
        /* Will be incremented before return to client: */
        atomic_set(&stid->sc_count, 1);

        /*
         * It shouldn't be a problem to reuse an opaque stateid value.
         * I don't think it is for 4.1.  But with 4.0 I worry that, for
         * example, a stray write retransmission could be accepted by
         * the server when it should have been rejected.  Therefore,
         * adopt a trick from the sctp code to attempt to maximize the
         * amount of time until an id is reused, by ensuring they always
         * "increase" (mod INT_MAX):
         */
        return stid;
out_free:
        kmem_cache_free(slab, stid);
        return NULL;
}
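/*
 * Editor's note (illustrative, not part of the original source):
 * idr_alloc_cyclic() starts each search just above the previously
 * allocated id, so successive stateids for a client get so_id values
 * 0, 1, 2, ... and only wrap around to the lowest free id after
 * INT_MAX -- the "always increase (mod INT_MAX)" behaviour that the
 * comment above relies on to delay stateid reuse.
 */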
static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
        struct nfs4_stid *stid;
        struct nfs4_ol_stateid *stp;

        stid = nfs4_alloc_stid(clp, stateid_slab);
        if (!stid)
                return NULL;
        stp = openlockstateid(stid);
        stp->st_stid.sc_free = nfs4_free_ol_stateid;
        return stp;
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
        kmem_cache_free(deleg_slab, stid);
        atomic_long_dec(&num_delegations);
}
/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32 bits and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
        int     entries, old_entries;
        time_t  swap_time;
        int     new; /* index into 'set' */
        DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
        u32 hash;
        struct bloom_pair *bd = &blocked_delegations;

        if (bd->entries == 0)
                return 0;
        if (seconds_since_boot() - bd->swap_time > 30) {
                spin_lock(&blocked_delegations_lock);
                if (seconds_since_boot() - bd->swap_time > 30) {
                        bd->entries -= bd->old_entries;
                        bd->old_entries = bd->entries;
                        memset(bd->set[bd->new], 0,
                               sizeof(bd->set[0]));
                        bd->new = 1-bd->new;
                        bd->swap_time = seconds_since_boot();
                }
                spin_unlock(&blocked_delegations_lock);
        }
        hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
        if (test_bit(hash&255, bd->set[0]) &&
            test_bit((hash>>8)&255, bd->set[0]) &&
            test_bit((hash>>16)&255, bd->set[0]))
                return 1;

        if (test_bit(hash&255, bd->set[1]) &&
            test_bit((hash>>8)&255, bd->set[1]) &&
            test_bit((hash>>16)&255, bd->set[1]))
                return 1;

        return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
        u32 hash;
        struct bloom_pair *bd = &blocked_delegations;

        hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);

        spin_lock(&blocked_delegations_lock);
        __set_bit(hash&255, bd->set[bd->new]);
        __set_bit((hash>>8)&255, bd->set[bd->new]);
        __set_bit((hash>>16)&255, bd->set[bd->new]);
        if (bd->entries == 0)
                bd->swap_time = seconds_since_boot();
        bd->entries += 1;
        spin_unlock(&blocked_delegations_lock);
}
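/*
 * Worked example (editor's addition; the hash value is invented): if a
 * filehandle hashes to 0x00a1b2c3, block_delegations() sets bits
 * 0xc3 (195), 0xb2 (178) and 0xa1 (161) in the "new" 256-bit filter.
 * delegation_blocked() reports a match only when all three bits are
 * set in the same filter, so the bloom filter can produce false
 * positives (spuriously blocked delegations) but never false
 * negatives (a recalled filehandle is always caught).
 */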
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
{
        struct nfs4_delegation *dp;
        long n;

        dprintk("NFSD alloc_init_deleg\n");
        n = atomic_long_inc_return(&num_delegations);
        if (n < 0 || n > max_delegations)
                goto out_dec;
        if (delegation_blocked(&current_fh->fh_handle))
                goto out_dec;
        dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
        if (dp == NULL)
                goto out_dec;

        dp->dl_stid.sc_free = nfs4_free_deleg;
        /*
         * delegation seqids are never incremented.  The 4.1 special
         * meaning of seqid 0 isn't meaningful, really, but let's avoid
         * 0 anyway just for consistency and use 1:
         */
        dp->dl_stid.sc_stateid.si_generation = 1;
        INIT_LIST_HEAD(&dp->dl_perfile);
        INIT_LIST_HEAD(&dp->dl_perclnt);
        INIT_LIST_HEAD(&dp->dl_recall_lru);
        dp->dl_type = NFS4_OPEN_DELEGATE_READ;
        dp->dl_retries = 1;
        nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
                      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
        return dp;
out_dec:
        atomic_long_dec(&num_delegations);
        return NULL;
}
void
nfs4_put_stid(struct nfs4_stid *s)
{
        struct nfs4_file *fp = s->sc_file;
        struct nfs4_client *clp = s->sc_client;

        might_lock(&clp->cl_lock);

        if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
                wake_up_all(&close_wq);
                return;
        }
        idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
        spin_unlock(&clp->cl_lock);
        s->sc_free(s);
        if (fp)
                put_nfs4_file(fp);
}

static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
        struct file *filp = NULL;

        spin_lock(&fp->fi_lock);
        if (fp->fi_deleg_file && atomic_dec_and_test(&fp->fi_delegees))
                swap(filp, fp->fi_deleg_file);
        spin_unlock(&fp->fi_lock);

        if (filp) {
                vfs_setlease(filp, F_UNLCK, NULL, NULL);
                fput(filp);
        }
}

static void unhash_stid(struct nfs4_stid *s)
{
        s->sc_type = 0;
}

static void
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
        lockdep_assert_held(&state_lock);
        lockdep_assert_held(&fp->fi_lock);

        atomic_inc(&dp->dl_stid.sc_count);
        dp->dl_stid.sc_type = NFS4_DELEG_STID;
        list_add(&dp->dl_perfile, &fp->fi_delegations);
        list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
}

static void
unhash_delegation_locked(struct nfs4_delegation *dp)
{
        struct nfs4_file *fp = dp->dl_stid.sc_file;

        lockdep_assert_held(&state_lock);

        dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
        /* Ensure that deleg break won't try to requeue it */
        ++dp->dl_time;
        spin_lock(&fp->fi_lock);
        list_del_init(&dp->dl_perclnt);
        list_del_init(&dp->dl_recall_lru);
        list_del_init(&dp->dl_perfile);
        spin_unlock(&fp->fi_lock);
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
        spin_lock(&state_lock);
        unhash_delegation_locked(dp);
        spin_unlock(&state_lock);
        nfs4_put_deleg_lease(dp->dl_stid.sc_file);
        nfs4_put_stid(&dp->dl_stid);
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
        struct nfs4_client *clp = dp->dl_stid.sc_client;

        WARN_ON(!list_empty(&dp->dl_recall_lru));

        nfs4_put_deleg_lease(dp->dl_stid.sc_file);

        if (clp->cl_minorversion == 0)
                nfs4_put_stid(&dp->dl_stid);
        else {
                dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
                spin_lock(&clp->cl_lock);
                list_add(&dp->dl_recall_lru, &clp->cl_revoked);
                spin_unlock(&clp->cl_lock);
        }
}
/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
        return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
        return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
        int i;
        unsigned int access = 0;

        for (i = 1; i < 4; i++) {
                if (test_bit(i, &bmap))
                        access |= i;
        }
        return access;
}
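/*
 * Worked example (editor's addition): an OPEN with share access READ (1)
 * followed by one with BOTH (3) leaves st_access_bmap = (1 << 1) | (1 << 3)
 * = 0x0a.  bmap_to_share_mode(0x0a) then ORs in 1 and 3 and returns 3
 * (NFS4_SHARE_ACCESS_BOTH), the union of everything granted so far.
 */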
/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << access;

        WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
        stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << access;

        WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
        stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << access;

        return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << deny;

        WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
        stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << deny;

        WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
        stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
        unsigned char mask = 1 << deny;

        return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
        switch (access & NFS4_SHARE_ACCESS_BOTH) {
        case NFS4_SHARE_ACCESS_READ:
                return O_RDONLY;
        case NFS4_SHARE_ACCESS_WRITE:
                return O_WRONLY;
        case NFS4_SHARE_ACCESS_BOTH:
                return O_RDWR;
        }
        WARN_ON_ONCE(1);
        return O_RDONLY;
}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded.  Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
        struct nfs4_ol_stateid *stp;

        spin_lock(&fp->fi_lock);
        fp->fi_share_deny = 0;
        list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
                fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
        spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
        int i;
        bool change = false;

        for (i = 1; i < 4; i++) {
                if ((i & deny) != i) {
                        change = true;
                        clear_deny(i, stp);
                }
        }

        /* Recalculate per-file deny mode if there was a change */
        if (change)
                recalculate_deny_mode(stp->st_stid.sc_file);
}
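/*
 * Worked example (editor's addition): downgrading to deny = READ (1)
 * keeps only the bits i with (i & 1) == i, i.e. bit 1; bits 2 (WRITE)
 * and 3 (BOTH) are cleared, and the per-file deny mode is then
 * recomputed from the deny bitmaps of the remaining stateids.
 */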
  761. /* release all access and file references for a given stateid */
  762. static void
  763. release_all_access(struct nfs4_ol_stateid *stp)
  764. {
  765. int i;
  766. struct nfs4_file *fp = stp->st_stid.sc_file;
  767. if (fp && stp->st_deny_bmap != 0)
  768. recalculate_deny_mode(fp);
  769. for (i = 1; i < 4; i++) {
  770. if (test_access(i, stp))
  771. nfs4_file_put_access(stp->st_stid.sc_file, i);
  772. clear_access(i, stp);
  773. }
  774. }
  775. static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
  776. {
  777. struct nfs4_client *clp = sop->so_client;
  778. might_lock(&clp->cl_lock);
  779. if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
  780. return;
  781. sop->so_ops->so_unhash(sop);
  782. spin_unlock(&clp->cl_lock);
  783. kfree(sop->so_owner.data);
  784. sop->so_ops->so_free(sop);
  785. }
  786. static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
  787. {
  788. struct nfs4_file *fp = stp->st_stid.sc_file;
  789. lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
  790. spin_lock(&fp->fi_lock);
  791. list_del(&stp->st_perfile);
  792. spin_unlock(&fp->fi_lock);
  793. list_del(&stp->st_perstateowner);
  794. }
  795. static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
  796. {
  797. struct nfs4_ol_stateid *stp = openlockstateid(stid);
  798. release_all_access(stp);
  799. if (stp->st_stateowner)
  800. nfs4_put_stateowner(stp->st_stateowner);
  801. kmem_cache_free(stateid_slab, stid);
  802. }
  803. static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
  804. {
  805. struct nfs4_ol_stateid *stp = openlockstateid(stid);
  806. struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
  807. struct file *file;
  808. file = find_any_file(stp->st_stid.sc_file);
  809. if (file)
  810. filp_close(file, (fl_owner_t)lo);
  811. nfs4_free_ol_stateid(stid);
  812. }
  813. /*
  814. * Put the persistent reference to an already unhashed generic stateid, while
  815. * holding the cl_lock. If it's the last reference, then put it onto the
  816. * reaplist for later destruction.
  817. */
  818. static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
  819. struct list_head *reaplist)
  820. {
  821. struct nfs4_stid *s = &stp->st_stid;
  822. struct nfs4_client *clp = s->sc_client;
  823. lockdep_assert_held(&clp->cl_lock);
  824. WARN_ON_ONCE(!list_empty(&stp->st_locks));
  825. if (!atomic_dec_and_test(&s->sc_count)) {
  826. wake_up_all(&close_wq);
  827. return;
  828. }
  829. idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
  830. list_add(&stp->st_locks, reaplist);
  831. }
  832. static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
  833. {
  834. struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
  835. lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
  836. list_del_init(&stp->st_locks);
  837. unhash_ol_stateid(stp);
  838. unhash_stid(&stp->st_stid);
  839. }
  840. static void release_lock_stateid(struct nfs4_ol_stateid *stp)
  841. {
  842. struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
  843. spin_lock(&oo->oo_owner.so_client->cl_lock);
  844. unhash_lock_stateid(stp);
  845. spin_unlock(&oo->oo_owner.so_client->cl_lock);
  846. nfs4_put_stid(&stp->st_stid);
  847. }
  848. static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
  849. {
  850. struct nfs4_client *clp = lo->lo_owner.so_client;
  851. lockdep_assert_held(&clp->cl_lock);
  852. list_del_init(&lo->lo_owner.so_strhash);
  853. }
  854. /*
  855. * Free a list of generic stateids that were collected earlier after being
  856. * fully unhashed.
  857. */
  858. static void
  859. free_ol_stateid_reaplist(struct list_head *reaplist)
  860. {
  861. struct nfs4_ol_stateid *stp;
  862. struct nfs4_file *fp;
  863. might_sleep();
  864. while (!list_empty(reaplist)) {
  865. stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
  866. st_locks);
  867. list_del(&stp->st_locks);
  868. fp = stp->st_stid.sc_file;
  869. stp->st_stid.sc_free(&stp->st_stid);
  870. if (fp)
  871. put_nfs4_file(fp);
  872. }
  873. }
static void release_lockowner(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfs4_ol_stateid *stp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_lockowner_locked(lo);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				       struct nfs4_ol_stateid, st_perstateowner);
		unhash_lock_stateid(stp);
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	nfs4_put_stateowner(&lo->lo_owner);
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				 struct nfs4_ol_stateid, st_locks);
		unhash_lock_stateid(stp);
		put_ol_stateid_locked(stp, reaplist);
	}
}

static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	unhash_ol_stateid(stp);
	release_open_stateid_locks(stp, reaplist);
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	unhash_open_stateid(stp, &reaplist);
	put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				       struct nfs4_ol_stateid, st_perstateowner);
		unhash_open_stateid(stp, &reaplist);
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef NFSD_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ	(24 + 12 + 44)

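/* That is, 24 + 12 + 44 = 80 bytes of headers on top of the cached data. */
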
static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
		kfree(ses->se_slots[i]);
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}

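/*
 * Worked example: a client advertising maxresp_cached of 2128 bytes gets
 * 2128 - 80 = 2048 bytes of reply cache per slot, plus
 * sizeof(struct nfsd4_slot) for the bookkeeping at the front of the
 * allocation.  Anything at or below the 80-byte floor degenerates to
 * just the slot header.
 */
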
/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	int avail;

	spin_lock(&nfsd_drc_lock);
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
		    nfsd_drc_max_mem - nfsd_drc_mem_used);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

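/*
 * Note: avail / slotsize is integer division, so a nearly exhausted DRC
 * pool can legitimately return 0 slots here; check_forechannel_attrs()
 * below turns a zero slot count into nfserr_jukebox rather than creating
 * an unusable session.
 */
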
static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

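/*
 * Note on connection lifetime: each nfsd4_conn is registered as an
 * xpt_user on its transport (see nfsd4_register_conn() below), and the
 * callback above fires when that transport goes down.  The list_empty()
 * check guards against racing with nfsd4_del_conns(), which unlinks the
 * entry before dropping cl_lock and unregistering.
 */
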
static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}

static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	if (cses->flags & SESSION4_BACK_CHAN) {
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}

/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

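/*
 * Note: the lookup above returns the session without taking a reference;
 * the wrapper below is what callers use, since it pins the session via
 * nfsd4_get_session_locked() (or reports why it could not) before the
 * client_lock is dropped.
 */
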
static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
		__be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	if (clid->cl_boot == nn->boot_time)
		return 0;
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, nn->boot_time);
	return 1;
}

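/*
 * Note: cl_boot is stamped with the server's boot_time when the clientid
 * is generated (see gen_clid() below), so every clientid handed out
 * before a reboot is stale by construction.
 */
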
/*
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;
	int i;

	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL)
		goto err_no_name;
	clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
			OWNER_HASH_SIZE, GFP_KERNEL);
	if (!clp->cl_ownerstr_hashtbl)
		goto err_no_hashtbl;
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
	clp->cl_name.len = name.len;
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_callbacks);
	INIT_LIST_HEAD(&clp->cl_revoked);
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	return clp;
err_no_hashtbl:
	kfree(clp->cl_name.data);
err_no_name:
	kfree(clp);
	return NULL;
}

static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				 se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	idr_destroy(&clp->cl_stateids);
	kfree(clp);
}

/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	unhash_client_locked(clp);
	return nfs_ok;
}

static void
__destroy_client(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		unhash_delegation_locked(dp);
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_revoked)) {
		/* cl_revoked entries are also linked via dl_recall_lru;
		 * take them from cl_revoked itself, not the now-empty
		 * reaplist */
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	free_client(clp);
}

static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}

static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	if (source->cr_principal) {
		target->cr_principal =
				kstrdup(source->cr_principal, GFP_KERNEL);
		if (target->cr_principal == NULL)
			return -ENOMEM;
	} else
		target->cr_principal = NULL;
	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}

static long long
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	long long res;

	res = o1->len - o2->len;
	if (res)
		return res;
	return (long long)memcmp(o1->data, o2->data, o1->len);
}

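/*
 * Note: compare_blob() orders netobjs first by length and only then by
 * content.  That is cheap and still gives the total order that the
 * red-black name trees (add_clp_to_name_tree()/find_clp_in_name_tree()
 * below) require; it is not a lexicographic sort.
 */
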
static int same_name(const char *n1, const char *n2)
{
	return 0 == memcmp(n1, n2, HEXDIR_LEN);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i = 0; i < g1->ngroups; i++)
		if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
			return false;
	return true;
}

/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use uid, gids, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}

static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}

static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;
	u32 service;

	if (!cr->cr_gss_mech)
		return false;
	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
	return service == RPC_GSS_SVC_INTEGRITY ||
	       service == RPC_GSS_SVC_PRIVACY;
}

static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}

static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
	__be32 verf[2];

	/*
	 * This is opaque to client, so no need to byte-swap. Use
	 * __force to keep sparse happy
	 */
	verf[0] = (__force __be32)get_seconds();
	verf[1] = (__force __be32)nn->clientid_counter;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	clp->cl_clientid.cl_boot = nn->boot_time;
	clp->cl_clientid.cl_id = nn->clientid_counter++;
	gen_confirm(clp, nn);
}

static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{
	struct nfs4_stid *ret;

	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
	if (!ret || !ret->sc_type)
		return NULL;
	return ret;
}

static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, t);
	if (s != NULL) {
		if (typemask & s->sc_type)
			atomic_inc(&s->sc_count);
		else
			s = NULL;
	}
	spin_unlock(&cl->cl_lock);
	return s;
}

static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		free_client(clp);
		return NULL;
	}
	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	clp->cl_cb_session = NULL;
	clp->net = net;
	return clp;
}

static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}

static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	long long cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}

static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client_locked(clp);
}

static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dprintk("NFSD: move_to_confirmed nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client_locked(clp);
}

static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client_locked(clp);
			return clp;
		}
	}
	return NULL;
}

static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->conf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->unconf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}

static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr *sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					      se->se_callback_addr_len,
					      (struct sockaddr *)&conn->cb_addr,
					      sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}

/*
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct xdr_buf *buf = resp->xdr.buf;
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;
	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;

	if (nfsd4_not_cached(resp)) {
		slot->sl_datalen = 0;
		return;
	}
	base = resp->cstate.data_offset;
	slot->sl_datalen = buf->len - base;
	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n",
		     __func__);
	return;
}

/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 *
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Return nfserr_retry_uncached_rep in next operation. */
	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}

/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}

/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
	/* pNFS is not supported */
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}

static bool client_has_state(struct nfs4_client *clp)
{
	/*
	 * Note clp->cl_openowners check isn't quite right: there's no
	 * need to count owners without stateids.
	 *
	 * Also note we should probably be using this in 4.0 case too.
	 */
	return !list_empty(&clp->cl_openowners)
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions);
}

__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);
	bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		if (!svc_rqst_integrity_protected(rqstp))
			return nfserr_inval;
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
	case SP4_SSV:
		return nfserr_encr_alg_unsupp;
	}

	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL)
		return nfserr_jukebox;

	/* Cases below refer to rfc 5661 section 18.35.4: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			goto out_copy;
		}
		/* case 5, client reboot */
		conf = NULL;
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		unhash_client_locked(unconf);

	/* case 1 (normal case) */
out_new:
	if (conf) {
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);

	gen_clid(new, nn);
	add_to_unconfirmed(new);
	swap(new, conf);
out_copy:
	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
	exid->clientid.cl_id = conf->cl_clientid.cl_id;

	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(conf, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
	status = nfs_ok;

out:
	spin_unlock(&nn->client_lock);
	if (new)
		expire_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}

static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}

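/*
 * Summary of the cases above:
 *   - slot in use, seqid == slot_seqid:	retransmit of a request
 *						still being processed;
 *						ask the client to retry
 *						later (nfserr_jukebox);
 *   - slot idle, seqid == slot_seqid + 1:	the expected next request;
 *   - slot idle, seqid == slot_seqid:		a replay, served from the
 *						reply cache;
 *   - anything else:				nfserr_seq_misordered.
 */
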
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */ \
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))

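/*
 * Worked example, assuming NFS4_MAX_SESSIONID_LEN == 16 (so
 * XDR_QUADLEN(16) == 4): the request minimum is
 * (4 + 1 + 3 + 4 + 4) * 4 = 64 bytes, and the response minimum is
 * (2 + 1 + 1 + 3 + 4 + 5) * 4 = 64 bytes.  These are the floors
 * enforced on the fore channel's maxreq_sz and maxresp_sz below.
 */
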
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca);
	if (!ca->maxreqs)
		return nfserr_jukebox;

	return nfs_ok;
}

#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
				 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
				 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))

static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	/*
	 * These RPC_MAX_HEADER macros are overkill, especially since we
	 * don't even do gss on the backchannel yet.  But this is still
	 * less than 1k.  Tighten up this estimate in the unlikely event
	 * it turns out to be a problem for some client:
	 */
	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}

static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}

__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
	} else if (unconf) {
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out_free_conn;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_get_session_locked(new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the client_lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	spin_unlock(&nn->client_lock);
	/* init connection and backchannel */
	nfsd4_init_conn(rqstp, conn, new);
	nfsd4_put_session(new);
	if (old)
		expire_client(old);
	return status;
out_free_conn:
	spin_unlock(&nn->client_lock);
	free_conn(conn);
	if (old)
		expire_client(old);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}

static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	}
	return nfserr_inval;
}

__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
{
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}

__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
				  struct nfsd4_compound_state *cstate,
				  struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfsd4_put_session(session);
out_no_session:
	return status;
}

static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
{
	if (!session)
		return 0;
	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
}

__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, &sessionid->sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	return status;
}

static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {
			return c;
		}
	}
	return NULL;
}

static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		goto out_free;
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}

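/*
 * Note: a failed register_xpt_user() is deliberately not treated as an
 * error above; nfsd4_conn_lost() performs the same teardown the callback
 * would have, so the request proceeds and the connection simply isn't
 * retained.  Either way the conn has been consumed (hashed or freed),
 * which is why nfsd4_sequence() below NULLs its local pointer after this
 * call.
 */
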
  2315. static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
  2316. {
  2317. struct nfsd4_compoundargs *args = rqstp->rq_argp;
  2318. return args->opcnt > session->se_fchannel.maxops;
  2319. }
  2320. static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
  2321. struct nfsd4_session *session)
  2322. {
  2323. struct xdr_buf *xb = &rqstp->rq_arg;
  2324. return xb->len > session->se_fchannel.maxreq_sz;
  2325. }
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
	       struct nfsd4_compound_state *cstate,
	       struct nfsd4_sequence *seq)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = &resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slotid to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}
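
/*
 * Runs after the compound has been processed: on the sessions path,
 * cache the encoded reply in the slot (unless this reply *was* a
 * replay) and clear the slot's INUSE flag, then drop the session
 * reference taken in nfsd4_sequence(); on the v4.0 path, just drop
 * the client renew reference.
 */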
void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}

__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
{
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *clp = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		clp = conf;
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!mach_creds_match(clp, rqstp)) {
		clp = NULL;
		status = nfserr_wrong_cred;
		goto out;
	}
	unhash_client_locked(clp);
out:
	spin_unlock(&nn->client_lock);
	if (clp)
		expire_client(clp);
	return status;
}
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed itself.  Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	nfsd4_client_record_create(cstate->session->se_client);
out:
	return status;
}
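
/*
 * SETCLIENTID follows the case analysis of RFC 3530 section 14.2.33:
 * a confirmed record with the same name but a different principal is
 * in use (case 0); same name, same verifier is probably a callback
 * update (case 1), so the established clientid is copied; everything
 * else is a new client or a client reboot (cases 2-4) and gets a
 * fresh clientid.  Either way the new record starts out unconfirmed.
 */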
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
{
	struct xdr_netobj clname = setclid->se_name;
	nfs4_verifier clverifier = setclid->se_verf;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		return nfserr_jukebox;
	/* Cases below refer to rfc 3530 section 14.2.33: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf) {
		/* case 0: */
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];

			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		unhash_client_locked(unconf);
	if (conf && same_verf(&conf->cl_verifier, &clverifier))
		/* case 1: probable callback update */
		copy_clid(new, conf);
	else /* case 4 (new client) or cases 2, 3 (client reboot): */
		gen_clid(new, nn);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	new = NULL;
	status = nfs_ok;
out:
	spin_unlock(&nn->client_lock);
	if (new)
		free_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			  struct nfsd4_compound_state *cstate,
			  struct nfsd4_setclientid_confirm *setclientid_confirm)
{
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t *clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	spin_lock(&nn->client_lock);
	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientids, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * there's a bug somewhere.  Let's charitably assume it's our
	 * bug.
	 */
	status = nfserr_serverfault;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
	/* cases below refer to rfc 3530 section 14.2.34: */
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && !unconf) /* case 2: probable retransmit */
			status = nfs_ok;
		else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) { /* case 1: callback update */
		old = unconf;
		unhash_client_locked(old);
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
	} else { /* case 3: normal case; new or rebooted client */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	}
	get_client_locked(conf);
	spin_unlock(&nn->client_lock);
	nfsd4_probe_callback(conf);
	spin_lock(&nn->client_lock);
	put_client_renew_locked(conf);
out:
	spin_unlock(&nn->client_lock);
	if (old)
		expire_client(old);
	return status;
}
static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}

/* OPEN Share state helper functions */
static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
{
	unsigned int hashval = file_hashval(fh);

	lockdep_assert_held(&state_lock);

	atomic_set(&fp->fi_ref, 1);
	spin_lock_init(&fp->fi_lock);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	fh_copy_shallow(&fp->fi_fhandle, fh);
	fp->fi_deleg_file = NULL;
	fp->fi_had_conflict = false;
	fp->fi_share_deny = 0;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
	hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
}

void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
}

int
nfsd4_init_slabs(void)
{
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	return 0;

out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out:
	dprintk("nfsd4: out of memory while initializing nfsv4\n");
	return -ENOMEM;
}

static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
	mutex_init(&rp->rp_mutex);
}
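
/*
 * v4.0 (no sessions) serializes seqid-mutating operations per
 * stateowner: assign_replay takes the owner's rp_mutex and pins the
 * owner in cstate->replay_owner; clear_replay releases both once the
 * reply (possibly needed later for replay) has been encoded and
 * cached.
 */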
static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
		struct nfs4_stateowner *so)
{
	if (!nfsd4_has_session(cstate)) {
		mutex_lock(&so->so_replay.rp_mutex);
		cstate->replay_owner = nfs4_get_stateowner(so);
	}
}

void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (so != NULL) {
		cstate->replay_owner = NULL;
		mutex_unlock(&so->so_replay.rp_mutex);
		nfs4_put_stateowner(so);
	}
}

static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
	struct nfs4_stateowner *sop;

	sop = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!sop)
		return NULL;

	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
	if (!sop->so_owner.data) {
		kmem_cache_free(slab, sop);
		return NULL;
	}
	sop->so_owner.len = owner->len;

	INIT_LIST_HEAD(&sop->so_stateids);
	sop->so_client = clp;
	init_nfs4_replay(&sop->so_replay);
	atomic_set(&sop->so_count, 1);
	return sop;
}

static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	lockdep_assert_held(&clp->cl_lock);

	list_add(&oo->oo_owner.so_strhash,
		 &clp->cl_ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}

static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
	unhash_openowner_locked(openowner(so));
}

static void nfs4_free_openowner(struct nfs4_stateowner *so)
{
	struct nfs4_openowner *oo = openowner(so);

	kmem_cache_free(openowner_slab, oo);
}

static const struct nfs4_stateowner_operations openowner_ops = {
	.so_unhash = nfs4_unhash_openowner,
	.so_free = nfs4_free_openowner,
};
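
/*
 * Openowner creation is optimistic: the candidate is fully built
 * outside cl_lock, and only hashed if no concurrent thread managed to
 * insert an owner with the same string first; if one did, ours is
 * freed and the existing owner is returned instead.
 */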
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo, *ret;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_ops = &openowner_ops;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = 0;
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	spin_lock(&clp->cl_lock);
	ret = find_openstateowner_str_locked(strhashval, open, clp);
	if (ret == NULL) {
		hash_openowner(oo, clp, strhashval);
		ret = oo;
	} else
		nfs4_free_openowner(&oo->oo_owner);
	spin_unlock(&clp->cl_lock);
	/* Return the hashed owner, not the one we may just have freed: */
	return ret;
}
static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_openowner *oo = open->op_openowner;

	atomic_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	stp->st_openstp = NULL;
	spin_lock(&oo->oo_owner.so_client->cl_lock);
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
}

/*
 * In the 4.0 case we need to keep the owners around a little while to handle
 * CLOSE replay. We still do need to release any file access that is held by
 * them before returning however.
 */
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
	struct nfs4_ol_stateid *last;
	struct nfs4_openowner *oo = openowner(s->st_stateowner);
	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
					  nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	/*
	 * We know that we hold one reference via nfsd4_close, and another
	 * "persistent" reference for the client. If the refcount is higher
	 * than 2, then there are still calls in progress that are using this
	 * stateid. We can't put the sc_file reference until they are finished.
	 * Wait for the refcount to drop to 2. Since it has been unhashed,
	 * there should be no danger of the refcount going back up again at
	 * this point.
	 */
	wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);

	release_all_access(s);
	if (s->st_stid.sc_file) {
		put_nfs4_file(s->st_stid.sc_file);
		s->st_stid.sc_file = NULL;
	}

	spin_lock(&nn->client_lock);
	last = oo->oo_last_closed_stid;
	oo->oo_last_closed_stid = s;
	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = get_seconds();
	spin_unlock(&nn->client_lock);
	if (last)
		nfs4_put_stid(&last->st_stid);
}
/* search file_hashtbl[] for file */
static struct nfs4_file *
find_file_locked(struct knfsd_fh *fh)
{
	unsigned int hashval = file_hashval(fh);
	struct nfs4_file *fp;

	lockdep_assert_held(&state_lock);

	hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
		if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
			get_nfs4_file(fp);
			return fp;
		}
	}
	return NULL;
}

static struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
	struct nfs4_file *fp;

	spin_lock(&state_lock);
	fp = find_file_locked(fh);
	spin_unlock(&state_lock);
	return fp;
}

static struct nfs4_file *
find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
{
	struct nfs4_file *fp;

	spin_lock(&state_lock);
	fp = find_file_locked(fh);
	if (fp == NULL) {
		nfsd4_init_file(new, fh);
		fp = new;
	}
	spin_unlock(&state_lock);
	return fp;
}

/*
 * Called to check deny when READ with all zero stateid or
 * WRITE with all zero or all one stateid
 */
static __be32
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
	struct nfs4_file *fp;
	__be32 ret = nfs_ok;

	fp = find_file(&current_fh->fh_handle);
	if (!fp)
		return ret;
	/* Check for conflicting share reservations */
	spin_lock(&fp->fi_lock);
	if (fp->fi_share_deny & deny_type)
		ret = nfserr_locked;
	spin_unlock(&fp->fi_lock);
	put_nfs4_file(fp);
	return ret;
}
static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);
	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
					  nfsd_net_id);

	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);

	/*
	 * We can't do this in nfsd_break_deleg_cb because it is
	 * already holding inode->i_lock.
	 *
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
	spin_lock(&state_lock);
	if (dp->dl_time == 0) {
		dp->dl_time = get_seconds();
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
	}
	spin_unlock(&state_lock);
}

static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
		struct rpc_task *task)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	switch (task->tk_status) {
	case 0:
		return 1;
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/*
		 * Race: client probably got cb_recall before open reply
		 * granting delegation.
		 */
		if (dp->dl_retries--) {
			rpc_delay(task, 2 * HZ);
			return 0;
		}
		/*FALLTHRU*/
	default:
		return -1;
	}
}

static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	nfs4_put_stid(&dp->dl_stid);
}

static struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
	.prepare = nfsd4_cb_recall_prepare,
	.done = nfsd4_cb_recall_done,
	.release = nfsd4_cb_recall_release,
};
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/*
	 * We're assuming the state code never drops its reference
	 * without first removing the lease. Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, and we
	 * know it's safe to take a reference.
	 */
	atomic_inc(&dp->dl_stid.sc_count);
	nfsd4_run_cb(&dp->dl_recall);
}

/* Called from break_lease() with i_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
	bool ret = false;
	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
	struct nfs4_delegation *dp;

	if (!fp) {
		WARN(1, "(%p)->fl_owner NULL\n", fl);
		return ret;
	}
	if (fp->fi_had_conflict) {
		WARN(1, "duplicate break on %p\n", fp);
		return ret;
	}
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourselves if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&fp->fi_lock);
	fp->fi_had_conflict = true;
	/*
	 * If there are no delegations on the list, then return true
	 * so that the lease code will go ahead and delete it.
	 */
	if (list_empty(&fp->fi_delegations))
		ret = true;
	else
		list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
			nfsd_break_one_deleg(dp);
	spin_unlock(&fp->fi_lock);
	return ret;
}

static int
nfsd_change_deleg_cb(struct file_lock **onlist, int arg, struct list_head *dispose)
{
	if (arg & F_UNLCK)
		return lease_modify(onlist, arg, dispose);
	else
		return -EAGAIN;
}

static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};
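
/*
 * v4.0 seqid rules: a seqid exactly one less than the owner's current
 * seqid is a retransmission of the previous request and is answered
 * from the replay cache (nfserr_replay_me); the current seqid is a
 * new, in-order request; anything else is a bad seqid.  Sessions make
 * this check unnecessary.
 */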
static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}

static __be32 lookup_clientid(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	struct nfs4_client *found;

	if (cstate->clp) {
		found = cstate->clp;
		if (!same_clid(&found->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	/*
	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
	 * cached already then we know this is for v4.0 and "sessions"
	 * will be false.
	 */
	WARN_ON_ONCE(cstate->session);
	spin_lock(&nn->client_lock);
	found = find_confirmed_client(clid, false, nn);
	if (!found) {
		spin_unlock(&nn->client_lock);
		return nfserr_expired;
	}
	atomic_inc(&found->cl_refcount);
	spin_unlock(&nn->client_lock);

	/* Cache the nfs4_client in cstate! */
	cstate->clp = found;
	return nfs_ok;
}
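
/*
 * OPEN is handled in two phases.  nfsd4_process_open1() runs before
 * the file is created or looked up: it finds (or creates) the
 * openowner and preallocates the nfs4_file and open stateid, so that
 * nfsd4_process_open2() cannot fail on allocation after the file
 * already exists on disk.
 */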
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (STALE_CLIENTID(&open->op_clientid, nn))
		return nfserr_stale_clientid;
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = lookup_clientid(clientid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

	strhashval = ownerstr_hashval(&open->op_owner);
	oo = find_openstateowner_str(strhashval, open, clp);
	open->op_openowner = oo;
	if (!oo)
		goto new_owner;
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	open->op_stp = nfs4_alloc_open_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;
	return nfs_ok;
}

static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
		return nfserr_openmode;
	else
		return nfs_ok;
}

static int share_access_to_flags(u32 share_access)
{
	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}

static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
	struct nfs4_stid *ret;

	ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
	if (!ret)
		return NULL;
	return delegstateid(ret);
}

static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}

static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		 struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;
	struct nfs4_delegation *deleg;

	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (deleg == NULL)
		goto out;
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(deleg, flags);
	if (status) {
		nfs4_put_stid(&deleg->dl_stid);
		goto out;
	}
	*dp = deleg;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}
static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *local, *ret = NULL;
	struct nfs4_openowner *oo = open->op_openowner;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
		if (local->st_stateowner == &oo->oo_owner) {
			ret = local;
			atomic_inc(&ret->st_stid.sc_count);
			break;
		}
	}
	spin_unlock(&fp->fi_lock);
	return ret;
}

static inline int nfs4_access_to_access(u32 nfs4_access)
{
	int flags = 0;

	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
		flags |= NFSD_MAY_READ;
	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
		flags |= NFSD_MAY_WRITE;
	return flags;
}

static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{
	struct iattr iattr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = 0,
	};
	if (!open->op_truncate)
		return 0;
	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
		return nfserr_inval;
	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
}
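
/*
 * Deny/access checks and share-count bumps below happen under
 * fi_lock, but the lock must be dropped around nfsd_open(), which can
 * sleep.  That leaves a window for another opener to install
 * fi_fds[oflag] first, so the slot is re-checked after relocking and
 * any extra struct file is fput() outside the lock.
 */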
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open)
{
	struct file *filp = NULL;
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);
	unsigned char old_access_bmap, old_deny_bmap;

	spin_lock(&fp->fi_lock);

	/*
	 * Are we trying to set a deny mode that would conflict with
	 * current access?
	 */
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* set access to the file */
	status = nfs4_file_get_access(fp, open->op_share_access);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* Set access bits in stateid */
	old_access_bmap = stp->st_access_bmap;
	set_access(open->op_share_access, stp);

	/* Set new deny mask */
	old_deny_bmap = stp->st_deny_bmap;
	set_deny(open->op_share_deny, stp);
	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);

	if (!fp->fi_fds[oflag]) {
		spin_unlock(&fp->fi_lock);
		status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
		if (status)
			goto out_put_access;
		spin_lock(&fp->fi_lock);
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = filp;
			filp = NULL;
		}
	}
	spin_unlock(&fp->fi_lock);
	if (filp)
		fput(filp);

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;
out:
	return status;
out_put_access:
	stp->st_access_bmap = old_access_bmap;
	nfs4_file_put_access(fp, open->op_share_access);
	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
	goto out;
}
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	__be32 status;
	unsigned char old_deny_bmap;

	if (!test_access(open->op_share_access, stp))
		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);

	/* test and set deny mode */
	spin_lock(&fp->fi_lock);
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status == nfs_ok) {
		old_deny_bmap = stp->st_deny_bmap;
		set_deny(open->op_share_deny, stp);
		fp->fi_share_deny |=
				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
	}
	spin_unlock(&fp->fi_lock);

	if (status != nfs_ok)
		return status;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status != nfs_ok)
		reset_union_bmap_deny(old_deny_bmap, stp);
	return status;
}

static void
nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
{
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
}

/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
	if (clp->cl_cb_state == NFSD4_CB_UP)
		return true;
	/*
	 * In the sessions case, since we don't have to establish a
	 * separate connection for callbacks, we assume it's OK
	 * until we hear otherwise:
	 */
	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}

static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
{
	struct file_lock *fl;

	fl = locks_alloc_lock();
	if (!fl)
		return NULL;
	fl->fl_lmops = &nfsd_lease_mng_ops;
	fl->fl_flags = FL_DELEG;
	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = (fl_owner_t)fp;
	fl->fl_pid = current->tgid;
	return fl;
}
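
/*
 * Hand the lease to the vfs: vfs_setlease() either consumes the
 * file_lock (clearing our pointer) or leaves it for us to free.
 * Afterwards state_lock/fi_lock are retaken to re-check for a
 * conflict that arrived in the meantime, and for another thread that
 * installed the delegation file first (the "race breaker" below).
 */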
static int nfs4_setlease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct file_lock *fl;
	struct file *filp;
	int status = 0;

	fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		return -ENOMEM;
	filp = find_readable_file(fp);
	if (!filp) {
		/* We should always have a readable file here */
		WARN_ON_ONCE(1);
		locks_free_lock(fl);
		return -EBADF;
	}
	fl->fl_file = filp;
	status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
	if (fl)
		locks_free_lock(fl);
	if (status)
		goto out_fput;
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	/* Did the lease get broken before we took the lock? */
	status = -EAGAIN;
	if (fp->fi_had_conflict)
		goto out_unlock;
	/* Race breaker */
	if (fp->fi_deleg_file) {
		status = 0;
		atomic_inc(&fp->fi_delegees);
		hash_delegation_locked(dp, fp);
		goto out_unlock;
	}
	fp->fi_deleg_file = filp;
	atomic_set(&fp->fi_delegees, 1);
	hash_delegation_locked(dp, fp);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
	return 0;
out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
out_fput:
	fput(filp);
	return status;
}
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
		    struct nfs4_file *fp)
{
	int status;
	struct nfs4_delegation *dp;

	if (fp->fi_had_conflict)
		return ERR_PTR(-EAGAIN);

	dp = alloc_init_deleg(clp, fh);
	if (!dp)
		return ERR_PTR(-ENOMEM);

	get_nfs4_file(fp);
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	dp->dl_stid.sc_file = fp;
	if (!fp->fi_deleg_file) {
		spin_unlock(&fp->fi_lock);
		spin_unlock(&state_lock);
		status = nfs4_setlease(dp);
		goto out;
	}
	atomic_inc(&fp->fi_delegees);
	if (fp->fi_had_conflict) {
		status = -EAGAIN;
		goto out_unlock;
	}
	hash_delegation_locked(dp, fp);
	status = 0;
out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
out:
	if (status) {
		nfs4_put_stid(&dp->dl_stid);
		return ERR_PTR(status);
	}
	return dp;
}

static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
	if (status == -EAGAIN)
		open->op_why_no_deleg = WND4_CONTENTION;
	else {
		open->op_why_no_deleg = WND4_RESOURCE;
		switch (open->op_deleg_want) {
		case NFS4_SHARE_WANT_READ_DELEG:
		case NFS4_SHARE_WANT_WRITE_DELEG:
		case NFS4_SHARE_WANT_ANY_DELEG:
			break;
		case NFS4_SHARE_WANT_CANCEL:
			open->op_why_no_deleg = WND4_CANCELLED;
			break;
		case NFS4_SHARE_WANT_NO_DELEG:
			WARN_ON_ONCE(1);
		}
	}
}
/*
 * Attempt to hand out a delegation.
 *
 * Note we don't support write delegations, and won't until the vfs has
 * proper support for them.
 */
static void
nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
		     struct nfs4_ol_stateid *stp)
{
	struct nfs4_delegation *dp;
	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
	struct nfs4_client *clp = stp->st_stid.sc_client;
	int cb_up;
	int status = 0;

	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
	open->op_recall = 0;
	switch (open->op_claim_type) {
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!cb_up)
			open->op_recall = 1;
		if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
			goto out_no_deleg;
		break;
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		/*
		 * Let's not give out any delegations till everyone's
		 * had the chance to reclaim theirs....
		 */
		if (locks_in_grace(clp->net))
			goto out_no_deleg;
		if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
			goto out_no_deleg;
		/*
		 * Also, if the file was opened for write or
		 * create, there's a good chance the client's
		 * about to write to it, resulting in an
		 * immediate recall (since we don't support
		 * write delegations):
		 */
		if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
			goto out_no_deleg;
		if (open->op_create == NFS4_OPEN_CREATE)
			goto out_no_deleg;
		break;
	default:
		goto out_no_deleg;
	}
	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file);
	if (IS_ERR(dp))
		goto out_no_deleg;

	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));

	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
		STATEID_VAL(&dp->dl_stid.sc_stateid));
	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
	nfs4_put_stid(&dp->dl_stid);
	return;
out_no_deleg:
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
		open->op_recall = 1;
	}
	/* 4.1 client asking for a delegation? */
	if (open->op_deleg_want)
		nfsd4_open_deleg_none_ext(open, status);
	return;
}

static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/*
	 * Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
	 */
}
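
/*
 * nfsd4_process_open2() is the second phase of OPEN, run once the
 * filehandle exists: find or install the nfs4_file, upgrade an
 * existing open stateid or initialize the one preallocated in
 * process_open1, then opportunistically try to hand out a delegation.
 * A delegation failure is deliberately not an error; the OPEN itself
 * still succeeds.
 */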
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
	if (fp != open->op_file) {
		status = nfs4_check_deleg(cl, open, &dp);
		if (status)
			goto out;
		stp = nfsd4_find_existing_open(fp, open);
	} else {
		open->op_file = NULL;
		status = nfserr_bad_stateid;
		if (nfsd4_is_deleg_cur(open))
			goto out;
		status = nfserr_jukebox;
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 */
	if (stp) {
		/* Stateid was found, this is an OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status)
			goto out;
	} else {
		stp = open->op_stp;
		open->op_stp = NULL;
		init_open_stateid(stp, fp, open);
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
		if (status) {
			release_open_stateid(stp);
			goto out;
		}
	}
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

	if (nfsd4_has_session(&resp->cstate)) {
		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	 * Attempt to hand out a delegation. No error return, because the
	 * OPEN succeeds even if we fail.
	 */
	nfs4_open_delegation(current_fh, open, stp);
nodeleg:
	status = nfs_ok;
	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
		STATEID_VAL(&stp->st_stid.sc_stateid));
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
	/*
	 * To finish the open response, we just need to set the rflags.
	 */
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
	    !nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
	if (dp)
		nfs4_put_stid(&dp->dl_stid);
	if (stp)
		nfs4_put_stid(&stp->st_stid);

	return status;
}
void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
			      struct nfsd4_open *open, __be32 status)
{
	if (open->op_openowner) {
		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;

		nfsd4_cstate_assign_replay(cstate, so);
		nfs4_put_stateowner(so);
	}
	if (open->op_file)
		nfsd4_free_file(open->op_file);
	if (open->op_stp)
		nfs4_put_stid(&open->op_stp->st_stid);
}

__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    clientid_t *clid)
{
	struct nfs4_client *clp;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("process_renew(%08x/%08x): starting\n",
			clid->cl_boot, clid->cl_id);
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		goto out;
	clp = cstate->clp;
	status = nfserr_cb_path_down;
	if (!list_empty(&clp->cl_delegations)
			&& clp->cl_cb_state != NFSD4_CB_UP)
		goto out;
	status = nfs_ok;
out:
	return status;
}

void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	dprintk("NFSD: end of grace period\n");
	nn->grace_ended = true;
	/*
	 * If the server goes down again right now, an NFSv4
	 * client will still be allowed to reclaim after it comes back up,
	 * even if it hasn't yet had a chance to reclaim state this time.
	 */
	nfsd4_record_grace_done(nn);
	/*
	 * At this point, NFSv4 clients can still reclaim. But if the
	 * server crashes, any that have not yet reclaimed will be out
	 * of luck on the next boot.
	 *
	 * (NFSv4.1+ clients are considered to have reclaimed once they
	 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
	 * have reclaimed after their first OPEN.)
	 */
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * At this point, and once lockd and/or any other containers
	 * exit their grace period, further reclaims will fail and
	 * regular locking can resume.
	 */
}
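
/*
 * The laundromat is the periodic state reaper: each pass ends the
 * grace period if needed, expires clients whose leases have run out,
 * revokes delegations whose recalls have gone unanswered past the
 * lease time, and frees openowners parked on the close LRU, then
 * returns the time in seconds until the next item is due, never less
 * than NFSD_LAUNDROMAT_MINTIMEOUT.
 */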
static time_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct nfs4_ol_stateid *stp;
	struct list_head *pos, *next, reaplist;
	time_t cutoff = get_seconds() - nn->nfsd4_lease;
	time_t t, new_timeo = nn->nfsd4_lease;

	dprintk("NFSD: laundromat service - starting\n");
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);

	spin_lock(&nn->client_lock);
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
			t = clp->cl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		if (mark_client_expired_locked(clp)) {
			dprintk("NFSD: client in use (clientid %08x)\n",
				clp->cl_clientid.cl_id);
			continue;
		}
		list_add(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&nn->client_lock);
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		dprintk("NFSD: purging unused client (clientid %08x)\n",
			clp->cl_clientid.cl_id);
		list_del_init(&clp->cl_lru);
		expire_client(clp);
	}

	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
			continue;
		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
			t = dp->dl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		unhash_delegation_locked(dp);
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_first_entry(&reaplist, struct nfs4_delegation,
					dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		revoke_delegation(dp);
	}

	spin_lock(&nn->client_lock);
	while (!list_empty(&nn->close_lru)) {
		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
					oo_close_lru);
		if (time_after((unsigned long)oo->oo_time,
			       (unsigned long)cutoff)) {
			t = oo->oo_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_del_init(&oo->oo_close_lru);
		stp = oo->oo_last_closed_stid;
		oo->oo_last_closed_stid = NULL;
		spin_unlock(&nn->client_lock);
		nfs4_put_stid(&stp->st_stid);
		spin_lock(&nn->client_lock);
	}
	spin_unlock(&nn->client_lock);

	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
	return new_timeo;
}
static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);

static void
laundromat_main(struct work_struct *laundry)
{
	time_t t;
	struct delayed_work *dwork = container_of(laundry, struct delayed_work,
						  work);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

	t = nfs4_laundromat(nn);
	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}

static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
{
	if (!nfsd_fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
		return nfserr_bad_stateid;
	return nfs_ok;
}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static
__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
	__be32 status = nfserr_openmode;

	/* For lock stateids, we test the parent open, not the lock: */
	if (stp->st_openstp)
		stp = stp->st_openstp;
	if ((flags & WR_STATE) && !access_permit_write(stp))
		goto out;
	if ((flags & RD_STATE) && !access_permit_read(stp))
		goto out;
	status = nfs_ok;
out:
	return status;
}
static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
	else if (locks_in_grace(net)) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}

/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
grace_disallows_io(struct net *net, struct inode *inode)
{
	return locks_in_grace(net) && mandatory_lock(inode);
}

/* Returns true iff a is later than b: */
static bool stateid_generation_after(stateid_t *a, stateid_t *b)
{
	return (s32)(a->si_generation - b->si_generation) > 0;
}

static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
		return nfs_ok;

	if (in->si_generation == ref->si_generation)
		return nfs_ok;

	/* If the client sends us a stateid from the future, it's buggy: */
	if (stateid_generation_after(in, ref))
		return nfserr_bad_stateid;
	/*
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client. For example, if the client sends a lock
	 * while some IO is outstanding, the lock may bump si_generation
	 * while the IO is still in flight. The client could avoid that
	 * situation by waiting for responses on all the IO requests,
	 * but better performance may result in retrying IO that
	 * receives an old_stateid error if requests are rarely
	 * reordered in flight:
	 */
	return nfserr_old_stateid;
}
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *ols;
	__be32 status = nfserr_bad_stateid;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return status;
	/* Client debugging aid. */
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
		char addr_str[INET6_ADDRSTRLEN];

		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
			 sizeof(addr_str));
		pr_warn_ratelimited("NFSD: client %s testing state ID "
				    "with incorrect client ID\n", addr_str);
		return status;
	}
	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (status)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs_ok;
		break;
	case NFS4_REVOKED_DELEG_STID:
		status = nfserr_deleg_revoked;
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		ols = openlockstateid(s);
		if (ols->st_stateowner->so_is_open_owner
		    && !(openowner(ols->st_stateowner)->oo_flags
			 & NFS4_OO_CONFIRMED))
			status = nfserr_bad_stateid;
		else
			status = nfs_ok;
		break;
	default:
		printk("unknown stateid type %x\n", s->sc_type);
		/* Fallthrough */
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		status = nfserr_bad_stateid;
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
	return status;
}

static __be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
		     stateid_t *stateid, unsigned char typemask,
		     struct nfs4_stid **s, struct nfsd_net *nn)
{
	__be32 status;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return nfserr_bad_stateid;
	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
	if (status == nfserr_stale_clientid) {
		if (cstate->session)
			return nfserr_bad_stateid;
		return nfserr_stale_stateid;
	}
	if (status)
		return status;
	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
	if (!*s)
		return nfserr_bad_stateid;
	return nfs_ok;
}
/*
 * Checks for stateid operations
 */
__be32
nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
			   stateid_t *stateid, int flags, struct file **filpp)
{
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	struct svc_fh *current_fh = &cstate->current_fh;
	struct inode *ino = current_fh->fh_dentry->d_inode;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct file *file = NULL;
	__be32 status;

	if (filpp)
		*filpp = NULL;

	if (grace_disallows_io(net, ino))
		return nfserr_grace;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return check_special_stateids(net, current_fh, stateid, flags);

	status = nfsd4_lookup_stateid(cstate, stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				&s, nn);
	if (status)
		return status;
	status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
	if (status)
		goto out;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		dp = delegstateid(s);
		status = nfs4_check_delegmode(dp, flags);
		if (status)
			goto out;
		if (filpp) {
			file = dp->dl_stid.sc_file->fi_deleg_file;
			if (!file) {
				WARN_ON_ONCE(1);
				status = nfserr_serverfault;
				goto out;
			}
			get_file(file);
		}
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		stp = openlockstateid(s);
		status = nfs4_check_fh(current_fh, stp);
		if (status)
			goto out;
		if (stp->st_stateowner->so_is_open_owner
		    && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
			goto out;
		status = nfs4_check_openmode(stp, flags);
		if (status)
			goto out;
		if (filpp) {
			struct nfs4_file *fp = stp->st_stid.sc_file;

			if (flags & RD_STATE)
				file = find_readable_file(fp);
			else
				file = find_writeable_file(fp);
		}
		break;
	default:
		status = nfserr_bad_stateid;
		goto out;
	}
	status = nfs_ok;
	if (file)
		*filpp = file;
out:
	nfs4_put_stid(s);
	return status;
}
  3978. /*
  3979. * Test if the stateid is valid
  3980. */
  3981. __be32
  3982. nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
  3983. struct nfsd4_test_stateid *test_stateid)
  3984. {
  3985. struct nfsd4_test_stateid_id *stateid;
  3986. struct nfs4_client *cl = cstate->session->se_client;
  3987. list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
  3988. stateid->ts_id_status =
  3989. nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
  3990. return nfs_ok;
  3991. }
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_free_stateid *free_stateid)
{
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	struct nfs4_delegation *dp;
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *cl = cstate->session->se_client;
	__be32 ret = nfserr_bad_stateid;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		ret = nfserr_locks_held;
		break;
	case NFS4_OPEN_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		ret = nfserr_locks_held;
		break;
	case NFS4_LOCK_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		stp = openlockstateid(s);
		ret = nfserr_locks_held;
		if (check_for_locks(stp->st_stid.sc_file,
				    lockowner(stp->st_stateowner)))
			break;
		unhash_lock_stateid(stp);
		spin_unlock(&cl->cl_lock);
		nfs4_put_stid(s);
		ret = nfs_ok;
		goto out;
	case NFS4_REVOKED_DELEG_STID:
		dp = delegstateid(s);
		list_del_init(&dp->dl_recall_lru);
		spin_unlock(&cl->cl_lock);
		nfs4_put_stid(s);
		ret = nfs_ok;
		goto out;
	/* Default falls through and returns nfserr_bad_stateid */
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
out:
	return ret;
}
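
/*
 * Summary of the FREE_STATEID cases above (informational, derived from the
 * switch): delegation and open stateids are never freed this way
 * (nfserr_locks_held), a lock stateid is freed only once no locks remain
 * for its lockowner, a revoked delegation is the expected caller and is
 * reaped immediately, and any other type yields nfserr_bad_stateid.
 */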
static inline int
setlkflg (int type)
{
	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
		RD_STATE : WR_STATE;
}
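
/*
 * E.g. NFS4_READ_LT/NFS4_READW_LT map to RD_STATE, while NFS4_WRITE_LT and
 * NFS4_WRITEW_LT map to WR_STATE; nfsd4_lock() uses the result to verify
 * the lock stateid's open mode before granting the lock.
 */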
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	if (stp->st_stid.sc_type == NFS4_CLOSED_STID
		|| stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
		/*
		 * "Closed" stateid's exist *only* to return
		 * nfserr_replay_me from the previous step, and
		 * revoked delegations are kept only for free_stateid.
		 */
		return nfserr_bad_stateid;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status)
		return status;
	return nfs4_check_fh(current_fh, stp);
}
/*
 * Checks for sequence id mutating operations.
 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;

	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
		seqid, STATEID_VAL(stateid));

	*stpp = NULL;
	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
	if (status)
		return status;
	stp = openlockstateid(s);
	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);

	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
	if (!status)
		*stpp = stp;
	else
		nfs4_put_stid(&stp->st_stid);
	return status;
}
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
						NFS4_OPEN_STID, &stp, nn);
	if (status)
		return status;
	oo = openowner(stp->st_stateowner);
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		nfs4_put_stid(&stp->st_stid);
		return nfserr_bad_stateid;
	}
	*stpp = stp;
	return nfs_ok;
}
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_open_confirm *oc)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp, nn);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	if (oo->oo_flags & NFS4_OO_CONFIRMED)
		goto put_stateid;
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

	nfsd4_client_record_create(oo->oo_owner.so_client);
	status = nfs_ok;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
	if (!test_access(access, stp))
		return;
	nfs4_file_put_access(stp->st_stid.sc_file, access);
	clear_access(access, stp);
}

static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
	switch (to_access) {
	case NFS4_SHARE_ACCESS_READ:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_WRITE:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_BOTH:
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
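
/*
 * Worked example (informational): an OPEN_DOWNGRADE from ACCESS_BOTH to
 * ACCESS_READ clears the WRITE and BOTH bits from st_access_bmap and drops
 * the matching struct nfs4_file access references, leaving only READ;
 * downgrading *to* ACCESS_BOTH is a no-op, since no bit lies above it.
 */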
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_open_downgrade *od)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
			cstate->current_fh.fh_dentry);

	/* We don't yet support WANT bits: */
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);

	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp, nn);
	if (status)
		goto out;
	status = nfserr_inval;
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto put_stateid;
	}
	if (!test_deny(od->od_share_deny, stp)) {
		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto put_stateid;
	}
	nfs4_stateid_downgrade(stp, od->od_share_access);

	reset_union_bmap_deny(od->od_share_deny, stp);

	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	status = nfs_ok;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	return status;
}
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	struct nfs4_client *clp = s->st_stid.sc_client;
	LIST_HEAD(reaplist);

	s->st_stid.sc_type = NFS4_CLOSED_STID;
	spin_lock(&clp->cl_lock);
	unhash_open_stateid(s, &reaplist);

	if (clp->cl_minorversion) {
		put_ol_stateid_locked(s, &reaplist);
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
	} else {
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
		move_to_close_lru(s, clp->net);
	}
}
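
/*
 * Why the minorversion split above (informational): NFSv4.1+ sessions
 * replay CLOSE from the reply cache, so the stateid can be released right
 * away; an NFSv4.0 client may retransmit the CLOSE, so the now-closed
 * stateid is parked on the close_lru instead and reaped later, keeping
 * seqid replay detection working in the meantime.
 */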
/*
 * nfs4_unlock_state() called after encode
 */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_close *close)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_close on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
					&close->cl_stateid,
					NFS4_OPEN_STID|NFS4_CLOSED_STID,
					&stp, nn);
	nfsd4_bump_seqid(cstate, status);
	if (status)
		goto out;
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

	nfsd4_close_open_stateid(stp);

	/* put reference from nfs4_preprocess_seqid_op */
	nfs4_put_stid(&stp->st_stid);
out:
	return status;
}
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_delegreturn *dr)
{
	struct nfs4_delegation *dp;
	stateid_t *stateid = &dr->dr_stateid;
	struct nfs4_stid *s;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		return status;

	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
	if (status)
		goto out;
	dp = delegstateid(s);
	status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status)
		goto put_stateid;

	destroy_delegation(dp);
put_stateid:
	nfs4_put_stid(&dp->dl_stid);
out:
	return status;
}
#define LOFF_OVERFLOW(start, len)	((u64)(len) > ~(u64)(start))

static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end: NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	WARN_ON_ONCE(!len);
	end = start + len;
	return end > start ? end - 1: NFS4_MAX_UINT64;
}
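
/*
 * Worked examples (informational): with start=100, len=10 the range covers
 * bytes 100..109, so last_byte_offset() returns 109. A "lock to EOF"
 * request uses len=NFS4_MAX_UINT64; with a nonzero start the u64 addition
 * then wraps (end <= start) and both helpers saturate to NFS4_MAX_UINT64,
 * which the callers below clamp to OFFSET_MAX for the VFS.
 */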
/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}
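
/*
 * E.g. a client lock at offset 2^63 arrives as a negative loff_t once the
 * u64 wire value is stored into fl_start; the transform above clamps both
 * ends to OFFSET_MAX (2^63 - 1), so such ranges degrade to "through EOF"
 * rather than feeding negative offsets into the VFS lock code.
 */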
static void nfsd4_fl_get_owner(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)src->fl_owner;
	dst->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lo->lo_owner));
}

static void nfsd4_fl_put_owner(struct file_lock *fl)
{
	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;

	if (lo) {
		nfs4_put_stateowner(&lo->lo_owner);
		fl->fl_owner = NULL;
	}
}

static const struct lock_manager_operations nfsd_posix_mng_ops = {
	.lm_get_owner = nfsd4_fl_get_owner,
	.lm_put_owner = nfsd4_fl_put_owner,
};
static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
	struct nfs4_lockowner *lo;

	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
					lo->lo_owner.so_owner.len, GFP_KERNEL);
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
		deny->ld_owner.len = lo->lo_owner.so_owner.len;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
	} else {
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
	}
	deny->ld_start = fl->fl_start;
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
		deny->ld_length = fl->fl_end - fl->fl_start + 1;
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}
static struct nfs4_lockowner *
find_lockowner_str_locked(clientid_t *clid, struct xdr_netobj *owner,
			  struct nfs4_client *clp)
{
	unsigned int strhashval = ownerstr_hashval(owner);
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
			    so_strhash) {
		if (so->so_is_open_owner)
			continue;
		if (same_owner_str(so, owner))
			return lockowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_lockowner *
find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
		   struct nfs4_client *clp)
{
	struct nfs4_lockowner *lo;

	spin_lock(&clp->cl_lock);
	lo = find_lockowner_str_locked(clid, owner, clp);
	spin_unlock(&clp->cl_lock);
	return lo;
}

static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{
	unhash_lockowner_locked(lockowner(sop));
}

static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
{
	struct nfs4_lockowner *lo = lockowner(sop);

	kmem_cache_free(lockowner_slab, lo);
}

static const struct nfs4_stateowner_operations lockowner_ops = {
	.so_unhash =	nfs4_unhash_lockowner,
	.so_free =	nfs4_free_lockowner,
};
/*
 * Alloc a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
 * occurred.
 *
 * strhashval = ownerstr_hashval
 */
static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
			   struct nfs4_ol_stateid *open_stp,
			   struct nfsd4_lock *lock)
{
	struct nfs4_lockowner *lo, *ret;

	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
	if (!lo)
		return NULL;
	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
	lo->lo_owner.so_is_open_owner = 0;
	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
	lo->lo_owner.so_ops = &lockowner_ops;
	spin_lock(&clp->cl_lock);
	ret = find_lockowner_str_locked(&clp->cl_clientid,
			&lock->lk_new_owner, clp);
	if (ret == NULL) {
		list_add(&lo->lo_owner.so_strhash,
			 &clp->cl_ownerstr_hashtbl[strhashval]);
		ret = lo;
	} else
		nfs4_free_lockowner(&lo->lo_owner);
	spin_unlock(&clp->cl_lock);
	/*
	 * Return the owner that is actually hashed: when a racing creator
	 * won, "lo" was just freed above and the matched "ret" (which
	 * already holds a reference) must be returned instead.
	 */
	return ret;
}
static void
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
		  struct nfs4_file *fp, struct inode *inode,
		  struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	atomic_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_LOCK_STID;
	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_stid.sc_free = nfs4_free_lock_stateid;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = open_stp->st_deny_bmap;
	stp->st_openstp = open_stp;
	list_add(&stp->st_locks, &open_stp->st_locks);
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
}
static struct nfs4_ol_stateid *
find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *lst;
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
		if (lst->st_stid.sc_file == fp) {
			atomic_inc(&lst->st_stid.sc_count);
			return lst;
		}
	}
	return NULL;
}
static struct nfs4_ol_stateid *
find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
			    struct inode *inode, struct nfs4_ol_stateid *ost,
			    bool *new)
{
	struct nfs4_stid *ns = NULL;
	struct nfs4_ol_stateid *lst;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *clp = oo->oo_owner.so_client;

	spin_lock(&clp->cl_lock);
	lst = find_lock_stateid(lo, fi);
	if (lst == NULL) {
		spin_unlock(&clp->cl_lock);
		ns = nfs4_alloc_stid(clp, stateid_slab);
		if (ns == NULL)
			return NULL;

		spin_lock(&clp->cl_lock);
		lst = find_lock_stateid(lo, fi);
		if (likely(!lst)) {
			lst = openlockstateid(ns);
			init_lock_stateid(lst, lo, fi, inode, ost);
			ns = NULL;
			*new = true;
		}
	}
	spin_unlock(&clp->cl_lock);
	if (ns)
		nfs4_put_stid(ns);
	return lst;
}
static int
check_lock_length(u64 offset, u64 length)
{
	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
		LOFF_OVERFLOW(offset, length)));
}
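
/*
 * E.g. offset=0xfffffffffffffff0, length=0x20 is rejected: length is
 * neither 0 nor NFS4_MAX_UINT64, and LOFF_OVERFLOW sees 0x20 > ~offset
 * (= 0xf), i.e. the range would wrap past the end of the u64 offset
 * space. length == NFS4_MAX_UINT64 is always allowed as the "to EOF"
 * idiom, and length == 0 is always invalid.
 */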
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
	struct nfs4_file *fp = lock_stp->st_stid.sc_file;

	lockdep_assert_held(&fp->fi_lock);

	if (test_access(access, lock_stp))
		return;
	__nfs4_file_get_access(fp, access);
	set_access(access, lock_stp);
}
static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
			    struct nfs4_ol_stateid *ost,
			    struct nfsd4_lock *lock,
			    struct nfs4_ol_stateid **lst, bool *new)
{
	__be32 status;
	struct nfs4_file *fi = ost->st_stid.sc_file;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *cl = oo->oo_owner.so_client;
	struct inode *inode = cstate->current_fh.fh_dentry->d_inode;
	struct nfs4_lockowner *lo;
	unsigned int strhashval;

	lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, cl);
	if (!lo) {
		strhashval = ownerstr_hashval(&lock->v.new.owner);
		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
		if (lo == NULL)
			return nfserr_jukebox;
	} else {
		/* with an existing lockowner, seqids must be the same */
		status = nfserr_bad_seqid;
		if (!cstate->minorversion &&
		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
			goto out;
	}

	*lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
	if (*lst == NULL) {
		status = nfserr_jukebox;
		goto out;
	}
	status = nfs_ok;
out:
	nfs4_put_stateowner(&lo->lo_owner);
	return status;
}
/*
 * LOCK operation
 */
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	   struct nfsd4_lock *lock)
{
	struct nfs4_openowner *open_sop = NULL;
	struct nfs4_lockowner *lock_sop = NULL;
	struct nfs4_ol_stateid *lock_stp = NULL;
	struct nfs4_ol_stateid *open_stp = NULL;
	struct nfs4_file *fp;
	struct file *filp = NULL;
	struct file_lock *file_lock = NULL;
	struct file_lock *conflock = NULL;
	__be32 status = 0;
	int lkflg;
	int err;
	bool new = false;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
		(long long) lock->lk_offset,
		(long long) lock->lk_length);

	if (check_lock_length(lock->lk_offset, lock->lk_length))
		return nfserr_inval;

	if ((status = fh_verify(rqstp, &cstate->current_fh,
				S_IFREG, NFSD_MAY_LOCK))) {
		dprintk("NFSD: nfsd4_lock: permission denied!\n");
		return status;
	}

	if (lock->lk_is_new) {
		if (nfsd4_has_session(cstate))
			/* See rfc 5661 18.10.3: given clientid is ignored: */
			memcpy(&lock->v.new.clientid,
				&cstate->session->se_client->cl_clientid,
				sizeof(clientid_t));

		status = nfserr_stale_clientid;
		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
			goto out;

		/* validate and update open stateid and open seqid */
		status = nfs4_preprocess_confirmed_seqid_op(cstate,
					lock->lk_new_open_seqid,
					&lock->lk_new_open_stateid,
					&open_stp, nn);
		if (status)
			goto out;
		open_sop = openowner(open_stp->st_stateowner);
		status = nfserr_bad_stateid;
		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
					&lock->v.new.clientid))
			goto out;
		status = lookup_or_create_lock_state(cstate, open_stp, lock,
							&lock_stp, &new);
	} else {
		status = nfs4_preprocess_seqid_op(cstate,
					lock->lk_old_lock_seqid,
					&lock->lk_old_lock_stateid,
					NFS4_LOCK_STID, &lock_stp, nn);
	}
	if (status)
		goto out;
	lock_sop = lockowner(lock_stp->st_stateowner);

	lkflg = setlkflg(lock->lk_type);
	status = nfs4_check_openmode(lock_stp, lkflg);
	if (status)
		goto out;

	status = nfserr_grace;
	if (locks_in_grace(net) && !lock->lk_reclaim)
		goto out;
	status = nfserr_no_grace;
	if (!locks_in_grace(net) && lock->lk_reclaim)
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	fp = lock_stp->st_stid.sc_file;
	switch (lock->lk_type) {
	case NFS4_READ_LT:
	case NFS4_READW_LT:
		spin_lock(&fp->fi_lock);
		filp = find_readable_file_locked(fp);
		if (filp)
			get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
		spin_unlock(&fp->fi_lock);
		file_lock->fl_type = F_RDLCK;
		break;
	case NFS4_WRITE_LT:
	case NFS4_WRITEW_LT:
		spin_lock(&fp->fi_lock);
		filp = find_writeable_file_locked(fp);
		if (filp)
			get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
		spin_unlock(&fp->fi_lock);
		file_lock->fl_type = F_WRLCK;
		break;
	default:
		status = nfserr_inval;
		goto out;
	}
	if (!filp) {
		status = nfserr_openmode;
		goto out;
	}

	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = lock->lk_offset;
	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
	nfs4_transform_lock_offset(file_lock);

	conflock = locks_alloc_lock();
	if (!conflock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
	switch (-err) {
	case 0: /* success! */
		update_stateid(&lock_stp->st_stid.sc_stateid);
		memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
				sizeof(stateid_t));
		status = 0;
		break;
	case (EAGAIN):		/* conflock holds conflicting lock */
		status = nfserr_denied;
		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
		nfs4_set_lock_denied(conflock, &lock->lk_denied);
		break;
	case (EDEADLK):
		status = nfserr_deadlock;
		break;
	default:
		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
		status = nfserrno(err);
		break;
	}
out:
	if (filp)
		fput(filp);
	if (lock_stp) {
		/* Bump seqid manually if the 4.0 replay owner is openowner */
		if (cstate->replay_owner &&
		    cstate->replay_owner != &lock_sop->lo_owner &&
		    seqid_mutating_err(ntohl(status)))
			lock_sop->lo_owner.so_seqid++;

		/*
		 * If this is a new, never-before-used stateid, and we are
		 * returning an error, then just go ahead and release it.
		 */
		if (status && new)
			release_lock_stateid(lock_stp);

		nfs4_put_stid(&lock_stp->st_stid);
	}
	if (open_stp)
		nfs4_put_stid(&open_stp->st_stid);
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	if (conflock)
		locks_free_lock(conflock);
	return status;
}
/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
 * inode operation.)
 */
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
	struct file *file;
	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
	if (!err) {
		err = nfserrno(vfs_test_lock(file, lock));
		nfsd_close(file);
	}
	return err;
}
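
/*
 * Note on the contract (informational): vfs_test_lock() leaves *lock with
 * fl_type == F_UNLCK when no conflicting lock exists and rewrites it to
 * describe the conflicting lock otherwise - which is why nfsd4_lockt()
 * below checks fl_type != F_UNLCK to decide whether to fill in the
 * LOCKT4denied result.
 */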
/*
 * LOCKT operation
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_lockt *lockt)
{
	struct file_lock *file_lock = NULL;
	struct nfs4_lockowner *lo = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (locks_in_grace(SVC_NET(rqstp)))
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		return nfserr_inval;

	if (!nfsd4_has_session(cstate)) {
		status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
		if (status)
			goto out;
	}

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	switch (lockt->lt_type) {
	case NFS4_READ_LT:
	case NFS4_READW_LT:
		file_lock->fl_type = F_RDLCK;
		break;
	case NFS4_WRITE_LT:
	case NFS4_WRITEW_LT:
		file_lock->fl_type = F_WRLCK;
		break;
	default:
		dprintk("NFSD: nfs4_lockt: bad lock type!\n");
		status = nfserr_inval;
		goto out;
	}

	lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner,
				cstate->clp);
	if (lo)
		file_lock->fl_owner = (fl_owner_t)lo;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_flags = FL_POSIX;

	file_lock->fl_start = lockt->lt_offset;
	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(file_lock);

	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
	if (status)
		goto out;

	if (file_lock->fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
	}
out:
	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;
}
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_locku *locku)
{
	struct nfs4_ol_stateid *stp;
	struct file *filp = NULL;
	struct file_lock *file_lock = NULL;
	__be32 status;
	int err;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		return nfserr_inval;

	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
					&locku->lu_stateid, NFS4_LOCK_STID,
					&stp, nn);
	if (status)
		goto out;
	filp = find_any_file(stp->st_stid.sc_file);
	if (!filp) {
		status = nfserr_lock_range;
		goto put_stateid;
	}
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto fput;
	}

	file_lock->fl_type = F_UNLCK;
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;
	file_lock->fl_end = last_byte_offset(locku->lu_offset,
						locku->lu_length);
	nfs4_transform_lock_offset(file_lock);

	err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
	if (err) {
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
		goto out_nfserr;
	}
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
fput:
	fput(filp);
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;

out_nfserr:
	status = nfserrno(err);
	goto fput;
}
/*
 * returns
 * 	true:  locks held by lockowner
 * 	false: no locks held by lockowner
 */
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
	struct file_lock **flpp;
	int status = false;
	struct file *filp = find_any_file(fp);
	struct inode *inode;

	if (!filp) {
		/* Any valid lock stateid should have some sort of access */
		WARN_ON_ONCE(1);
		return status;
	}

	inode = file_inode(filp);

	spin_lock(&inode->i_lock);
	for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
		if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
			status = true;
			break;
		}
	}
	spin_unlock(&inode->i_lock);
	fput(filp);
	return status;
}
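
/*
 * The walk above scans the inode's whole i_flock list and matches purely
 * on fl_owner, so a single surviving byte range anywhere in the file is
 * enough to keep RELEASE_LOCKOWNER and FREE_STATEID returning
 * nfserr_locks_held for this lockowner.
 */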
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			struct nfsd4_release_lockowner *rlockowner)
{
	clientid_t *clid = &rlockowner->rl_clientid;
	struct nfs4_stateowner *sop;
	struct nfs4_lockowner *lo = NULL;
	struct nfs4_ol_stateid *stp;
	struct xdr_netobj *owner = &rlockowner->rl_owner;
	unsigned int hashval = ownerstr_hashval(owner);
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct nfs4_client *clp;

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	status = lookup_clientid(clid, cstate, nn);
	if (status)
		return status;

	clp = cstate->clp;
	/* Find the matching lock stateowner */
	spin_lock(&clp->cl_lock);
	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {

		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
			continue;

		/* see if there are still any locks associated with it */
		lo = lockowner(sop);
		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
			if (check_for_locks(stp->st_stid.sc_file, lo)) {
				status = nfserr_locks_held;
				spin_unlock(&clp->cl_lock);
				return status;
			}
		}

		nfs4_get_stateowner(sop);
		break;
	}
	spin_unlock(&clp->cl_lock);
	if (lo)
		release_lockowner(lo);
	return status;
}
static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}

bool
nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp;

	crp = nfsd4_find_reclaim_client(name, nn);
	return (crp && crp->cr_clp);
}
/*
 * failure => all reset bets are off, nfserr_no_grace...
 */
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp;

	dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
	crp = alloc_reclaim();
	if (crp) {
		strhashval = clientstr_hashval(name);
		INIT_LIST_HEAD(&crp->cr_strhash);
		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
		memcpy(crp->cr_recdir, name, HEXDIR_LEN);
		crp->cr_clp = NULL;
		nn->reclaim_str_hashtbl_size++;
	}
	return crp;
}
void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
	list_del(&crp->cr_strhash);
	kfree(crp);
	nn->reclaim_str_hashtbl_size--;
}

void
nfs4_release_reclaim(struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp = NULL;
	int i;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
					struct nfs4_client_reclaim, cr_strhash);
			nfs4_remove_reclaim_record(crp, nn);
		}
	}
	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
}
/*
 * called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp = NULL;

	dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);

	strhashval = clientstr_hashval(recdir);
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (same_name(crp->cr_recdir, recdir)) {
			return crp;
		}
	}
	return NULL;
}
/*
 * Called from OPEN. Look for clientid in reclaim list.
 */
__be32
nfs4_check_open_reclaim(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	__be32 status;

	/* find clientid in conf_id_hashtbl */
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		return nfserr_reclaim_bad;

	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
		return nfserr_no_grace;

	if (nfsd4_client_record_check(cstate->clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
}
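
/*
 * In other words (informational): a reclaim is honored only while the
 * client is still within its grace window - an unknown clientid or a
 * missing stable-storage record yields nfserr_reclaim_bad, and a client
 * that has already signalled RECLAIM_COMPLETE gets nfserr_no_grace for
 * any further reclaim attempts.
 */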
#ifdef CONFIG_NFSD_FAULT_INJECTION
static inline void
put_client(struct nfs4_client *clp)
{
	atomic_dec(&clp->cl_refcount);
}

static struct nfs4_client *
nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
{
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return NULL;

	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
			return clp;
	}
	return NULL;
}
u64
nfsd_inject_print_clients(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);
	char buf[INET6_ADDRSTRLEN];

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
		pr_info("NFS Client: %s\n", buf);
		++count;
	}
	spin_unlock(&nn->client_lock);

	return count;
}

u64
nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp) {
		if (mark_client_expired_locked(clp) == nfs_ok)
			++count;
		else
			clp = NULL;
	}
	spin_unlock(&nn->client_lock);

	if (clp)
		expire_client(clp);

	return count;
}

u64
nfsd_inject_forget_clients(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		if (mark_client_expired_locked(clp) == nfs_ok) {
			list_add(&clp->cl_lru, &reaplist);
			if (max != 0 && ++count >= max)
				break;
		}
	}
	spin_unlock(&nn->client_lock);

	list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
		expire_client(clp);

	return count;
}
static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
			     const char *type)
{
	char buf[INET6_ADDRSTRLEN];
	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
	printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
}

static void
nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
			     struct list_head *collect)
{
	struct nfs4_client *clp = lst->st_stid.sc_client;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!collect)
		return;

	lockdep_assert_held(&nn->client_lock);
	atomic_inc(&clp->cl_refcount);
	list_add(&lst->st_locks, collect);
}

static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
				    struct list_head *collect,
				    void (*func)(struct nfs4_ol_stateid *))
{
	struct nfs4_openowner *oop;
	struct nfs4_ol_stateid *stp, *st_next;
	struct nfs4_ol_stateid *lst, *lst_next;
	u64 count = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
		list_for_each_entry_safe(stp, st_next,
				&oop->oo_owner.so_stateids, st_perstateowner) {
			list_for_each_entry_safe(lst, lst_next,
					&stp->st_locks, st_locks) {
				if (func) {
					func(lst);
					nfsd_inject_add_lock_to_list(lst,
								collect);
				}
				++count;
				/*
				 * Despite the fact that these functions deal
				 * with 64-bit integers for "count", we must
				 * ensure that it doesn't blow up the
				 * clp->cl_refcount. Throw a warning if we
				 * start to approach INT_MAX here.
				 */
				WARN_ON_ONCE(count == (INT_MAX / 2));
				if (count == max)
					goto out;
			}
		}
	}
out:
	spin_unlock(&clp->cl_lock);

	return count;
}
static u64
nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
			  u64 max)
{
	return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
}

static u64
nfsd_print_client_locks(struct nfs4_client *clp)
{
	u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
	nfsd_print_count(clp, count, "locked files");
	return count;
}

u64
nfsd_inject_print_locks(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_locks(clp);
	spin_unlock(&nn->client_lock);

	return count;
}

static void
nfsd_reap_locks(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_ol_stateid *stp, *next;

	list_for_each_entry_safe(stp, next, reaplist, st_locks) {
		list_del_init(&stp->st_locks);
		clp = stp->st_stid.sc_client;
		nfs4_put_stid(&stp->st_stid);
		put_client(clp);
	}
}

u64
nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
{
	unsigned int count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_collect_client_locks(clp, &reaplist, 0);
	spin_unlock(&nn->client_lock);
	nfsd_reap_locks(&reaplist);
	return count;
}

u64
nfsd_inject_forget_locks(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_collect_client_locks(clp, &reaplist, max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_locks(&reaplist);
	return count;
}
static u64
nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
			      struct list_head *collect,
			      void (*func)(struct nfs4_openowner *))
{
	struct nfs4_openowner *oop, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
		if (func) {
			func(oop);
			if (collect) {
				atomic_inc(&clp->cl_refcount);
				list_add(&oop->oo_perclient, collect);
			}
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&clp->cl_lock);

	return count;
}

static u64
nfsd_print_client_openowners(struct nfs4_client *clp)
{
	u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);

	nfsd_print_count(clp, count, "openowners");
	return count;
}

static u64
nfsd_collect_client_openowners(struct nfs4_client *clp,
			       struct list_head *collect, u64 max)
{
	return nfsd_foreach_client_openowner(clp, max, collect,
						unhash_openowner_locked);
}

u64
nfsd_inject_print_openowners(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_openowners(clp);
	spin_unlock(&nn->client_lock);

	return count;
}

static void
nfsd_reap_openowners(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oop, *next;

	list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
		list_del_init(&oop->oo_perclient);
		clp = oop->oo_owner.so_client;
		release_openowner(oop);
		put_client(clp);
	}
}

u64
nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
				     size_t addr_size)
{
	unsigned int count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_collect_client_openowners(clp, &reaplist, 0);
	spin_unlock(&nn->client_lock);
	nfsd_reap_openowners(&reaplist);
	return count;
}

u64
nfsd_inject_forget_openowners(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_collect_client_openowners(clp, &reaplist,
							max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_reap_openowners(&reaplist);
	return count;
}
static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
				     struct list_head *victims)
{
	struct nfs4_delegation *dp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&state_lock);
	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
		if (victims) {
			/*
			 * It's not safe to mess with delegations that have a
			 * non-zero dl_time. They might have already been broken
			 * and could be processed by the laundromat outside of
			 * the state_lock. Just leave them be.
			 */
			if (dp->dl_time != 0)
				continue;

			atomic_inc(&clp->cl_refcount);
			unhash_delegation_locked(dp);
			list_add(&dp->dl_recall_lru, victims);
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&state_lock);
	return count;
}

static u64
nfsd_print_client_delegations(struct nfs4_client *clp)
{
	u64 count = nfsd_find_all_delegations(clp, 0, NULL);

	nfsd_print_count(clp, count, "delegations");
	return count;
}

u64
nfsd_inject_print_delegations(void)
{
	struct nfs4_client *clp;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru)
		count += nfsd_print_client_delegations(clp);
	spin_unlock(&nn->client_lock);

	return count;
}

static void
nfsd_forget_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		revoke_delegation(dp);
		put_client(clp);
	}
}

u64
nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_forget_delegations(&reaplist);
	return count;
}

u64
nfsd_inject_forget_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_forget_delegations(&reaplist);
	return count;
}
static void
nfsd_recall_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		/*
		 * We skipped all entries that had a zero dl_time before,
		 * so we can now reset the dl_time back to 0. If a delegation
		 * break comes in now, then it won't make any difference since
		 * we're recalling it either way.
		 */
		spin_lock(&state_lock);
		dp->dl_time = 0;
		spin_unlock(&state_lock);
		nfsd_break_one_deleg(dp);
		put_client(clp);
	}
}

u64
nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	nfsd_recall_delegations(&reaplist);
	return count;
}

u64
nfsd_inject_recall_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && ++count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	nfsd_recall_delegations(&reaplist);
	return count;
}
#endif /* CONFIG_NFSD_FAULT_INJECTION */

/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached. This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
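
/*
 * Worked arithmetic (informational): nr_free_buffer_pages() is in pages,
 * so ">> (20 - 2 - PAGE_SHIFT)" divides by 2^20 (bytes per MB), multiplies
 * by 2^PAGE_SHIFT (bytes per page) and by 2^2, i.e. (memory in MB) * 4.
 * With 4K pages that is ">> 6": 1 GB of low memory (~262144 pages) allows
 * about 4096 delegations.
 */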
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
			SESSION_HASH_SIZE, GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}
static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}
	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}
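
/*
 * Per-namespace startup: allocate the state tables, start the grace
 * period, and schedule the laundromat so that its first run coincides
 * with the end of the grace period (nfs4_laundromat, invoked from
 * laundromat_main, handles ending the grace period and expiring stale
 * clients).
 */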
int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	nn->boot_time = get_seconds();
	nn->grace_ended = false;
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
	       nn->nfsd4_grace, net);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;
}

/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

	ret = set_callback_cred();
	if (ret)
		return -ENOMEM;
	laundry_wq = create_singlethread_workqueue("nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out_recovery;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();
	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out_recovery:
	return ret;
}

void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);
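
	/*
	 * Unhash every remaining delegation while holding state_lock, but
	 * defer dropping the lease and stateid references until after the
	 * lock is released, since those release paths may block.
	 */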
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		unhash_delegation_locked(dp);
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
}
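
/* counterpart of nfs4_state_start(): called when the nfsd service stops */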
void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
}
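
/*
 * NFSv4.1 "current stateid" support (one of RFC 5661's special
 * stateids): within a compound, a client may send the current-stateid
 * sentinel (seqid 1, all-zero opaque field) in place of a real stateid,
 * meaning "the stateid produced by the previous operation".
 * put_stateid() records an operation's result stateid in the compound
 * state; get_stateid() substitutes that saved value when an operation's
 * argument carries the sentinel.
 */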
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
	}
}

void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}

/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	put_stateid(cstate, &odp->od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
	put_stateid(cstate, &open->op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	put_stateid(cstate, &close->cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
{
	put_stateid(cstate, &lock->lk_resp_stateid);
}

/*
 * functions to consume current state id
 */
void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	get_stateid(cstate, &odp->od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
{
	get_stateid(cstate, &drp->dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
{
	get_stateid(cstate, &fsp->fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
{
	get_stateid(cstate, &setattr->sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	get_stateid(cstate, &close->cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
{
	get_stateid(cstate, &locku->lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
{
	get_stateid(cstate, &read->rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
{
	get_stateid(cstate, &write->wr_stateid);
}